d78c620a2e
In the process of debugging a system with an NVDIMM that was failing to unlock it was found that the kernel is reporting 'locked' while the DIMM security interface is 'frozen'. Unfortunately the security state is tracked internally as an enum, which prevents it from communicating the difference between 'locked' and 'locked + frozen'. It follows that the enum also prevents the kernel from communicating 'unlocked + frozen', which would be useful for debugging why security operations like 'change passphrase' are disabled.

Ditch the security state enum for a set of flags and introduce a new sysfs attribute explicitly for the 'frozen' state. The regression risk is low because the 'frozen' state was already blocked behind the 'locked' state, but this will need to be revisited if there are cases where applications need 'frozen' to show up in the primary 'security' attribute. The expectation is that communicating 'frozen' is mostly a helper for debug and status monitoring.

Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reported-by: Jeff Moyer <jmoyer@redhat.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Link: https://lore.kernel.org/r/156686729474.184120.5835135644278860826.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
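For illustration only, here is a minimal userspace C sketch of the enum-versus-flags distinction the message describes: a single enum value can report 'locked' or 'frozen' but not both at once, while a flag bitmap (as intel_security_flags() below builds with set_bit()) can report combinations such as 'unlocked + frozen'. The SEC_* names and bit values are invented for this example and are not the kernel's NVDIMM_SECURITY_* definitions.

#include <stdio.h>

/* illustrative bit positions, not the kernel's NVDIMM_SECURITY_* values */
enum sec_bit { SEC_DISABLED, SEC_UNLOCKED, SEC_LOCKED, SEC_FROZEN, SEC_OVERWRITE };

static void report(unsigned long flags)
{
	/* a flag bitmap can express "unlocked + frozen" in one value */
	printf("unlocked=%d locked=%d frozen=%d\n",
	       !!(flags & (1UL << SEC_UNLOCKED)),
	       !!(flags & (1UL << SEC_LOCKED)),
	       !!(flags & (1UL << SEC_FROZEN)));
}

int main(void)
{
	unsigned long flags = 0;

	flags |= 1UL << SEC_UNLOCKED;	/* security enabled and unlocked */
	flags |= 1UL << SEC_FROZEN;	/* but further security ops are frozen */
	report(flags);			/* prints: unlocked=1 locked=0 frozen=1 */
	return 0;
}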
392 lines · 9.8 KiB · C
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/acpi.h>
#include <asm/smp.h>
#include "intel.h"
#include "nfit.h"

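/*
 * Translate the Intel DSM security state into the generic
 * NVDIMM_SECURITY_* flag bitmap so that states like 'unlocked + frozen'
 * can be reported together. Each helper in this file wraps its
 * vendor-specific payload in a struct nd_cmd_pkg and submits it with
 * ND_CMD_CALL via nvdimm_ctl().
 */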
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned long security_flags = 0;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_get_security_state cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_get_security_state),
			.nd_fw_size =
				sizeof(struct nd_intel_get_security_state),
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
		return 0;

	/*
	 * Short circuit the state retrieval while we are doing overwrite.
	 * The DSM spec states that the security state is indeterminate
	 * until the overwrite DSM completes.
	 */
	if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
		return BIT(NVDIMM_SECURITY_OVERWRITE);

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0 || nd_cmd.cmd.status) {
		pr_err("%s: security state retrieval failed (%d:%#x)\n",
				nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
		return 0;
	}

	/* check and see if security is enabled and locked */
	if (ptype == NVDIMM_MASTER) {
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
		return 0;

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
		    nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);

	return security_flags;
}

static int intel_security_freeze(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_freeze_lock cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	if (nd_cmd.cmd.status)
		return -EIO;
	return 0;
}

static int intel_security_change_key(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *old_data,
		const struct nvdimm_key_data *new_data,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
		NVDIMM_INTEL_SET_PASSPHRASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_set_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};
	int rc;

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.old_pass, old_data->data,
			sizeof(nd_cmd.cmd.old_pass));
	memcpy(nd_cmd.cmd.new_pass, new_data->data,
			sizeof(nd_cmd.cmd.new_pass));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -EIO;
	}
}

static void nvdimm_invalidate_cache(void);

static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_unlock_unit cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	default:
		return -EIO;
	}

	/* DIMM unlocked, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();

	return 0;
}

static int intel_security_disable(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_disable_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}

static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key,
		enum nvdimm_passphrase_type ptype)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_secure_erase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all cache before we erase DIMM */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	/* DIMM erased, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();
	return 0;
}

static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_query_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_OQUERY_INPROGRESS:
		return -EBUSY;
	default:
		return -ENXIO;
	}

	/* flush all cache before we make the nvdimms available */
	nvdimm_invalidate_cache();
	return 0;
}

static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *nkey)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all cache before we erase DIMM */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, nkey->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
		return -ENOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}
}

/*
 * TODO: define a cross arch wbinvd equivalent when/if
 * NVDIMM_FAMILY_INTEL command support arrives on another arch.
 */
#ifdef CONFIG_X86
static void nvdimm_invalidate_cache(void)
{
	wbinvd_on_all_cpus();
}
#else
static void nvdimm_invalidate_cache(void)
{
	WARN_ON_ONCE("cache invalidation required after unlock\n");
}
#endif

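/*
 * unlock, erase, overwrite, and query_overwrite all rely on the
 * wbinvd-based nvdimm_invalidate_cache() above, so they are only
 * registered on x86.
 */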
static const struct nvdimm_security_ops __intel_security_ops = {
	.get_flags = intel_security_flags,
	.freeze = intel_security_freeze,
	.change_key = intel_security_change_key,
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
	.erase = intel_security_erase,
	.overwrite = intel_security_overwrite,
	.query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;