commit f67e3fb489

Merge tag 'devdax-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull device-dax updates from Dan Williams:
 "New device-dax infrastructure to allow persistent memory and other
  "reserved" / performance differentiated memories, to be assigned to the
  core-mm as "System RAM".

  Some users want to use persistent memory as additional volatile memory.
  They are willing to cope with potential performance differences, for
  example between DRAM and 3D Xpoint, and want to use typical Linux memory
  management apis rather than a userspace memory allocator layered over an
  mmap() of a dax file. The administration model is to decide how much
  Persistent Memory (pmem) to use as System RAM, create a device-dax-mode
  namespace of that size, and then assign it to the core-mm. The rationale
  for device-dax is that it is a generic memory-mapping driver that can be
  layered over any "special purpose" memory, not just pmem. On subsequent
  boots udev rules can be used to restore the memory assignment.

  One implication of using pmem as RAM is that mlock() no longer keeps
  data off persistent media. For this reason it is recommended to enable
  NVDIMM Security (previously merged for 5.0) to encrypt pmem contents at
  rest. We considered making this recommendation an actively enforced
  requirement, but in the end decided to leave it as a distribution /
  administrator policy to allow for emulation and test environments that
  lack security capable NVDIMMs.

  Summary:

   - Replace the /sys/class/dax device model with /sys/bus/dax, and
     include a compat driver so distributions can opt-in to the new ABI.

   - Allow for an alternative driver for the device-dax address-range.

   - Introduce the 'kmem' driver to hotplug / assign a device-dax
     address-range to the core-mm.

   - Arrange for the device-dax target-node to be onlined so that the
     newly added memory range can be uniquely referenced by numa apis"

NOTE! I'm not entirely happy with the whole "PMEM as RAM" model because we
currently have special - and very annoying - rules in the kernel about
accessing PMEM only with the "MC safe" accessors, because machine checks
inside the regular repeat string copy functions can be fatal in some (not
described) circumstances.

And apparently the PMEM modules can cause that a lot more than regular RAM.
The argument is that this happens because PMEM doesn't necessarily get
scrubbed at boot like RAM does, but that is planned to be added for the
user space tooling.

Quoting Dan from another email:
 "The exposure can be reduced in the volatile-RAM case by scanning for and
  clearing errors before it is onlined as RAM. The userspace tooling for
  that can be in place before v5.1-final. There's also runtime
  notifications of errors via acpi_nfit_uc_error_notify() from background
  scrubbers on the DIMM devices. With that mechanism the kernel could
  proactively clear newly discovered poison in the volatile case, but that
  would be additional development more suitable for v5.2.

  I understand the concern, and the need to highlight this issue by
  tapping the brakes on feature development, but I don't see PMEM as RAM
  making the situation worse when the exposure is also there via DAX in
  the PMEM case. Volatile-RAM is arguably a safer use case since it's
  possible to repair pages where the persistent case needs active
  application coordination"

* tag 'devdax-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  device-dax: "Hotplug" persistent memory for use like normal RAM
  mm/resource: Let walk_system_ram_range() search child resources
  mm/memory-hotplug: Allow memory resources to be children
  mm/resource: Move HMM pr_debug() deeper into resource code
  mm/resource: Return real error codes from walk failures
  device-dax: Add a 'modalias' attribute to DAX 'bus' devices
  device-dax: Add a 'target_node' attribute
  device-dax: Auto-bind device after successful new_id
  acpi/nfit, device-dax: Identify differentiated memory with a unique numa-node
  device-dax: Add /sys/class/dax backwards compatibility
  device-dax: Add support for a dax override driver
  device-dax: Move resource pinning+mapping into the common driver
  device-dax: Introduce bus + driver model
  device-dax: Start defining a dax bus model
  device-dax: Remove multi-resource infrastructure
  device-dax: Kill dax_region base device
  device-dax: Kill dax_region ida
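Conceptually, the new 'kmem' driver takes a device-dax address range and hands it to the core-mm as hotplugged System RAM. The kernel-C sketch below only illustrates that idea; it is not the actual drivers/dax/kmem.c, and kmem_attach_range() together with its nid/start/size parameters is a hypothetical placeholder for whatever the real driver derives from the device-dax instance.

/*
 * Illustrative sketch only (NOT drivers/dax/kmem.c): claim a device-dax
 * address range and add it to a target NUMA node as hotplugged memory.
 * kmem_attach_range() and its parameters are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/memory_hotplug.h>

static int kmem_attach_range(int nid, u64 start, u64 size)
{
        struct resource *res;

        /* Reserve the physical range so nothing else claims it as iomem. */
        res = request_mem_region(start, size, "System RAM (sketch)");
        if (!res)
                return -EBUSY;

        /* Hand the range to the memory hotplug core on the target node. */
        return add_memory(nid, start, size);
}

Once the resulting memory blocks are onlined (for example by udev rules, which the merge message also suggests for restoring the assignment across boots), the range behaves like ordinary node-local memory for numa APIs and the usual mm allocators.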
371 lines · 8.4 KiB · C
// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "papr-scm: " fmt

#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>

#include <asm/plpar_wrappers.h>

#define BIND_ANY_ADDR (~0ul)

#define PAPR_SCM_DIMM_CMD_MASK \
        ((1ul << ND_CMD_GET_CONFIG_SIZE) | \
        (1ul << ND_CMD_GET_CONFIG_DATA) | \
        (1ul << ND_CMD_SET_CONFIG_DATA))

struct papr_scm_priv {
        struct platform_device *pdev;
        struct device_node *dn;
        uint32_t drc_index;
        uint64_t blocks;
        uint64_t block_size;
        int metadata_size;

        uint64_t bound_addr;

        struct nvdimm_bus_descriptor bus_desc;
        struct nvdimm_bus *bus;
        struct nvdimm *nvdimm;
        struct resource res;
        struct nd_region *region;
        struct nd_interleave_set nd_set;
};

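/*
 * Ask the hypervisor to bind this SCM region into the guest physical
 * address space via H_SCM_BIND_MEM; the returned address is saved in
 * bound_addr for use as the region resource.
 */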
static int drc_pmem_bind(struct papr_scm_priv *p)
{
        unsigned long ret[PLPAR_HCALL_BUFSIZE];
        uint64_t rc, token;
        uint64_t saved = 0;

        /*
         * When the hypervisor cannot map all the requested memory in a single
         * hcall it returns H_BUSY and we call again with the token until
         * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
         * leaves the system in an undefined state, so we wait.
         */
        token = 0;

        do {
                rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
                                p->blocks, BIND_ANY_ADDR, token);
                token = ret[0];
                if (!saved)
                        saved = ret[1];
                cond_resched();
        } while (rc == H_BUSY);

        if (rc) {
                dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
                return -ENXIO;
        }

        p->bound_addr = saved;

        dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);

        return 0;
}

static int drc_pmem_unbind(struct papr_scm_priv *p)
{
        unsigned long ret[PLPAR_HCALL_BUFSIZE];
        uint64_t rc, token;

        token = 0;

        /* NB: unbind has the same retry requirements mentioned above */
        do {
                rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
                                p->bound_addr, p->blocks, token);
                token = ret[0];
                cond_resched();
        } while (rc == H_BUSY);

        if (rc)
                dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);

        return !!rc;
}

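/*
 * Read one byte of the DIMM's config/label metadata at hdr->in_offset
 * via the H_SCM_READ_METADATA hcall.
 */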
static int papr_scm_meta_get(struct papr_scm_priv *p,
                struct nd_cmd_get_config_data_hdr *hdr)
{
        unsigned long data[PLPAR_HCALL_BUFSIZE];
        int64_t ret;

        if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
                return -EINVAL;

        ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
                        hdr->in_offset, 1);

        if (ret == H_PARAMETER) /* bad DRC index */
                return -ENODEV;
        if (ret)
                return -EINVAL; /* other invalid parameter */

        hdr->out_buf[0] = data[0] & 0xff;

        return 0;
}

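/*
 * Write one byte of config/label metadata at hdr->in_offset via the
 * H_SCM_WRITE_METADATA hcall.
 */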
static int papr_scm_meta_set(struct papr_scm_priv *p,
                struct nd_cmd_set_config_hdr *hdr)
{
        int64_t ret;

        if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
                return -EINVAL;

        ret = plpar_hcall_norets(H_SCM_WRITE_METADATA,
                        p->drc_index, hdr->in_offset, hdr->in_buf[0], 1);

        if (ret == H_PARAMETER) /* bad DRC index */
                return -ENODEV;
        if (ret)
                return -EINVAL; /* other invalid parameter */

        return 0;
}

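/*
 * libnvdimm "ndctl" callback: services the per-DIMM config-data commands
 * (ND_CMD_GET_CONFIG_SIZE / _GET_CONFIG_DATA / _SET_CONFIG_DATA) that are
 * advertised in PAPR_SCM_DIMM_CMD_MASK.
 */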
int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
        struct nd_cmd_get_config_size *get_size_hdr;
        struct papr_scm_priv *p;

        /* Only dimm-specific calls are supported atm */
        if (!nvdimm)
                return -EINVAL;

        p = nvdimm_provider_data(nvdimm);

        switch (cmd) {
        case ND_CMD_GET_CONFIG_SIZE:
                get_size_hdr = buf;

                get_size_hdr->status = 0;
                get_size_hdr->max_xfer = 1;
                get_size_hdr->config_size = p->metadata_size;
                *cmd_rc = 0;
                break;

        case ND_CMD_GET_CONFIG_DATA:
                *cmd_rc = papr_scm_meta_get(p, buf);
                break;

        case ND_CMD_SET_CONFIG_DATA:
                *cmd_rc = papr_scm_meta_set(p, buf);
                break;

        default:
                return -EINVAL;
        }

        dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);

        return 0;
}

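/* sysfs attribute groups handed to libnvdimm for the region, bus and dimm */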
static const struct attribute_group *region_attr_groups[] = {
        &nd_region_attribute_group,
        &nd_device_attribute_group,
        &nd_mapping_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};

static const struct attribute_group *bus_attr_groups[] = {
        &nvdimm_bus_attribute_group,
        NULL,
};

static const struct attribute_group *papr_scm_dimm_groups[] = {
        &nvdimm_attribute_group,
        &nd_device_attribute_group,
        NULL,
};

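/*
 * Register with libnvdimm: create the nvdimm bus, a single DIMM object,
 * and a pmem region covering the bound address range.
 */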
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
        struct device *dev = &p->pdev->dev;
        struct nd_mapping_desc mapping;
        struct nd_region_desc ndr_desc;
        unsigned long dimm_flags;

        p->bus_desc.ndctl = papr_scm_ndctl;
        p->bus_desc.module = THIS_MODULE;
        p->bus_desc.of_node = p->pdev->dev.of_node;
        p->bus_desc.attr_groups = bus_attr_groups;
        p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);

        if (!p->bus_desc.provider_name)
                return -ENOMEM;

        p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
        if (!p->bus) {
                dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
                return -ENXIO;
        }

        dimm_flags = 0;
        set_bit(NDD_ALIASING, &dimm_flags);

        p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups,
                        dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
        if (!p->nvdimm) {
                dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
                goto err;
        }

        if (nvdimm_bus_check_dimm_count(p->bus, 1))
                goto err;

        /* now add the region */

        memset(&mapping, 0, sizeof(mapping));
        mapping.nvdimm = p->nvdimm;
        mapping.start = 0;
        mapping.size = p->blocks * p->block_size; // XXX: potential overflow?

        memset(&ndr_desc, 0, sizeof(ndr_desc));
        ndr_desc.attr_groups = region_attr_groups;
        ndr_desc.numa_node = dev_to_node(&p->pdev->dev);
        ndr_desc.target_node = ndr_desc.numa_node;
        ndr_desc.res = &p->res;
        ndr_desc.of_node = p->dn;
        ndr_desc.provider_data = p;
        ndr_desc.mapping = &mapping;
        ndr_desc.num_mappings = 1;
        ndr_desc.nd_set = &p->nd_set;
        set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);

        p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
        if (!p->region) {
                dev_err(dev, "Error registering region %pR from %pOF\n",
                                ndr_desc.res, p->dn);
                goto err;
        }

        return 0;

err:    nvdimm_bus_unregister(p->bus);
        kfree(p->bus_desc.provider_name);
        return -ENXIO;
}

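/*
 * Platform driver probe: read the required device tree properties, ask
 * the hypervisor to bind the region, then register it with libnvdimm.
 */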
static int papr_scm_probe(struct platform_device *pdev)
{
        struct device_node *dn = pdev->dev.of_node;
        u32 drc_index, metadata_size;
        u64 blocks, block_size;
        struct papr_scm_priv *p;
        const char *uuid_str;
        u64 uuid[2];
        int rc;

        /* check we have all the required DT properties */
        if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
                dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
                return -ENODEV;
        }

        if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
                dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
                return -ENODEV;
        }

        if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
                dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
                return -ENODEV;
        }

        if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
                dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
                return -ENODEV;
        }

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        /* optional DT properties */
        of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);

        p->dn = dn;
        p->drc_index = drc_index;
        p->block_size = block_size;
        p->blocks = blocks;

        /* We just need to ensure that set cookies are unique across */
        uuid_parse(uuid_str, (uuid_t *) uuid);
        p->nd_set.cookie1 = uuid[0];
        p->nd_set.cookie2 = uuid[1];

        /* might be zero */
        p->metadata_size = metadata_size;
        p->pdev = pdev;

        /* request the hypervisor to bind this region to somewhere in memory */
        rc = drc_pmem_bind(p);
        if (rc)
                goto err;

        /* setup the resource for the newly bound range */
        p->res.start = p->bound_addr;
        p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
        p->res.name = pdev->name;
        p->res.flags = IORESOURCE_MEM;

        rc = papr_scm_nvdimm_init(p);
        if (rc)
                goto err2;

        platform_set_drvdata(pdev, p);

        return 0;

err2:   drc_pmem_unbind(p);
err:    kfree(p);
        return rc;
}

static int papr_scm_remove(struct platform_device *pdev)
{
        struct papr_scm_priv *p = platform_get_drvdata(pdev);

        nvdimm_bus_unregister(p->bus);
        drc_pmem_unbind(p);
        kfree(p);

        return 0;
}

static const struct of_device_id papr_scm_match[] = {
        { .compatible = "ibm,pmemory" },
        { },
};

static struct platform_driver papr_scm_driver = {
        .probe = papr_scm_probe,
        .remove = papr_scm_remove,
        .driver = {
                .name = "papr_scm",
                .owner = THIS_MODULE,
                .of_match_table = papr_scm_match,
        },
};

module_platform_driver(papr_scm_driver);
MODULE_DEVICE_TABLE(of, papr_scm_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");