53ef7d0e20

Merge tag 'libnvdimm-for-4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Dan Williams:
 "The bulk of this has been in multiple -next releases. There were a
  few late breaking fixes and small features that got added in the last
  couple days, but the whole set has received a build success
  notification from the kbuild robot.

  Change summary:

  - Region media error reporting: A libnvdimm region device is the
    parent to one or more namespaces. To date, media errors have been
    reported via the "badblocks" attribute attached to pmem block
    devices for namespaces in "raw" or "memory" mode. Given that
    namespaces can be in "device-dax" or "btt-sector" mode, this new
    interface reports media errors generically, i.e. independent of
    namespace modes or state. This subsequently allows userspace
    tooling to craft "ACPI 6.1 Section 9.20.7.6 Function Index 4 -
    Clear Uncorrectable Error" requests and submit them via the ioctl
    path for NVDIMM root bus devices.

  - Introduce 'struct dax_device' and 'struct dax_operations': Prompted
    by a request from Linus and feedback from Christoph, this allows
    dax-capable drivers to publish their own custom dax operations. It
    fixes the broken assumption that all dax operations are related to
    a persistent memory device, and makes it easier for other
    architectures and platforms to add customized persistent memory
    support.

  - 'libnvdimm' core updates: A new "deep_flush" sysfs attribute is
    available for storage appliance applications to manually trigger
    memory controllers to drain write-pending buffers that would
    otherwise be flushed automatically by the platform ADR
    (asynchronous-DRAM-refresh) mechanism at a power loss event.
    Support for "locked" DIMMs is included to prevent namespaces from
    surfacing when the namespace label data area is locked. Finally,
    fixes for various reported deadlocks and crashes, also tagged for
    -stable.

  - ACPI / nfit driver updates: General updates of the nfit driver to
    add DSM command overrides, ACPI 6.1 health state flags support,
    DSM payload debug available by default, and various fixes.

  Acknowledgements that came after the branch was pushed:

  - commit 565851c972 "device-dax: fix sysfs attribute deadlock":
    Tested-by: Yi Zhang <yizhan@redhat.com>

  - commit 23f4984483 "libnvdimm: rework region badblocks clearing":
    Tested-by: Toshi Kani <toshi.kani@hpe.com>"

* tag 'libnvdimm-for-4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (52 commits)
  libnvdimm, pfn: fix 'npfns' vs section alignment
  libnvdimm: handle locked label storage areas
  libnvdimm: convert NDD_ flags to use bitops, introduce NDD_LOCKED
  brd: fix uninitialized use of brd->dax_dev
  block, dax: use correct format string in bdev_dax_supported
  device-dax: fix sysfs attribute deadlock
  libnvdimm: restore "libnvdimm: band aid btt vs clear poison locking"
  libnvdimm: fix nvdimm_bus_lock() vs device_lock() ordering
  libnvdimm: rework region badblocks clearing
  acpi, nfit: kill ACPI_NFIT_DEBUG
  libnvdimm: fix clear length of nvdimm_forget_poison()
  libnvdimm, pmem: fix a NULL pointer BUG in nd_pmem_notify
  libnvdimm, region: sysfs trigger for nvdimm_flush()
  libnvdimm: fix phys_addr for nvdimm_clear_poison
  x86, dax, pmem: remove indirection around memcpy_from_pmem()
  block: remove block_device_operations ->direct_access()
  block, dax: convert bdev_dax_supported() to dax_direct_access()
  filesystem-dax: convert to dax_direct_access()
  Revert "block: use DAX for partition table reads"
  ext2, ext4, xfs: retrieve dax_device for iomap operations
  ...

/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/log2.h>

#define DM_MSG_PREFIX "striped"
#define DM_IO_ERROR_THRESHOLD 15

struct stripe {
	struct dm_dev *dev;
	sector_t physical_start;

	atomic_t error_count;
};

struct stripe_c {
	uint32_t stripes;
	int stripes_shift;

	/* The size of this target / num. stripes */
	sector_t stripe_width;

	uint32_t chunk_size;
	int chunk_size_shift;

	/* Needed for handling events */
	struct dm_target *ti;

	/* Work struct used for triggering events */
	struct work_struct trigger_event;

	struct stripe stripe[0];
};
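
/*
 * Note on the two *_shift fields above: when stripes/chunk_size is a
 * power of two, the corresponding shift holds log2() of the value so
 * that stripe_map_sector() can use masks and shifts; otherwise the
 * shift is set to -1 and sector_div() is used instead.
 */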

/*
 * An event is triggered whenever a drive
 * drops out of a stripe volume.
 */
static void trigger_event(struct work_struct *work)
{
	struct stripe_c *sc = container_of(work, struct stripe_c,
					   trigger_event);
	dm_table_event(sc->ti->table);
}

static inline struct stripe_c *alloc_context(unsigned int stripes)
{
	size_t len;

	if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
			     stripes))
		return NULL;

	len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);

	return kmalloc(len, GFP_KERNEL);
}

/*
 * Parse a single <dev> <sector> pair
 */
static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
		      unsigned int stripe, char **argv)
{
	unsigned long long start;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
		return -EINVAL;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &sc->stripe[stripe].dev);
	if (ret)
		return ret;

	sc->stripe[stripe].physical_start = start;

	return 0;
}

/*
 * Construct a striped mapping.
 * <number of stripes> <chunk size> [<dev_path> <offset>]+
 */
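/*
 * Example table line (illustrative values): a two-way stripe with
 * 256-sector chunks across two devices, covering 1024000 sectors:
 *
 *   dmsetup create st --table "0 1024000 striped 2 256 /dev/sdb 0 /dev/sdc 0"
 */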
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct stripe_c *sc;
	sector_t width, tmp_len;
	uint32_t stripes;
	uint32_t chunk_size;
	int r;
	unsigned int i;

	if (argc < 2) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	if (kstrtouint(argv[0], 10, &stripes) || !stripes) {
		ti->error = "Invalid stripe count";
		return -EINVAL;
	}

	if (kstrtouint(argv[1], 10, &chunk_size) || !chunk_size) {
		ti->error = "Invalid chunk_size";
		return -EINVAL;
	}

	width = ti->len;
	if (sector_div(width, stripes)) {
		ti->error = "Target length not divisible by "
		    "number of stripes";
		return -EINVAL;
	}

	tmp_len = width;
	if (sector_div(tmp_len, chunk_size)) {
		ti->error = "Target length not divisible by "
		    "chunk size";
		return -EINVAL;
	}

	/*
	 * Do we have enough arguments for that many stripes ?
	 */
	if (argc != (2 + 2 * stripes)) {
		ti->error = "Not enough destinations "
			"specified";
		return -EINVAL;
	}

	sc = alloc_context(stripes);
	if (!sc) {
		ti->error = "Memory allocation for striped context "
		    "failed";
		return -ENOMEM;
	}

	INIT_WORK(&sc->trigger_event, trigger_event);

	/* Set pointer to dm target; used in trigger_event */
	sc->ti = ti;
	sc->stripes = stripes;
	sc->stripe_width = width;

	if (stripes & (stripes - 1))
		sc->stripes_shift = -1;
	else
		sc->stripes_shift = __ffs(stripes);

	r = dm_set_target_max_io_len(ti, chunk_size);
	if (r) {
		kfree(sc);
		return r;
	}

	ti->num_flush_bios = stripes;
	ti->num_discard_bios = stripes;
	ti->num_write_same_bios = stripes;
	ti->num_write_zeroes_bios = stripes;

	sc->chunk_size = chunk_size;
	if (chunk_size & (chunk_size - 1))
		sc->chunk_size_shift = -1;
	else
		sc->chunk_size_shift = __ffs(chunk_size);

	/*
	 * Get the stripe destinations.
	 */
	for (i = 0; i < stripes; i++) {
		argv += 2;

		r = get_stripe(ti, sc, i, argv);
		if (r < 0) {
			ti->error = "Couldn't parse stripe destination";
			while (i--)
				dm_put_device(ti, sc->stripe[i].dev);
			kfree(sc);
			return r;
		}
		atomic_set(&(sc->stripe[i].error_count), 0);
	}

	ti->private = sc;

	return 0;
}

static void stripe_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct stripe_c *sc = (struct stripe_c *) ti->private;

	for (i = 0; i < sc->stripes; i++)
		dm_put_device(ti, sc->stripe[i].dev);

	flush_work(&sc->trigger_event);
	kfree(sc);
}

static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
			      uint32_t *stripe, sector_t *result)
{
	sector_t chunk = dm_target_offset(sc->ti, sector);
	sector_t chunk_offset;

	if (sc->chunk_size_shift < 0)
		chunk_offset = sector_div(chunk, sc->chunk_size);
	else {
		chunk_offset = chunk & (sc->chunk_size - 1);
		chunk >>= sc->chunk_size_shift;
	}

	if (sc->stripes_shift < 0)
		*stripe = sector_div(chunk, sc->stripes);
	else {
		*stripe = chunk & (sc->stripes - 1);
		chunk >>= sc->stripes_shift;
	}

	if (sc->chunk_size_shift < 0)
		chunk *= sc->chunk_size;
	else
		chunk <<= sc->chunk_size_shift;

	*result = chunk + chunk_offset;
}
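
/*
 * Worked example (illustrative values): with chunk_size = 8 and
 * stripes = 4, both powers of two, the shift paths give for
 * target-relative sector 100:
 *
 *   chunk_offset = 100 & 7      = 4    (offset within the chunk)
 *   chunk        = 100 >> 3     = 12   (chunk number across all stripes)
 *   *stripe      = 12 & 3       = 0    (stripe/device index)
 *   chunk        = 12 >> 2      = 3    (chunk number within that stripe)
 *   *result      = (3 << 3) + 4 = 28   (sector within the stripe device)
 */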

static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
				    uint32_t target_stripe, sector_t *result)
{
	uint32_t stripe;

	stripe_map_sector(sc, sector, &stripe, result);
	if (stripe == target_stripe)
		return;

	/* round down */
	sector = *result;
	if (sc->chunk_size_shift < 0)
		*result -= sector_div(sector, sc->chunk_size);
	else
		*result = sector & ~(sector_t)(sc->chunk_size - 1);

	if (target_stripe < stripe)
		*result += sc->chunk_size;	/* next chunk */
}
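
/*
 * Continuing the example above: sector 100 lands on stripe 0 at device
 * sector 28.  To project that boundary onto another stripe, 28 is first
 * rounded down to its chunk start (24); that is already the correct
 * start for a higher-numbered target stripe, while a lower-numbered one
 * must advance a full chunk (to 32), which is the final adjustment made
 * above.
 */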

static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
			    uint32_t target_stripe)
{
	sector_t begin, end;

	stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
				target_stripe, &begin);
	stripe_map_range_sector(sc, bio_end_sector(bio),
				target_stripe, &end);
	if (begin < end) {
		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
		bio->bi_iter.bi_sector = begin +
			sc->stripe[target_stripe].physical_start;
		bio->bi_iter.bi_size = to_bytes(end - begin);
		return DM_MAPIO_REMAPPED;
	} else {
		/* The range doesn't map to the target stripe */
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}
}

static int stripe_map(struct dm_target *ti, struct bio *bio)
{
	struct stripe_c *sc = ti->private;
	uint32_t stripe;
	unsigned target_bio_nr;

	if (bio->bi_opf & REQ_PREFLUSH) {
		target_bio_nr = dm_bio_get_target_bio_nr(bio);
		BUG_ON(target_bio_nr >= sc->stripes);
		bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
		return DM_MAPIO_REMAPPED;
	}
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
	    unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
	    unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
		target_bio_nr = dm_bio_get_target_bio_nr(bio);
		BUG_ON(target_bio_nr >= sc->stripes);
		return stripe_map_range(sc, bio, target_bio_nr);
	}

	stripe_map_sector(sc, bio->bi_iter.bi_sector,
			  &stripe, &bio->bi_iter.bi_sector);

	bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
	bio->bi_bdev = sc->stripe[stripe].dev->bdev;

	return DM_MAPIO_REMAPPED;
}

static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	struct stripe_c *sc = ti->private;
	struct dax_device *dax_dev;
	struct block_device *bdev;
	uint32_t stripe;
	long ret;

	stripe_map_sector(sc, sector, &stripe, &dev_sector);
	dev_sector += sc->stripe[stripe].physical_start;
	dax_dev = sc->stripe[stripe].dev->dax_dev;
	bdev = sc->stripe[stripe].dev->bdev;

	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
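
/*
 * Note: the stripe target only remaps the page offset onto the chosen
 * backing device here; the actual flat-address lookup is delegated to
 * that device's own dax_operations via dax_direct_access().
 */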

/*
 * Stripe status:
 *
 * INFO
 * #stripes [stripe_name <stripe_name>] [group word count]
 * [error count 'A|D' <error count 'A|D'>]
 *
 * TABLE
 * #stripes [stripe chunk size]
 * [stripe_name physical_start <stripe_name physical_start>]
 *
 */
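/*
 * For example (illustrative device names), a healthy two-device stripe
 * might emit:
 *
 *   INFO:  "2 8:16 8:32 1 AA"
 *   TABLE: "2 256 8:16 0 8:32 0"
 */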

static void stripe_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct stripe_c *sc = (struct stripe_c *) ti->private;
	char buffer[sc->stripes + 1];
	unsigned int sz = 0;
	unsigned int i;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", sc->stripes);
		for (i = 0; i < sc->stripes; i++) {
			DMEMIT("%s ", sc->stripe[i].dev->name);
			buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
				'D' : 'A';
		}
		buffer[i] = '\0';
		DMEMIT("1 %s", buffer);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%d %llu", sc->stripes,
			(unsigned long long)sc->chunk_size);
		for (i = 0; i < sc->stripes; i++)
			DMEMIT(" %s %llu", sc->stripe[i].dev->name,
				(unsigned long long)sc->stripe[i].physical_start);
		break;
	}
}

static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	unsigned i;
	char major_minor[16];
	struct stripe_c *sc = ti->private;

	if (!error)
		return 0; /* I/O complete */

	if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
		return error;

	if (error == -EOPNOTSUPP)
		return error;

	memset(major_minor, 0, sizeof(major_minor));
	sprintf(major_minor, "%d:%d",
		MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
		MINOR(disk_devt(bio->bi_bdev->bd_disk)));

	/*
	 * Test to see which stripe drive triggered the event
	 * and increment error count for all stripes on that device.
	 * If the error count for a given device exceeds the threshold
	 * value we will no longer trigger any further events.
	 */
	for (i = 0; i < sc->stripes; i++)
		if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
			atomic_inc(&(sc->stripe[i].error_count));
			if (atomic_read(&(sc->stripe[i].error_count)) <
			    DM_IO_ERROR_THRESHOLD)
				schedule_work(&sc->trigger_event);
		}

	return error;
}

static int stripe_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct stripe_c *sc = ti->private;
	int ret = 0;
	unsigned i = 0;

	do {
		ret = fn(ti, sc->stripe[i].dev,
			 sc->stripe[i].physical_start,
			 sc->stripe_width, data);
	} while (!ret && ++i < sc->stripes);

	return ret;
}

static void stripe_io_hints(struct dm_target *ti,
			    struct queue_limits *limits)
{
	struct stripe_c *sc = ti->private;
	unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * sc->stripes);
}

static struct target_type stripe_target = {
	.name   = "striped",
	.version = {1, 6, 0},
	.features = DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr    = stripe_ctr,
	.dtr    = stripe_dtr,
	.map    = stripe_map,
	.end_io = stripe_end_io,
	.status = stripe_status,
	.iterate_devices = stripe_iterate_devices,
	.io_hints = stripe_io_hints,
	.direct_access = stripe_dax_direct_access,
};

int __init dm_stripe_init(void)
{
	int r;

	r = dm_register_target(&stripe_target);
	if (r < 0)
		DMWARN("target registration failed");

	return r;
}

void dm_stripe_exit(void)
{
	dm_unregister_target(&stripe_target);
}
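
/*
 * There is no module_init()/module_exit() here: dm-stripe is linked
 * into the device-mapper core, which calls dm_stripe_init() and
 * dm_stripe_exit() from its own init/exit paths.
 */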