Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-16 20:18:24 +07:00
Commit e9c7469bb4
This patch converts md to support REQ_FLUSH/FUA instead of the now
deprecated REQ_HARDBARRIER. In the core part (md.c), the following
changes are notable.

* Unlike REQ_HARDBARRIER, REQ_FLUSH/FUA don't interfere with
  processing of other requests and thus there is no reason to mark the
  queue congested while FLUSH/FUA is in progress.

* REQ_FLUSH/FUA failures are final and their users don't need retry
  logic. Retry logic is removed.

* Preflush needs to be issued to all member devices but FUA writes can
  be handled the same way as other writes - their processing can be
  deferred to the request_queue of member devices. md_barrier_request()
  is renamed to md_flush_request() and simplified accordingly.

For linear, raid0 and multipath, the core changes are enough. raid1, 5
and 10 need the following conversions.

* raid1: Handling of FLUSH/FUA bios can simply be deferred to the
  request_queues of member devices. Barrier related logic removed.

* raid5: Queue draining logic dropped. The FUA bit is propagated through
  biodrain and stripe reconstruction such that all the updated parts of
  the stripe are written out with FUA writes if any of the dirtying
  writes was FUA. preread_active_stripes handling in make_request() is
  updated as suggested by Neil Brown.

* raid10: The FUA bit needs to be propagated to write clones.

linear, raid0, 1, 5 and 10 tested.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
389 lines
9.3 KiB
C
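Editor's note: before the source below, a minimal sketch of the submission side of the FLUSH/FUA model, under the 2.6.37-era block API. It is illustrative only: example_flush_fua_write and example_end_io are hypothetical names, and nothing in linear.c handles the FUA bit specially; only an empty REQ_FLUSH bio is intercepted (by md_flush_request(), which fans the preflush out to all members), while FUA rides along to the member device.

/* Hedged sketch (not part of linear.c): one write bio tagged so the
 * block layer issues a cache flush before it (REQ_FLUSH) and completes
 * it durably (REQ_FUA).  No barrier-style queue draining is implied. */
static void example_end_io(struct bio *bio, int error)
{
	/* FLUSH/FUA failures are final; no retry logic is needed */
	bio_put(bio);
}

static void example_flush_fua_write(struct block_device *bdev,
				    struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;
	bio->bi_sector = 0;			/* hypothetical LBA */
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = example_end_io;

	/* WRITE_FLUSH_FUA bundles WRITE with REQ_FLUSH and REQ_FUA */
	submit_bio(WRITE_FLUSH_FUA, bio);
}
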
/*
   linear.c : Multiple Devices driver for Linux
	      Copyright (C) 1994-96 Marc ZYNGIER
	      <zyngier@ufr-info-p7.ibp.fr> or
	      <maz@gloups.fdn.fr>

   Linear mode management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
#include "linear.h"

/*
 * find which device holds a particular offset
 */
static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
{
	int lo, mid, hi;
	linear_conf_t *conf;

	lo = 0;
	hi = mddev->raid_disks - 1;
	conf = rcu_dereference(mddev->private);

	/*
	 * Binary Search
	 */

	while (hi > lo) {

		mid = (hi + lo) / 2;
		if (sector < conf->disks[mid].end_sector)
			hi = mid;
		else
			lo = mid + 1;
	}

	return conf->disks + lo;
}

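Editor's note: which_dev() works because disks[] stores cumulative end offsets, making the loop above a lower-bound binary search. A hedged userspace model with invented sizes (members of 100, 150 and 150 sectors give end offsets 100, 250, 400; sector 180 then maps to disk 1):

#include <stdio.h>

/* Userspace model of which_dev(): end[] holds each disk's cumulative
 * end sector; return the first disk whose end lies beyond 'sector'. */
static int which_disk(const unsigned long long *end, int nr,
		      unsigned long long sector)
{
	int lo = 0, hi = nr - 1, mid;

	while (hi > lo) {
		mid = (hi + lo) / 2;
		if (sector < end[mid])
			hi = mid;	/* sector ends at or before mid */
		else
			lo = mid + 1;	/* sector lies past mid's end */
	}
	return lo;
}

int main(void)
{
	unsigned long long end[] = { 100, 250, 400 };

	printf("%d\n", which_disk(end, 3, 180));	/* prints 1 */
	return 0;
}
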
/**
 *	linear_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can take at this offset
 */
static int linear_mergeable_bvec(struct request_queue *q,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	dev_info_t *dev0;
	unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);

	rcu_read_lock();
	dev0 = which_dev(mddev, sector);
	maxsectors = dev0->end_sector - sector;
	rcu_read_unlock();

	if (maxsectors < bio_sectors)
		maxsectors = 0;
	else
		maxsectors -= bio_sectors;

	if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0)
		return biovec->bv_len;
	/* The bytes available at this offset could be really big,
	 * so we cap at 2^31 to avoid overflow */
	if (maxsectors > (1 << (31-9)))
		return 1<<31;
	return maxsectors << 9;
}

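Editor's note: the merge callback above answers "how many more bytes fit before the member-device boundary?", while always accepting at least one page for an otherwise empty bio. A hedged userspace model of the same sizing arithmetic, with invented numbers (a member ending at sector 1000 and an 8-sector bio starting at 996 leaves no room):

#include <stdio.h>

/* Userspace model of linear_mergeable_bvec()'s sizing math. */
static unsigned long merge_room_bytes(unsigned long long dev_end,
				      unsigned long long sector,
				      unsigned long bio_sectors)
{
	unsigned long long maxsectors = dev_end - sector;

	if (maxsectors < bio_sectors)
		maxsectors = 0;			/* bio already spills past */
	else
		maxsectors -= bio_sectors;	/* room left after this bio */

	if (maxsectors > (1ULL << (31 - 9)))
		return 1UL << 31;		/* cap at 2^31 bytes */
	return (unsigned long)(maxsectors << 9);	/* sectors -> bytes */
}

int main(void)
{
	printf("%lu\n", merge_room_bytes(1000, 996, 8));	/* prints 0 */
	return 0;
}
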
static void linear_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	linear_conf_t *conf;
	int i;

	rcu_read_lock();
	conf = rcu_dereference(mddev->private);

	for (i=0; i < mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
		blk_unplug(r_queue);
	}
	rcu_read_unlock();
}

static int linear_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	linear_conf_t *conf;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	rcu_read_lock();
	conf = rcu_dereference(mddev->private);

	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	rcu_read_unlock();
	return ret;
}

static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	linear_conf_t *conf;
	sector_t array_sectors;

	rcu_read_lock();
	conf = rcu_dereference(mddev->private);
	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);
	array_sectors = conf->array_sectors;
	rcu_read_unlock();

	return array_sectors;
}

static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
{
	linear_conf_t *conf;
	mdk_rdev_t *rdev;
	int i, cnt;

	conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
			GFP_KERNEL);
	if (!conf)
		return NULL;

	cnt = 0;
	conf->array_sectors = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		int j = rdev->raid_disk;
		dev_info_t *disk = conf->disks + j;
		sector_t sectors;

		if (j < 0 || j >= raid_disks || disk->rdev) {
			printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
			       mdname(mddev));
			goto out;
		}

		disk->rdev = rdev;
		if (mddev->chunk_sectors) {
			sectors = rdev->sectors;
			sector_div(sectors, mddev->chunk_sectors);
			rdev->sectors = sectors * mddev->chunk_sectors;
		}

		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit max_segments to 1 lying within
		 * a single page.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}

		conf->array_sectors += rdev->sectors;
		cnt++;

	}
	if (cnt != raid_disks) {
		printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
		       mdname(mddev));
		goto out;
	}

	/*
	 * Here we calculate the device offsets.
	 */
	conf->disks[0].end_sector = conf->disks[0].rdev->sectors;

	for (i = 1; i < raid_disks; i++)
		conf->disks[i].end_sector =
			conf->disks[i-1].end_sector +
			conf->disks[i].rdev->sectors;

	return conf;

out:
	kfree(conf);
	return NULL;
}

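Editor's note: linear_conf() first rounds each member down to a multiple of chunk_sectors, then accumulates the end_sector offsets that which_dev() later searches. A hedged userspace model of that size math, with invented member sizes and a hypothetical chunk of 64 sectors:

#include <stdio.h>

/* Userspace model of linear_conf()'s size math: round each member
 * down to a chunk multiple, then accumulate cumulative end offsets. */
int main(void)
{
	unsigned long long sectors[] = { 1005, 2047, 513 };	/* invented */
	unsigned long long end[3], total = 0;
	unsigned int chunk = 64;	/* hypothetical chunk_sectors */
	int i;

	for (i = 0; i < 3; i++) {
		sectors[i] -= sectors[i] % chunk;	/* the sector_div() rounding */
		total += sectors[i];
		end[i] = total;
		printf("disk %d: %llu sectors, end_sector %llu\n",
		       i, sectors[i], end[i]);
	}
	printf("array_sectors = %llu\n", total);
	return 0;
}
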
static int linear_run (mddev_t *mddev)
{
	linear_conf_t *conf;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
	conf = linear_conf(mddev, mddev->raid_disks);

	if (!conf)
		return 1;
	mddev->private = conf;
	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));

	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
	mddev->queue->unplug_fn = linear_unplug;
	mddev->queue->backing_dev_info.congested_fn = linear_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;
	md_integrity_register(mddev);
	return 0;
}

static void free_conf(struct rcu_head *head)
{
	linear_conf_t *conf = container_of(head, linear_conf_t, rcu);
	kfree(conf);
}

static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
{
	/* Adding a drive to a linear array allows the array to grow.
	 * It is permitted if the new drive has a matching superblock
	 * already on it, with raid_disk equal to raid_disks.
	 * It is achieved by creating a new linear_private_data structure
	 * and swapping it in in-place of the current one.
	 * The current one is never freed until the array is stopped.
	 * This avoids races.
	 */
	linear_conf_t *newconf, *oldconf;

	if (rdev->saved_raid_disk != mddev->raid_disks)
		return -EINVAL;

	rdev->raid_disk = rdev->saved_raid_disk;

	newconf = linear_conf(mddev,mddev->raid_disks+1);

	if (!newconf)
		return -ENOMEM;

	oldconf = rcu_dereference(mddev->private);
	mddev->raid_disks++;
	rcu_assign_pointer(mddev->private, newconf);
	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	call_rcu(&oldconf->rcu, free_conf);
	return 0;
}

static int linear_stop (mddev_t *mddev)
{
	linear_conf_t *conf = mddev->private;

	/*
	 * We do not require rcu protection here since
	 * we hold reconfig_mutex for both linear_add and
	 * linear_stop, so they cannot race.
	 * We should make sure any old 'conf's are properly
	 * freed though.
	 */
	rcu_barrier();
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	kfree(conf);
	mddev->private = NULL;

	return 0;
}

static int linear_make_request (mddev_t *mddev, struct bio *bio)
{
	dev_info_t *tmp_dev;
	sector_t start_sector;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return 0;
	}

	rcu_read_lock();
	tmp_dev = which_dev(mddev, bio->bi_sector);
	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;

	if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
		     || (bio->bi_sector < start_sector))) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR
		       "md/linear:%s: make_request: Sector %llu out of bounds on "
		       "dev %s: %llu sectors, offset %llu\n",
		       mdname(mddev),
		       (unsigned long long)bio->bi_sector,
		       bdevname(tmp_dev->rdev->bdev, b),
		       (unsigned long long)tmp_dev->rdev->sectors,
		       (unsigned long long)start_sector);
		rcu_read_unlock();
		bio_io_error(bio);
		return 0;
	}
	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
		     tmp_dev->end_sector)) {
		/* This bio crosses a device boundary, so we have to
		 * split it.
		 */
		struct bio_pair *bp;
		sector_t end_sector = tmp_dev->end_sector;

		rcu_read_unlock();

		bp = bio_split(bio, end_sector - bio->bi_sector);

		if (linear_make_request(mddev, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (linear_make_request(mddev, &bp->bio2))
			generic_make_request(&bp->bio2);
		bio_pair_release(bp);
		return 0;
	}

	bio->bi_bdev = tmp_dev->rdev->bdev;
	bio->bi_sector = bio->bi_sector - start_sector
		+ tmp_dev->rdev->data_offset;
	rcu_read_unlock();

	return 1;
}

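Editor's note: the boundary case above is the only place linear has to touch a bio's payload: a bio that straddles two members is cut at the first member's end_sector, and each half is routed independently (bp->bio1 to the current member, bp->bio2 re-run through linear_make_request). A hedged model of the split arithmetic with invented numbers:

#include <stdio.h>

/* Userspace model of the boundary-split decision in
 * linear_make_request(): a 16-sector bio starting 4 sectors before a
 * member boundary is cut into 4- and 12-sector halves. */
int main(void)
{
	unsigned long long end_sector = 1000;	/* member boundary */
	unsigned long long bi_sector = 996;	/* bio start */
	unsigned long long bio_sectors = 16;	/* bio length */

	if (bi_sector + bio_sectors > end_sector) {
		unsigned long long first = end_sector - bi_sector;

		printf("split: %llu + %llu sectors\n",
		       first, bio_sectors - first);	/* 4 + 12 */
	}
	return 0;
}
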
static void linear_status (struct seq_file *seq, mddev_t *mddev)
{

	seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}

static struct mdk_personality linear_personality =
{
	.name		= "linear",
	.level		= LEVEL_LINEAR,
	.owner		= THIS_MODULE,
	.make_request	= linear_make_request,
	.run		= linear_run,
	.stop		= linear_stop,
	.status		= linear_status,
	.hot_add_disk	= linear_add,
	.size		= linear_size,
};

static int __init linear_init (void)
{
	return register_md_personality (&linear_personality);
}

static void linear_exit (void)
{
	unregister_md_personality (&linear_personality);
}

module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Linear device concatenation personality for MD");
MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
MODULE_ALIAS("md-linear");
MODULE_ALIAS("md-level--1");