Merge branch 'for-4.11/next' into for-4.11/linus-merge

Signed-off-by: Jens Axboe <axboe@fb.com>
Jens Axboe 2017-02-17 14:08:19 -07:00
commit 818551e2b2
171 changed files with 2500 additions and 2856 deletions


@@ -49,9 +49,13 @@ config LBDAF
 If unsure, say Y.
+config BLK_SCSI_REQUEST
+bool
 config BLK_DEV_BSG
 bool "Block layer SG support v4"
 default y
+select BLK_SCSI_REQUEST
 help
 Saying Y here will enable generic SG (SCSI generic) v4 support
 for any block device.
@@ -71,6 +75,7 @@ config BLK_DEV_BSGLIB
 bool "Block layer SG support v4 helper lib"
 default n
 select BLK_DEV_BSG
+select BLK_SCSI_REQUEST
 help
 Subsystems will normally enable this if needed. Users will not
 normally need to manually enable this.


@@ -7,10 +7,11 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
 blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
 blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
-genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
+genhd.o partition-generic.o ioprio.o \
 badblocks.o partitions/
 obj-$(CONFIG_BOUNCE) += bounce.o
+obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o
 obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
 obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
 obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o


@@ -1227,9 +1227,6 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 if (!bio)
 goto out_bmd;
-if (iter->type & WRITE)
-bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 ret = 0;
 if (map_data) {
@@ -1394,12 +1391,6 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 kfree(pages);
-/*
-* set data direction, and check if mapped pages need bouncing
-*/
-if (iter->type & WRITE)
-bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 bio_set_flag(bio, BIO_USER_MAPPED);
 /*
@@ -1590,7 +1581,6 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 bio->bi_private = data;
 } else {
 bio->bi_end_io = bio_copy_kern_endio;
-bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 }
 return bio;


@@ -184,7 +184,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 goto err_free_blkg;
 }
-wb_congested = wb_congested_get_create(&q->backing_dev_info,
+wb_congested = wb_congested_get_create(q->backing_dev_info,
 blkcg->css.id,
 GFP_NOWAIT | __GFP_NOWARN);
 if (!wb_congested) {
@@ -469,8 +469,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
 /* some drivers (floppy) instantiate a queue w/o disk registered */
-if (blkg->q->backing_dev_info.dev)
-return dev_name(blkg->q->backing_dev_info.dev);
+if (blkg->q->backing_dev_info->dev)
+return dev_name(blkg->q->backing_dev_info->dev);
 return NULL;
 }
 EXPORT_SYMBOL_GPL(blkg_dev_name);
@@ -1079,10 +1079,8 @@ int blkcg_init_queue(struct request_queue *q)
 if (preloaded)
 radix_tree_preload_end();
-if (IS_ERR(blkg)) {
-blkg_free(new_blkg);
+if (IS_ERR(blkg))
 return PTR_ERR(blkg);
-}
 q->root_blkg = blkg;
 q->root_rl.blkg = blkg;


@@ -33,6 +33,7 @@
 #include <linux/ratelimit.h>
 #include <linux/pm_runtime.h>
 #include <linux/blk-cgroup.h>
+#include <linux/debugfs.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -42,6 +43,10 @@
 #include "blk-mq-sched.h"
 #include "blk-wbt.h"
+#ifdef CONFIG_DEBUG_FS
+struct dentry *blk_debugfs_root;
+#endif
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
@@ -75,7 +80,7 @@ static void blk_clear_congested(struct request_list *rl, int sync)
 * flip its congestion state for events on other blkcgs.
 */
 if (rl == &rl->q->root_rl)
-clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
@@ -86,7 +91,7 @@ static void blk_set_congested(struct request_list *rl, int sync)
 #else
 /* see blk_clear_congested() */
 if (rl == &rl->q->root_rl)
-set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
@@ -105,22 +110,6 @@ void blk_queue_congestion_threshold(struct request_queue *q)
 q->nr_congestion_off = nr;
 }
-/**
-* blk_get_backing_dev_info - get the address of a queue's backing_dev_info
-* @bdev: device
-*
-* Locates the passed device's request queue and returns the address of its
-* backing_dev_info. This function can only be called if @bdev is opened
-* and the return value is never NULL.
-*/
-struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
-{
-struct request_queue *q = bdev_get_queue(bdev);
-return &q->backing_dev_info;
-}
-EXPORT_SYMBOL(blk_get_backing_dev_info);
 void blk_rq_init(struct request_queue *q, struct request *rq)
 {
 memset(rq, 0, sizeof(*rq));
@@ -132,8 +121,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 rq->__sector = (sector_t) -1;
 INIT_HLIST_NODE(&rq->hash);
 RB_CLEAR_NODE(&rq->rb_node);
-rq->cmd = rq->__cmd;
-rq->cmd_len = BLK_MAX_CDB;
 rq->tag = -1;
 rq->internal_tag = -1;
 rq->start_time = jiffies;
@@ -160,10 +147,8 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
-int bit;
-printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
-rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
+rq->rq_disk ? rq->rq_disk->disk_name : "?",
 (unsigned long long) rq->cmd_flags);
 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
@@ -171,13 +156,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
 rq->bio, rq->biotail, blk_rq_bytes(rq));
-if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
-printk(KERN_INFO " cdb: ");
-for (bit = 0; bit < BLK_MAX_CDB; bit++)
-printk("%02x ", rq->cmd[bit]);
-printk("\n");
-}
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
@@ -588,7 +566,7 @@ void blk_cleanup_queue(struct request_queue *q)
 blk_flush_integrity();
 /* @q won't process any more request, flush async actions */
-del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
 blk_sync_queue(q);
 if (q->mq_ops)
@@ -600,7 +578,8 @@ void blk_cleanup_queue(struct request_queue *q)
 q->queue_lock = &q->__queue_lock;
 spin_unlock_irq(lock);
-bdi_unregister(&q->backing_dev_info);
+bdi_unregister(q->backing_dev_info);
+put_disk_devt(q->disk_devt);
 /* @q is and will stay empty, shutdown and put */
 blk_put_queue(q);
@@ -608,17 +587,41 @@ void blk_cleanup_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_cleanup_queue);
 /* Allocate memory local to the request queue */
-static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+static void *alloc_request_simple(gfp_t gfp_mask, void *data)
 {
-int nid = (int)(long)data;
-return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+struct request_queue *q = data;
+return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
 }
-static void free_request_struct(void *element, void *unused)
+static void free_request_simple(void *element, void *data)
 {
 kmem_cache_free(request_cachep, element);
 }
+static void *alloc_request_size(gfp_t gfp_mask, void *data)
+{
+struct request_queue *q = data;
+struct request *rq;
+rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
+q->node);
+if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
+kfree(rq);
+rq = NULL;
+}
+return rq;
+}
+static void free_request_size(void *element, void *data)
+{
+struct request_queue *q = data;
+if (q->exit_rq_fn)
+q->exit_rq_fn(q, element);
+kfree(element);
+}
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 gfp_t gfp_mask)
 {
@@ -631,10 +634,15 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
-rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
-free_request_struct,
-(void *)(long)q->node, gfp_mask,
-q->node);
+if (q->cmd_size) {
+rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
+alloc_request_size, free_request_size,
+q, gfp_mask, q->node);
+} else {
+rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
+alloc_request_simple, free_request_simple,
+q, gfp_mask, q->node);
+}
 if (!rl->rq_pool)
 return -ENOMEM;
@@ -697,7 +705,6 @@ static void blk_rq_timed_out_timer(unsigned long data)
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 struct request_queue *q;
-int err;
 q = kmem_cache_alloc_node(blk_requestq_cachep,
 gfp_mask | __GFP_ZERO, node_id);
@@ -712,17 +719,17 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 if (!q->bio_split)
 goto fail_id;
-q->backing_dev_info.ra_pages =
-(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
-q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
-q->backing_dev_info.name = "block";
-q->node = node_id;
-err = bdi_init(&q->backing_dev_info);
-if (err)
+q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
+if (!q->backing_dev_info)
 goto fail_split;
-setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+q->backing_dev_info->ra_pages =
+(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+q->backing_dev_info->name = "block";
+q->node = node_id;
+setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
 laptop_mode_timer_fn, (unsigned long) q);
 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 INIT_LIST_HEAD(&q->queue_head);
@@ -772,7 +779,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 fail_ref:
 percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
-bdi_destroy(&q->backing_dev_info);
+bdi_put(q->backing_dev_info);
 fail_split:
 bioset_free(q->bio_split);
 fail_id:
@@ -825,15 +832,19 @@ EXPORT_SYMBOL(blk_init_queue);
 struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-struct request_queue *uninit_q, *q;
-uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
-if (!uninit_q)
+struct request_queue *q;
+q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+if (!q)
 return NULL;
-q = blk_init_allocated_queue(uninit_q, rfn, lock);
-if (!q)
-blk_cleanup_queue(uninit_q);
+q->request_fn = rfn;
+if (lock)
+q->queue_lock = lock;
+if (blk_init_allocated_queue(q) < 0) {
+blk_cleanup_queue(q);
+return NULL;
+}
 return q;
 }
@@ -841,30 +852,22 @@ EXPORT_SYMBOL(blk_init_queue_node);
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
-struct request_queue *
-blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
-spinlock_t *lock)
-{
-if (!q)
-return NULL;
-q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
+int blk_init_allocated_queue(struct request_queue *q)
+{
+q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
 if (!q->fq)
-return NULL;
+return -ENOMEM;
+if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
+goto out_free_flush_queue;
 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
-goto fail;
+goto out_exit_flush_rq;
 INIT_WORK(&q->timeout_work, blk_timeout_work);
-q->request_fn = rfn;
-q->prep_rq_fn = NULL;
-q->unprep_rq_fn = NULL;
 q->queue_flags |= QUEUE_FLAG_DEFAULT;
-/* Override internal queue lock with supplied lock pointer */
-if (lock)
-q->queue_lock = lock;
 /*
 * This also sets hw/phys segments, boundary and size
 */
@@ -878,17 +881,19 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 /* init elevator */
 if (elevator_init(q, NULL)) {
 mutex_unlock(&q->sysfs_lock);
-goto fail;
+goto out_exit_flush_rq;
 }
 mutex_unlock(&q->sysfs_lock);
-return q;
+return 0;
-fail:
+out_exit_flush_rq:
+if (q->exit_rq_fn)
+q->exit_rq_fn(q, q->fq->flush_rq);
+out_free_flush_queue:
 blk_free_flush_queue(q->fq);
 wbt_exit(q);
-return NULL;
+return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1024,25 +1029,6 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
 return 0;
 }
-/*
-* Determine if elevator data should be initialized when allocating the
-* request associated with @bio.
-*/
-static bool blk_rq_should_init_elevator(struct bio *bio)
-{
-if (!bio)
-return true;
-/*
-* Flush requests do not use the elevator so skip initialization.
-* This allows a request to share the flush and elevator data.
-*/
-if (op_is_flush(bio->bi_opf))
-return false;
-return true;
-}
 /**
 * __get_request - get a free request
 * @rl: request list to allocate from
@@ -1121,10 +1107,13 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
 * request is freed. This guarantees icq's won't be destroyed and
 * makes creating new ones safe.
 *
+* Flush requests do not use the elevator so skip initialization.
+* This allows a request to share the flush and elevator data.
+*
 * Also, lookup icq while holding queue_lock. If it doesn't exist,
 * it will be created after releasing queue_lock.
 */
-if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
+if (!op_is_flush(op) && !blk_queue_bypass(q)) {
 rq_flags |= RQF_ELVPRIV;
 q->nr_rqs_elvpriv++;
 if (et->icq_cache && ioc)
@@ -1184,7 +1173,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
 * disturb iosched and blkcg but weird is bettern than dead.
 */
 printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
-__func__, dev_name(q->backing_dev_info.dev));
+__func__, dev_name(q->backing_dev_info->dev));
 rq->rq_flags &= ~RQF_ELVPRIV;
 rq->elv.icq = NULL;
@@ -1278,8 +1267,6 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 {
 struct request *rq;
-BUG_ON(rw != READ && rw != WRITE);
 /* create ioc upfront */
 create_io_context(gfp_mask, q->node);
@@ -1308,18 +1295,6 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_get_request);
-/**
-* blk_rq_set_block_pc - initialize a request to type BLOCK_PC
-* @rq: request to be initialized
-*
-*/
-void blk_rq_set_block_pc(struct request *rq)
-{
-rq->cmd_type = REQ_TYPE_BLOCK_PC;
-memset(rq->__cmd, 0, sizeof(rq->__cmd));
-}
-EXPORT_SYMBOL(blk_rq_set_block_pc);
 /**
 * blk_requeue_request - put a request back on queue
 * @q: request queue where request should be inserted
@@ -1510,6 +1485,30 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 return true;
 }
+bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
+struct bio *bio)
+{
+unsigned short segments = blk_rq_nr_discard_segments(req);
+if (segments >= queue_max_discard_segments(q))
+goto no_merge;
+if (blk_rq_sectors(req) + bio_sectors(bio) >
+blk_rq_get_max_sectors(req, blk_rq_pos(req)))
+goto no_merge;
+req->biotail->bi_next = bio;
+req->biotail = bio;
+req->__data_len += bio->bi_iter.bi_size;
+req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+req->nr_phys_segments = segments + 1;
+blk_account_io_start(req, false);
+return true;
+no_merge:
+req_set_nomerge(q, req);
+return false;
+}
 /**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
@@ -1538,12 +1537,11 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 {
 struct blk_plug *plug;
 struct request *rq;
-bool ret = false;
 struct list_head *plug_list;
 plug = current->plug;
 if (!plug)
-goto out;
+return false;
 *request_count = 0;
 if (q->mq_ops)
@@ -1552,7 +1550,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 plug_list = &plug->list;
 list_for_each_entry_reverse(rq, plug_list, queuelist) {
-int el_ret;
+bool merged = false;
 if (rq->q == q) {
 (*request_count)++;
@@ -1568,19 +1566,25 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 continue;
-el_ret = blk_try_merge(rq, bio);
-if (el_ret == ELEVATOR_BACK_MERGE) {
-ret = bio_attempt_back_merge(q, rq, bio);
-if (ret)
-break;
-} else if (el_ret == ELEVATOR_FRONT_MERGE) {
-ret = bio_attempt_front_merge(q, rq, bio);
-if (ret)
-break;
+switch (blk_try_merge(rq, bio)) {
+case ELEVATOR_BACK_MERGE:
+merged = bio_attempt_back_merge(q, rq, bio);
+break;
+case ELEVATOR_FRONT_MERGE:
+merged = bio_attempt_front_merge(q, rq, bio);
+break;
+case ELEVATOR_DISCARD_MERGE:
+merged = bio_attempt_discard_merge(q, rq, bio);
+break;
+default:
+break;
 }
+if (merged)
+return true;
 }
-out:
-return ret;
+return false;
 }
 unsigned int blk_plug_queued_count(struct request_queue *q)
@@ -1609,7 +1613,6 @@ unsigned int blk_plug_queued_count(struct request_queue *q)
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
-req->cmd_type = REQ_TYPE_FS;
 if (bio->bi_opf & REQ_RAHEAD)
 req->cmd_flags |= REQ_FAILFAST_MASK;
@@ -1623,8 +1626,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 struct blk_plug *plug;
-int el_ret, where = ELEVATOR_INSERT_SORT;
-struct request *req;
+int where = ELEVATOR_INSERT_SORT;
+struct request *req, *free;
 unsigned int request_count = 0;
 unsigned int wb_acct;
@@ -1661,21 +1664,29 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 spin_lock_irq(q->queue_lock);
-el_ret = elv_merge(q, &req, bio);
-if (el_ret == ELEVATOR_BACK_MERGE) {
-if (bio_attempt_back_merge(q, req, bio)) {
-elv_bio_merged(q, req, bio);
-if (!attempt_back_merge(q, req))
-elv_merged_request(q, req, el_ret);
-goto out_unlock;
-}
-} else if (el_ret == ELEVATOR_FRONT_MERGE) {
-if (bio_attempt_front_merge(q, req, bio)) {
-elv_bio_merged(q, req, bio);
-if (!attempt_front_merge(q, req))
-elv_merged_request(q, req, el_ret);
-goto out_unlock;
-}
+switch (elv_merge(q, &req, bio)) {
+case ELEVATOR_BACK_MERGE:
+if (!bio_attempt_back_merge(q, req, bio))
+break;
+elv_bio_merged(q, req, bio);
+free = attempt_back_merge(q, req);
+if (free)
+__blk_put_request(q, free);
+else
+elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
+goto out_unlock;
+case ELEVATOR_FRONT_MERGE:
+if (!bio_attempt_front_merge(q, req, bio))
+break;
+elv_bio_merged(q, req, bio);
+free = attempt_front_merge(q, req);
+if (free)
+__blk_put_request(q, free);
+else
+elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
+goto out_unlock;
+default:
+break;
 }
 get_rq:
@@ -2452,14 +2463,6 @@ void blk_start_request(struct request *req)
 wbt_issue(req->q->rq_wb, &req->issue_stat);
 }
-/*
-* We are now handing the request to the hardware, initialize
-* resid_len to full count and add the timeout handler.
-*/
-req->resid_len = blk_rq_bytes(req);
-if (unlikely(blk_bidi_rq(req)))
-req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
 blk_add_timer(req);
 }
@@ -2530,10 +2533,10 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 * TODO: tj: This is too subtle. It would be better to let
 * low level drivers do what they see fit.
 */
-if (req->cmd_type == REQ_TYPE_FS)
+if (!blk_rq_is_passthrough(req))
 req->errors = 0;
-if (error && req->cmd_type == REQ_TYPE_FS &&
+if (error && !blk_rq_is_passthrough(req) &&
 !(req->rq_flags & RQF_QUIET)) {
 char *error_type;
@@ -2605,7 +2608,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 req->__data_len -= total_bytes;
 /* update sector only for requests with clear definition of sector */
-if (req->cmd_type == REQ_TYPE_FS)
+if (!blk_rq_is_passthrough(req))
 req->__sector += total_bytes >> 9;
 /* mixed attributes always follow the first bio */
@@ -2683,8 +2686,8 @@ void blk_finish_request(struct request *req, int error)
 BUG_ON(blk_queued_rq(req));
-if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
-laptop_io_completion(&req->q->backing_dev_info);
+if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
+laptop_io_completion(req->q->backing_dev_info);
 blk_delete_timer(req);
@@ -3007,8 +3010,6 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 dst->cpu = src->cpu;
-dst->cmd_flags = src->cmd_flags | REQ_NOMERGE;
-dst->cmd_type = src->cmd_type;
 dst->__sector = blk_rq_pos(src);
 dst->__data_len = blk_rq_bytes(src);
 dst->nr_phys_segments = src->nr_phys_segments;
@@ -3484,5 +3485,9 @@ int __init blk_dev_init(void)
 blk_requestq_cachep = kmem_cache_create("request_queue",
 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+#ifdef CONFIG_DEBUG_FS
+blk_debugfs_root = debugfs_create_dir("block", NULL);
+#endif
 return 0;
 }


@@ -11,11 +11,6 @@
 #include "blk.h"
 #include "blk-mq-sched.h"
-/*
-* for max sense size
-*/
-#include <scsi/scsi_cmnd.h>
 /**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
@@ -56,7 +51,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 WARN_ON(irqs_disabled());
-WARN_ON(rq->cmd_type == REQ_TYPE_FS);
+WARN_ON(!blk_rq_is_passthrough(rq));
 rq->rq_disk = bd_disk;
 rq->end_io = done;
@@ -101,16 +96,9 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 struct request *rq, int at_head)
 {
 DECLARE_COMPLETION_ONSTACK(wait);
-char sense[SCSI_SENSE_BUFFERSIZE];
 int err = 0;
 unsigned long hang_check;
-if (!rq->sense) {
-memset(sense, 0, sizeof(sense));
-rq->sense = sense;
-rq->sense_len = 0;
-}
 rq->end_io_data = &wait;
 blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
@@ -124,11 +112,6 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 if (rq->errors)
 err = -EIO;
-if (rq->sense == sense) {
-rq->sense = NULL;
-rq->sense_len = 0;
-}
 return err;
 }
 EXPORT_SYMBOL(blk_execute_rq);


@@ -297,8 +297,14 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
 return false;
-/* C2 and C3 */
+/* C2 and C3
+*
+* For blk-mq + scheduling, we can risk having all driver tags
+* assigned to empty flushes, and we deadlock if we are expecting
+* other requests to make progress. Don't defer for that case.
+*/
 if (!list_empty(&fq->flush_data_in_flight) &&
+!(q->mq_ops && q->elevator) &&
 time_before(jiffies,
 fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
 return false;
@@ -327,7 +333,6 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
 }
-flush_rq->cmd_type = REQ_TYPE_FS;
 flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
 flush_rq->rq_flags |= RQF_FLUSH_SEQ;
 flush_rq->rq_disk = first_rq->rq_disk;
@@ -547,11 +552,10 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 if (!fq)
 goto fail;
-if (q->mq_ops) {
+if (q->mq_ops)
 spin_lock_init(&fq->mq_flush_lock);
-rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
-}
+rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
 fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
 if (!fq->flush_rq)
 goto fail_rq;


@@ -443,10 +443,10 @@ void blk_integrity_revalidate(struct gendisk *disk)
 return;
 if (bi->profile)
-disk->queue->backing_dev_info.capabilities |=
+disk->queue->backing_dev_info->capabilities |=
 BDI_CAP_STABLE_WRITES;
 else
-disk->queue->backing_dev_info.capabilities &=
+disk->queue->backing_dev_info->capabilities &=
 ~BDI_CAP_STABLE_WRITES;
 }


@@ -35,7 +35,10 @@ static void icq_free_icq_rcu(struct rcu_head *head)
 kmem_cache_free(icq->__rcu_icq_cache, icq);
 }
-/* Exit an icq. Called with both ioc and q locked. */
+/*
+* Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
+* mq.
+*/
 static void ioc_exit_icq(struct io_cq *icq)
 {
 struct elevator_type *et = icq->q->elevator->type;
@@ -166,6 +169,7 @@ EXPORT_SYMBOL(put_io_context);
 */
 void put_io_context_active(struct io_context *ioc)
 {
+struct elevator_type *et;
 unsigned long flags;
 struct io_cq *icq;
@@ -184,13 +188,19 @@ void put_io_context_active(struct io_context *ioc)
 hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
 if (icq->flags & ICQ_EXITED)
 continue;
-if (spin_trylock(icq->q->queue_lock)) {
+et = icq->q->elevator->type;
+if (et->uses_mq) {
 ioc_exit_icq(icq);
-spin_unlock(icq->q->queue_lock);
 } else {
-spin_unlock_irqrestore(&ioc->lock, flags);
-cpu_relax();
-goto retry;
+if (spin_trylock(icq->q->queue_lock)) {
+ioc_exit_icq(icq);
+spin_unlock(icq->q->queue_lock);
+} else {
+spin_unlock_irqrestore(&ioc->lock, flags);
+cpu_relax();
+goto retry;
+}
 }
 }
 spin_unlock_irqrestore(&ioc->lock, flags);


@@ -16,8 +16,6 @@
 int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
 if (!rq->bio) {
-rq->cmd_flags &= REQ_OP_MASK;
-rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
 blk_rq_bio_prep(rq->q, rq, bio);
 } else {
 if (!ll_back_merge_fn(rq->q, rq, bio))
@@ -62,6 +60,9 @@ static int __blk_rq_map_user_iov(struct request *rq,
 if (IS_ERR(bio))
 return PTR_ERR(bio);
+bio->bi_opf &= ~REQ_OP_MASK;
+bio->bi_opf |= req_op(rq);
 if (map_data && map_data->null_mapped)
 bio_set_flag(bio, BIO_NULL_MAPPED);
@@ -90,7 +91,7 @@ static int __blk_rq_map_user_iov(struct request *rq,
 }
 /**
-* blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
+* blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
@@ -199,7 +200,7 @@ int blk_rq_unmap_user(struct bio *bio)
 EXPORT_SYMBOL(blk_rq_unmap_user);
 /**
-* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
+* blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
@@ -234,8 +235,8 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 if (IS_ERR(bio))
 return PTR_ERR(bio);
-if (!reading)
-bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+bio->bi_opf &= ~REQ_OP_MASK;
+bio->bi_opf |= req_op(rq);
 if (do_copy)
 rq->rq_flags |= RQF_COPY_USER;


@@ -482,13 +482,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
-static void req_set_nomerge(struct request_queue *q, struct request *req)
-{
-req->cmd_flags |= REQ_NOMERGE;
-if (req == q->last_merge)
-q->last_merge = NULL;
-}
 static inline int ll_new_hw_segment(struct request_queue *q,
 struct request *req,
 struct bio *bio)
@@ -659,31 +652,32 @@ static void blk_account_io_merge(struct request *req)
 }
 /*
-* Has to be called with the request spinlock acquired
+* For non-mq, this has to be called with the request spinlock acquired.
+* For mq with scheduling, the appropriate queue wide lock should be held.
 */
-static int attempt_merge(struct request_queue *q, struct request *req,
-struct request *next)
+static struct request *attempt_merge(struct request_queue *q,
+struct request *req, struct request *next)
 {
 if (!rq_mergeable(req) || !rq_mergeable(next))
-return 0;
+return NULL;
 if (req_op(req) != req_op(next))
-return 0;
+return NULL;
 /*
 * not contiguous
 */
 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
-return 0;
+return NULL;
 if (rq_data_dir(req) != rq_data_dir(next)
 || req->rq_disk != next->rq_disk
 || req_no_special_merge(next))
-return 0;
+return NULL;
 if (req_op(req) == REQ_OP_WRITE_SAME &&
 !blk_write_same_mergeable(req->bio, next->bio))
-return 0;
+return NULL;
 /*
 * If we are allowed to merge, then append bio list
@@ -692,7 +686,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 * counts here.
 */
 if (!ll_merge_requests_fn(q, req, next))
-return 0;
+return NULL;
 /*
 * If failfast settings disagree or any of the two is already
@@ -732,42 +726,51 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 if (blk_rq_cpu_valid(next))
 req->cpu = next->cpu;
-/* owner-ship of bio passed from next to req */
+/*
+* ownership of bio passed from next to req, return 'next' for
+* the caller to free
+*/
 next->bio = NULL;
-__blk_put_request(q, next);
-return 1;
+return next;
 }
-int attempt_back_merge(struct request_queue *q, struct request *rq)
+struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
 {
 struct request *next = elv_latter_request(q, rq);
 if (next)
 return attempt_merge(q, rq, next);
-return 0;
+return NULL;
 }
-int attempt_front_merge(struct request_queue *q, struct request *rq)
+struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
 {
 struct request *prev = elv_former_request(q, rq);
 if (prev)
 return attempt_merge(q, prev, rq);
-return 0;
+return NULL;
 }
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 struct request *next)
 {
 struct elevator_queue *e = q->elevator;
+struct request *free;
 if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
 if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
 return 0;
-return attempt_merge(q, rq, next);
+free = attempt_merge(q, rq, next);
+if (free) {
+__blk_put_request(q, free);
+return 1;
+}
+return 0;
 }
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -798,9 +801,12 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 return true;
 }
-int blk_try_merge(struct request *rq, struct bio *bio)
+enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
-if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
+if (req_op(rq) == REQ_OP_DISCARD &&
+queue_max_discard_segments(rq->q) > 1)
+return ELEVATOR_DISCARD_MERGE;
+else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 return ELEVATOR_BACK_MERGE;
 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
 return ELEVATOR_FRONT_MERGE;


@@ -19,6 +19,7 @@
 #include <linux/debugfs.h>
 #include <linux/blk-mq.h>
+#include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
@@ -28,8 +29,6 @@ struct blk_mq_debugfs_attr {
 const struct file_operations *fops;
 };
-static struct dentry *block_debugfs_root;
 static int blk_mq_debugfs_seq_open(struct inode *inode, struct file *file,
 const struct seq_operations *ops)
 {
@@ -88,13 +87,14 @@ static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
 {
 struct request *rq = list_entry_rq(v);
-seq_printf(m, "%p {.cmd_type=%u, .cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
-rq, rq->cmd_type, rq->cmd_flags, (unsigned int)rq->rq_flags,
+seq_printf(m, "%p {.cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
+rq, rq->cmd_flags, (__force unsigned int)rq->rq_flags,
 rq->tag, rq->internal_tag);
 return 0;
 }
 static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
+__acquires(&hctx->lock)
 {
 struct blk_mq_hw_ctx *hctx = m->private;
@@ -110,6 +110,7 @@ static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
 }
 static void hctx_dispatch_stop(struct seq_file *m, void *v)
+__releases(&hctx->lock)
 {
 struct blk_mq_hw_ctx *hctx = m->private;
@@ -176,13 +177,17 @@ static int hctx_tags_show(struct seq_file *m, void *v)
 {
 struct blk_mq_hw_ctx *hctx = m->private;
 struct request_queue *q = hctx->queue;
+int res;
-mutex_lock(&q->sysfs_lock);
+res = mutex_lock_interruptible(&q->sysfs_lock);
+if (res)
+goto out;
 if (hctx->tags)
 blk_mq_debugfs_tags_show(m, hctx->tags);
 mutex_unlock(&q->sysfs_lock);
-return 0;
+out:
+return res;
 }
 static int hctx_tags_open(struct inode *inode, struct file *file)
@@ -201,12 +206,17 @@ static int hctx_tags_bitmap_show(struct seq_file *m, void *v)
 {
 struct blk_mq_hw_ctx *hctx = m->private;
 struct request_queue *q = hctx->queue;
+int res;
-mutex_lock(&q->sysfs_lock);
+res = mutex_lock_interruptible(&q->sysfs_lock);
+if (res)
+goto out;
 if (hctx->tags)
 sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
 mutex_unlock(&q->sysfs_lock);
-return 0;
+out:
+return res;
 }
 static int hctx_tags_bitmap_open(struct inode *inode, struct file *file)
@@ -225,13 +235,17 @@ static int hctx_sched_tags_show(struct seq_file *m, void *v)
 {
 struct blk_mq_hw_ctx *hctx = m->private;
 struct request_queue *q = hctx->queue;
+int res;
-mutex_lock(&q->sysfs_lock);
+res = mutex_lock_interruptible(&q->sysfs_lock);
+if (res)
+goto out;
 if (hctx->sched_tags)
 blk_mq_debugfs_tags_show(m, hctx->sched_tags);
 mutex_unlock(&q->sysfs_lock);
-return 0;
+out:
+return res;
 }
 static int hctx_sched_tags_open(struct inode *inode, struct file *file)
@@ -250,12 +264,17 @@ static int hctx_sched_tags_bitmap_show(struct seq_file *m, void *v)
 {
 struct blk_mq_hw_ctx *hctx = m->private;
 struct request_queue *q = hctx->queue;
+int res;
-mutex_lock(&q->sysfs_lock);
+res = mutex_lock_interruptible(&q->sysfs_lock);
+if (res)
+goto out;
 if (hctx->sched_tags)
 sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
 mutex_unlock(&q->sysfs_lock);
-return 0;
+out:
+return res;
 }
 static int hctx_sched_tags_bitmap_open(struct inode *inode, struct file *file)
@@ -482,6 +501,7 @@ static const struct file_operations hctx_active_fops = {
 };
 static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
+__acquires(&ctx->lock)
 {
 struct blk_mq_ctx *ctx = m->private;
@@ -497,6 +517,7 @@ static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
 }
 static void ctx_rq_list_stop(struct seq_file *m, void *v)
+__releases(&ctx->lock)
 {
 struct blk_mq_ctx *ctx = m->private;
@@ -630,6 +651,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
 {"queued", 0600, &hctx_queued_fops},
 {"run", 0600, &hctx_run_fops},
 {"active", 0400, &hctx_active_fops},
+{},
 };
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
@@ -637,14 +659,15 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
 {"dispatched", 0600, &ctx_dispatched_fops},
 {"merged", 0600, &ctx_merged_fops},
 {"completed", 0600, &ctx_completed_fops},
+{},
 };
 int blk_mq_debugfs_register(struct request_queue *q, const char *name)
 {
-if (!block_debugfs_root)
+if (!blk_debugfs_root)
 return -ENOENT;
-q->debugfs_dir = debugfs_create_dir(name, block_debugfs_root);
+q->debugfs_dir = debugfs_create_dir(name, blk_debugfs_root);
 if (!q->debugfs_dir)
 goto err;
@@ -665,27 +688,31 @@ void blk_mq_debugfs_unregister(struct request_queue *q)
 q->debugfs_dir = NULL;
 }
+static bool debugfs_create_files(struct dentry *parent, void *data,
+const struct blk_mq_debugfs_attr *attr)
+{
+for (; attr->name; attr++) {
+if (!debugfs_create_file(attr->name, attr->mode, parent,
+data, attr->fops))
+return false;
+}
+return true;
+}
 static int blk_mq_debugfs_register_ctx(struct request_queue *q,
 struct blk_mq_ctx *ctx,
 struct dentry *hctx_dir)
 {
 struct dentry *ctx_dir;
 char name[20];
-int i;
 snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
 ctx_dir = debugfs_create_dir(name, hctx_dir);
 if (!ctx_dir)
 return -ENOMEM;
-for (i = 0; i < ARRAY_SIZE(blk_mq_debugfs_ctx_attrs); i++) {
-const struct blk_mq_debugfs_attr *attr;
-attr = &blk_mq_debugfs_ctx_attrs[i];
-if (!debugfs_create_file(attr->name, attr->mode, ctx_dir, ctx,
-attr->fops))
-return -ENOMEM;
-}
+if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
+return -ENOMEM;
 return 0;
 }
@@ -703,14 +730,8 @@ static int blk_mq_debugfs_register_hctx(struct request_queue *q,
 if (!hctx_dir)
 return -ENOMEM;
-for (i = 0; i < ARRAY_SIZE(blk_mq_debugfs_hctx_attrs); i++) {
-const struct blk_mq_debugfs_attr *attr;
-attr = &blk_mq_debugfs_hctx_attrs[i];
-if (!debugfs_create_file(attr->name, attr->mode, hctx_dir, hctx,
-attr->fops))
-return -ENOMEM;
-}
+if (!debugfs_create_files(hctx_dir, hctx, blk_mq_debugfs_hctx_attrs))
+return -ENOMEM;
 hctx_for_each_ctx(hctx, ctx, i) {
 if (blk_mq_debugfs_register_ctx(q, ctx, hctx_dir))
@@ -749,8 +770,3 @@ void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
 debugfs_remove_recursive(q->mq_debugfs_dir);
 q->mq_debugfs_dir = NULL;
 }
-void blk_mq_debugfs_init(void)
-{
-block_debugfs_root = debugfs_create_dir("block", NULL);
-}


@@ -68,7 +68,9 @@ int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
 EXPORT_SYMBOL_GPL(blk_mq_sched_init_hctx_data);
 static void __blk_mq_sched_assign_ioc(struct request_queue *q,
-struct request *rq, struct io_context *ioc)
+struct request *rq,
+struct bio *bio,
+struct io_context *ioc)
 {
 struct io_cq *icq;
@@ -83,7 +85,7 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 }
 rq->elv.icq = icq;
-if (!blk_mq_sched_get_rq_priv(q, rq)) {
+if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
 rq->rq_flags |= RQF_ELVPRIV;
 get_io_context(icq->ioc);
 return;
@@ -99,7 +101,7 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
 ioc = rq_ioc(bio);
 if (ioc)
-__blk_mq_sched_assign_ioc(q, rq, ioc);
+__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 struct request *blk_mq_sched_get_request(struct request_queue *q,
@@ -173,6 +175,8 @@ void blk_mq_sched_put_request(struct request *rq)
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
 struct elevator_queue *e = hctx->queue->elevator;
+const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
+bool did_work = false;
 LIST_HEAD(rq_list);
 if (unlikely(blk_mq_hctx_stopped(hctx)))
@@ -202,11 +206,18 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 */
 if (!list_empty(&rq_list)) {
 blk_mq_sched_mark_restart(hctx);
-blk_mq_dispatch_rq_list(hctx, &rq_list);
-} else if (!e || !e->type->ops.mq.dispatch_request) {
+did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
+} else if (!has_sched_dispatch) {
 blk_mq_flush_busy_ctxs(hctx, &rq_list);
 blk_mq_dispatch_rq_list(hctx, &rq_list);
-} else {
+}
+/*
+* We want to dispatch from the scheduler if we had no work left
+* on the dispatch list, OR if we did have work but weren't able
+* to make progress.
+*/
+if (!did_work && has_sched_dispatch) {
 do {
 struct request *rq;
@@ -234,31 +245,33 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_move_to_dispatch);
-bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
+bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+struct request **merged_request)
 {
 struct request *rq;
-int ret;
-ret = elv_merge(q, &rq, bio);
-if (ret == ELEVATOR_BACK_MERGE) {
+switch (elv_merge(q, &rq, bio)) {
+case ELEVATOR_BACK_MERGE:
 if (!blk_mq_sched_allow_merge(q, rq, bio))
 return false;
-if (bio_attempt_back_merge(q, rq, bio)) {
-if (!attempt_back_merge(q, rq))
-elv_merged_request(q, rq, ret);
-return true;
-}
-} else if (ret == ELEVATOR_FRONT_MERGE) {
+if (!bio_attempt_back_merge(q, rq, bio))
+return false;
+*merged_request = attempt_back_merge(q, rq);
+if (!*merged_request)
+elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
+return true;
+case ELEVATOR_FRONT_MERGE:
 if (!blk_mq_sched_allow_merge(q, rq, bio))
 return false;
-if (bio_attempt_front_merge(q, rq, bio)) {
-if (!attempt_front_merge(q, rq))
-elv_merged_request(q, rq, ret);
-return true;
-}
+if (!bio_attempt_front_merge(q, rq, bio))
+return false;
+*merged_request = attempt_front_merge(q, rq);
+if (!*merged_request)
+elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
+return true;
+default:
+return false;
 }
-return false;
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
@@ -289,7 +302,8 @@ void blk_mq_sched_request_inserted(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
+struct request *rq)
 {
 if (rq->tag == -1) {
 rq->rq_flags |= RQF_SORTED;
@@ -305,7 +319,6 @@ bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
 spin_unlock(&hctx->lock);
 return true;
 }
-EXPORT_SYMBOL_GPL(blk_mq_sched_bypass_insert);
 static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
@@ -347,7 +360,7 @@ static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
 blk_insert_flush(rq);
 blk_mq_run_hw_queue(hctx, true);
 } else
-blk_mq_add_to_requeue_list(rq, true, true);
+blk_mq_add_to_requeue_list(rq, false, true);
 }
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
@@ -363,6 +376,9 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 return;
 }
+if (e && blk_mq_sched_bypass_insert(hctx, rq))
+goto run;
 if (e && e->type->ops.mq.insert_requests) {
 LIST_HEAD(list);
@@ -374,6 +390,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 spin_unlock(&ctx->lock);
 }
+run:
 if (run_queue)
 blk_mq_run_hw_queue(hctx, async);
 }
@@ -385,6 +402,23 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 struct elevator_queue *e = hctx->queue->elevator;
+if (e) {
+struct request *rq, *next;
+/*
+* We bypass requests that already have a driver tag assigned,
+* which should only be flushes. Flushes are only ever inserted
+* as single requests, so we shouldn't ever hit the
+* WARN_ON_ONCE() below (but let's handle it just in case).
+*/
+list_for_each_entry_safe(rq, next, list, queuelist) {
+if (WARN_ON_ONCE(rq->tag != -1)) {
+list_del_init(&rq->queuelist);
+blk_mq_sched_bypass_insert(hctx, rq);
+}
+}
+}
 if (e && e->type->ops.mq.insert_requests)
 e->type->ops.mq.insert_requests(hctx, list, false);
 else


@@ -15,8 +15,8 @@ struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bi
 void blk_mq_sched_put_request(struct request *rq);
 void blk_mq_sched_request_inserted(struct request *rq);
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
-bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
+bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
@@ -49,12 +49,13 @@ blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 }
 static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
-struct request *rq)
+struct request *rq,
+struct bio *bio)
 {
 struct elevator_queue *e = q->elevator;
 if (e && e->type->ops.mq.get_rq_priv)
-return e->type->ops.mq.get_rq_priv(q, rq);
+return e->type->ops.mq.get_rq_priv(q, rq, bio);
 return 0;
 }


@@ -254,7 +254,7 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
 kobject_put(&hctx->kobj);
 }
-blk_mq_debugfs_unregister(q);
+blk_mq_debugfs_unregister_hctxs(q);
 kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 kobject_del(&q->mq_kobj);


@@ -199,13 +199,7 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 rq->special = NULL;
 /* tag was already set */
 rq->errors = 0;
-rq->cmd = rq->__cmd;
 rq->extra_len = 0;
-rq->sense_len = 0;
-rq->resid_len = 0;
-rq->sense = NULL;
 INIT_LIST_HEAD(&rq->timeout_list);
 rq->timeout = 0;
@@ -487,10 +481,6 @@ void blk_mq_start_request(struct request *rq)
 trace_block_rq_issue(q, rq);
-rq->resid_len = blk_rq_bytes(rq);
-if (unlikely(blk_bidi_rq(rq)))
-rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
 blk_stat_set_issue_time(&rq->issue_stat);
 rq->rq_flags |= RQF_STATS;
@@ -773,7 +763,7 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 int checked = 8;
 list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
-int el_ret;
+bool merged = false;
 if (!checked--)
 break;
@@ -781,26 +771,25 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 if (!blk_rq_merge_ok(rq, bio))
 continue;
-el_ret = blk_try_merge(rq, bio);
-if (el_ret == ELEVATOR_NO_MERGE)
+switch (blk_try_merge(rq, bio)) {
+case ELEVATOR_BACK_MERGE:
+if (blk_mq_sched_allow_merge(q, rq, bio))
+merged = bio_attempt_back_merge(q, rq, bio);
+break;
+case ELEVATOR_FRONT_MERGE:
+if (blk_mq_sched_allow_merge(q, rq, bio))
+merged = bio_attempt_front_merge(q, rq, bio);
+break;
+case ELEVATOR_DISCARD_MERGE:
+merged = bio_attempt_discard_merge(q, rq, bio);
+break;
+default:
 continue;
-if (!blk_mq_sched_allow_merge(q, rq, bio))
-break;
-if (el_ret == ELEVATOR_BACK_MERGE) {
-if (bio_attempt_back_merge(q, rq, bio)) {
-ctx->rq_merged++;
-return true;
-}
-break;
-} else if (el_ret == ELEVATOR_FRONT_MERGE) {
-if (bio_attempt_front_merge(q, rq, bio)) {
-ctx->rq_merged++;
-return true;
-}
-break;
 }
+if (merged)
+ctx->rq_merged++;
+return merged;
 }
 return false;
@@ -1013,7 +1002,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 blk_mq_run_hw_queue(hctx, true);
 }
-return ret != BLK_MQ_RQ_QUEUE_BUSY;
+return queued != 0;
 }
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
@@ -1442,12 +1431,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 cookie = request_to_qc_t(data.hctx, rq);
 if (unlikely(is_flush_fua)) {
-blk_mq_put_ctx(data.ctx);
+if (q->elevator)
+goto elv_insert;
 blk_mq_bio_to_request(rq, bio);
-blk_mq_get_driver_tag(rq, NULL, true);
 blk_insert_flush(rq);
-blk_mq_run_hw_queue(data.hctx, true);
-goto done;
+goto run_queue;
 }
 plug = current->plug;
@@ -1497,6 +1485,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 }
 if (q->elevator) {
+elv_insert:
 blk_mq_put_ctx(data.ctx);
 blk_mq_bio_to_request(rq, bio);
 blk_mq_sched_insert_request(rq, false, true,
@@ -1510,6 +1499,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 * latter allows for merging opportunities and more efficient
 * dispatching.
 */
+run_queue:
 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
} }
blk_mq_put_ctx(data.ctx); blk_mq_put_ctx(data.ctx);
@ -1565,12 +1555,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
cookie = request_to_qc_t(data.hctx, rq); cookie = request_to_qc_t(data.hctx, rq);
if (unlikely(is_flush_fua)) { if (unlikely(is_flush_fua)) {
blk_mq_put_ctx(data.ctx); if (q->elevator)
goto elv_insert;
blk_mq_bio_to_request(rq, bio); blk_mq_bio_to_request(rq, bio);
blk_mq_get_driver_tag(rq, NULL, true);
blk_insert_flush(rq); blk_insert_flush(rq);
blk_mq_run_hw_queue(data.hctx, true); goto run_queue;
goto done;
} }
/* /*
@ -1608,6 +1597,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
} }
if (q->elevator) { if (q->elevator) {
elv_insert:
blk_mq_put_ctx(data.ctx); blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio); blk_mq_bio_to_request(rq, bio);
blk_mq_sched_insert_request(rq, false, true, blk_mq_sched_insert_request(rq, false, true,
@ -1621,6 +1611,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
* latter allows for merging opportunities and more efficient * latter allows for merging opportunities and more efficient
* dispatching. * dispatching.
*/ */
run_queue:
blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
} }
@ -2637,10 +2628,14 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
list_for_each_entry(q, &set->tag_list, tag_set_list) { list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q); blk_mq_realloc_hw_ctxs(set, q);
/*
* Manually set the make_request_fn as blk_queue_make_request
* resets a lot of the queue settings.
*/
if (q->nr_hw_queues > 1) if (q->nr_hw_queues > 1)
blk_queue_make_request(q, blk_mq_make_request); q->make_request_fn = blk_mq_make_request;
else else
blk_queue_make_request(q, blk_sq_make_request); q->make_request_fn = blk_sq_make_request;
blk_mq_queue_reinit(q, cpu_online_mask); blk_mq_queue_reinit(q, cpu_online_mask);
} }
@ -2824,8 +2819,6 @@ void blk_mq_enable_hotplug(void)
static int __init blk_mq_init(void) static int __init blk_mq_init(void)
{ {
blk_mq_debugfs_init();
cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
blk_mq_hctx_notify_dead); blk_mq_hctx_notify_dead);

View File

@ -85,16 +85,11 @@ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
* debugfs helpers * debugfs helpers
*/ */
#ifdef CONFIG_BLK_DEBUG_FS #ifdef CONFIG_BLK_DEBUG_FS
void blk_mq_debugfs_init(void);
int blk_mq_debugfs_register(struct request_queue *q, const char *name); int blk_mq_debugfs_register(struct request_queue *q, const char *name);
void blk_mq_debugfs_unregister(struct request_queue *q); void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctxs(struct request_queue *q); int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q); void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
#else #else
static inline void blk_mq_debugfs_init(void)
{
}
static inline int blk_mq_debugfs_register(struct request_queue *q, static inline int blk_mq_debugfs_register(struct request_queue *q,
const char *name) const char *name)
{ {

View File

@ -88,6 +88,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
void blk_set_default_limits(struct queue_limits *lim) void blk_set_default_limits(struct queue_limits *lim)
{ {
lim->max_segments = BLK_MAX_SEGMENTS; lim->max_segments = BLK_MAX_SEGMENTS;
lim->max_discard_segments = 1;
lim->max_integrity_segments = 0; lim->max_integrity_segments = 0;
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->virt_boundary_mask = 0; lim->virt_boundary_mask = 0;
@ -128,6 +129,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
/* Inherit limits from component devices */ /* Inherit limits from component devices */
lim->discard_zeroes_data = 1; lim->discard_zeroes_data = 1;
lim->max_segments = USHRT_MAX; lim->max_segments = USHRT_MAX;
lim->max_discard_segments = 1;
lim->max_hw_sectors = UINT_MAX; lim->max_hw_sectors = UINT_MAX;
lim->max_segment_size = UINT_MAX; lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX; lim->max_sectors = UINT_MAX;
@ -253,7 +255,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS); max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
limits->max_sectors = max_sectors; limits->max_sectors = max_sectors;
q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9); q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
} }
EXPORT_SYMBOL(blk_queue_max_hw_sectors); EXPORT_SYMBOL(blk_queue_max_hw_sectors);
@ -336,6 +338,22 @@ void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments
} }
EXPORT_SYMBOL(blk_queue_max_segments); EXPORT_SYMBOL(blk_queue_max_segments);
/**
* blk_queue_max_discard_segments - set max segments for discard requests
* @q: the request queue for the device
* @max_segments: max number of segments
*
* Description:
* Enables a low level driver to set an upper limit on the number of
* segments in a discard request.
**/
void blk_queue_max_discard_segments(struct request_queue *q,
unsigned short max_segments)
{
q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
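
The kernel-doc above describes the new per-queue limit; drivers are expected to set it next to the existing segment limits when they configure their queue. Below is a minimal sketch of such a probe-time helper; the device name and the chosen numbers are illustrative assumptions, not values from this series:

/* Hedged sketch: hypothetical driver capping discards to one segment
 * while allowing larger regular read/write requests. */
static void exampledev_set_queue_limits(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 256);	/* 128 KiB per request */
	blk_queue_max_segments(q, 128);		/* regular read/write I/O */
	blk_queue_max_discard_segments(q, 1);	/* one discard range per request */
}

Stacking drivers do not need to call it themselves: blk_stack_limits() (further down in this file) folds max_discard_segments into the combined limits with min_not_zero().
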
/** /**
* blk_queue_max_segment_size - set max segment size for blk_rq_map_sg * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
* @q: the request queue for the device * @q: the request queue for the device
@ -553,6 +571,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->virt_boundary_mask); b->virt_boundary_mask);
t->max_segments = min_not_zero(t->max_segments, b->max_segments); t->max_segments = min_not_zero(t->max_segments, b->max_segments);
t->max_discard_segments = min_not_zero(t->max_discard_segments,
b->max_discard_segments);
t->max_integrity_segments = min_not_zero(t->max_integrity_segments, t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
b->max_integrity_segments); b->max_integrity_segments);

View File

@ -89,7 +89,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_ra_show(struct request_queue *q, char *page) static ssize_t queue_ra_show(struct request_queue *q, char *page)
{ {
unsigned long ra_kb = q->backing_dev_info.ra_pages << unsigned long ra_kb = q->backing_dev_info->ra_pages <<
(PAGE_SHIFT - 10); (PAGE_SHIFT - 10);
return queue_var_show(ra_kb, (page)); return queue_var_show(ra_kb, (page));
@ -104,7 +104,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0) if (ret < 0)
return ret; return ret;
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10); q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
return ret; return ret;
} }
@ -121,6 +121,12 @@ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
return queue_var_show(queue_max_segments(q), (page)); return queue_var_show(queue_max_segments(q), (page));
} }
static ssize_t queue_max_discard_segments_show(struct request_queue *q,
char *page)
{
return queue_var_show(queue_max_discard_segments(q), (page));
}
static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{ {
return queue_var_show(q->limits.max_integrity_segments, (page)); return queue_var_show(q->limits.max_integrity_segments, (page));
@ -236,7 +242,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1; q->limits.max_sectors = max_sectors_kb << 1;
q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
return ret; return ret;
@ -545,6 +551,11 @@ static struct queue_sysfs_entry queue_max_segments_entry = {
.show = queue_max_segments_show, .show = queue_max_segments_show,
}; };
static struct queue_sysfs_entry queue_max_discard_segments_entry = {
.attr = {.name = "max_discard_segments", .mode = S_IRUGO },
.show = queue_max_discard_segments_show,
};
static struct queue_sysfs_entry queue_max_integrity_segments_entry = { static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
.attr = {.name = "max_integrity_segments", .mode = S_IRUGO }, .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
.show = queue_max_integrity_segments_show, .show = queue_max_integrity_segments_show,
@ -697,6 +708,7 @@ static struct attribute *default_attrs[] = {
&queue_max_hw_sectors_entry.attr, &queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr, &queue_max_sectors_entry.attr,
&queue_max_segments_entry.attr, &queue_max_segments_entry.attr,
&queue_max_discard_segments_entry.attr,
&queue_max_integrity_segments_entry.attr, &queue_max_integrity_segments_entry.attr,
&queue_max_segment_size_entry.attr, &queue_max_segment_size_entry.attr,
&queue_iosched_entry.attr, &queue_iosched_entry.attr,
@ -799,7 +811,7 @@ static void blk_release_queue(struct kobject *kobj)
container_of(kobj, struct request_queue, kobj); container_of(kobj, struct request_queue, kobj);
wbt_exit(q); wbt_exit(q);
bdi_exit(&q->backing_dev_info); bdi_put(q->backing_dev_info);
blkcg_exit_queue(q); blkcg_exit_queue(q);
if (q->elevator) { if (q->elevator) {
@ -814,13 +826,19 @@ static void blk_release_queue(struct kobject *kobj)
if (q->queue_tags) if (q->queue_tags)
__blk_queue_free_tags(q); __blk_queue_free_tags(q);
if (!q->mq_ops) if (!q->mq_ops) {
if (q->exit_rq_fn)
q->exit_rq_fn(q, q->fq->flush_rq);
blk_free_flush_queue(q->fq); blk_free_flush_queue(q->fq);
else } else {
blk_mq_release(q); blk_mq_release(q);
}
blk_trace_shutdown(q); blk_trace_shutdown(q);
if (q->mq_ops)
blk_mq_debugfs_unregister(q);
if (q->bio_split) if (q->bio_split)
bioset_free(q->bio_split); bioset_free(q->bio_split);
@ -884,32 +902,36 @@ int blk_register_queue(struct gendisk *disk)
if (ret) if (ret)
return ret; return ret;
if (q->mq_ops)
blk_mq_register_dev(dev, q);
/* Prevent changes through sysfs until registration is completed. */
mutex_lock(&q->sysfs_lock);
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
if (ret < 0) { if (ret < 0) {
blk_trace_remove_sysfs(dev); blk_trace_remove_sysfs(dev);
return ret; goto unlock;
} }
kobject_uevent(&q->kobj, KOBJ_ADD); kobject_uevent(&q->kobj, KOBJ_ADD);
if (q->mq_ops)
blk_mq_register_dev(dev, q);
blk_wb_init(q); blk_wb_init(q);
if (!q->request_fn) if (q->request_fn || (q->mq_ops && q->elevator)) {
return 0; ret = elv_register_queue(q);
if (ret) {
ret = elv_register_queue(q); kobject_uevent(&q->kobj, KOBJ_REMOVE);
if (ret) { kobject_del(&q->kobj);
kobject_uevent(&q->kobj, KOBJ_REMOVE); blk_trace_remove_sysfs(dev);
kobject_del(&q->kobj); kobject_put(&dev->kobj);
blk_trace_remove_sysfs(dev); goto unlock;
kobject_put(&dev->kobj); }
return ret;
} }
ret = 0;
return 0; unlock:
mutex_unlock(&q->sysfs_lock);
return ret;
} }
void blk_unregister_queue(struct gendisk *disk) void blk_unregister_queue(struct gendisk *disk)
@ -922,7 +944,7 @@ void blk_unregister_queue(struct gendisk *disk)
if (q->mq_ops) if (q->mq_ops)
blk_mq_unregister_dev(disk_to_dev(disk), q); blk_mq_unregister_dev(disk_to_dev(disk), q);
if (q->request_fn) if (q->request_fn || (q->mq_ops && q->elevator))
elv_unregister_queue(q); elv_unregister_queue(q);
kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_uevent(&q->kobj, KOBJ_REMOVE);

View File

@ -96,7 +96,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
*/ */
static bool wb_recent_wait(struct rq_wb *rwb) static bool wb_recent_wait(struct rq_wb *rwb)
{ {
struct bdi_writeback *wb = &rwb->queue->backing_dev_info.wb; struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;
return time_before(jiffies, wb->dirty_sleep + HZ); return time_before(jiffies, wb->dirty_sleep + HZ);
} }
@ -279,7 +279,7 @@ enum {
static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat) static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{ {
struct backing_dev_info *bdi = &rwb->queue->backing_dev_info; struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
u64 thislat; u64 thislat;
/* /*
@ -339,7 +339,7 @@ static int latency_exceeded(struct rq_wb *rwb)
static void rwb_trace_step(struct rq_wb *rwb, const char *msg) static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{ {
struct backing_dev_info *bdi = &rwb->queue->backing_dev_info; struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec, trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
rwb->wb_background, rwb->wb_normal, rwb->wb_max); rwb->wb_background, rwb->wb_normal, rwb->wb_max);
@ -423,7 +423,7 @@ static void wb_timer_fn(unsigned long data)
status = latency_exceeded(rwb); status = latency_exceeded(rwb);
trace_wbt_timer(&rwb->queue->backing_dev_info, status, rwb->scale_step, trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
inflight); inflight);
/* /*

View File

@ -14,6 +14,10 @@
/* Max future timer expiry for timeouts */ /* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ) #define BLK_MAX_TIMEOUT (5 * HZ)
#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif
struct blk_flush_queue { struct blk_flush_queue {
unsigned int flush_queue_delayed:1; unsigned int flush_queue_delayed:1;
unsigned int flush_pending_idx:1; unsigned int flush_pending_idx:1;
@ -96,6 +100,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
struct bio *bio); struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req, bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
struct bio *bio); struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count, unsigned int *request_count,
struct request **same_queue_rq); struct request **same_queue_rq);
@ -204,14 +210,14 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio); struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req, int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio); struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq); struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq); struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq, int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next); struct request *next);
void blk_recalc_rq_segments(struct request *rq); void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq); void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio); bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio); enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
void blk_queue_congestion_threshold(struct request_queue *q); void blk_queue_congestion_threshold(struct request_queue *q);
@ -249,7 +255,14 @@ static inline int blk_do_io_stat(struct request *rq)
{ {
return rq->rq_disk && return rq->rq_disk &&
(rq->rq_flags & RQF_IO_STAT) && (rq->rq_flags & RQF_IO_STAT) &&
(rq->cmd_type == REQ_TYPE_FS); !blk_rq_is_passthrough(rq);
}
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
} }
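
The new req_set_nomerge() helper centralizes the "stop trying to merge into this request" bookkeeping. A hedged sketch of how a merge path might use it when a combined request would exceed the queue's segment limit; the function and parameter names are illustrative, not code from this series:

/* Hedged sketch: refuse a back merge that would exceed max_segments. */
static int example_back_merge_allowed(struct request_queue *q,
				      struct request *req,
				      unsigned short nr_new_segs)
{
	if (req->nr_phys_segments + nr_new_segs > queue_max_segments(q)) {
		req_set_nomerge(q, req);	/* flag it and drop it from the one-hit merge cache */
		return 0;
	}
	return 1;
}
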
/* /*

View File

@ -71,22 +71,24 @@ void bsg_job_done(struct bsg_job *job, int result,
{ {
struct request *req = job->req; struct request *req = job->req;
struct request *rsp = req->next_rq; struct request *rsp = req->next_rq;
struct scsi_request *rq = scsi_req(req);
int err; int err;
err = job->req->errors = result; err = job->req->errors = result;
if (err < 0) if (err < 0)
/* we're only returning the result field in the reply */ /* we're only returning the result field in the reply */
job->req->sense_len = sizeof(u32); rq->sense_len = sizeof(u32);
else else
job->req->sense_len = job->reply_len; rq->sense_len = job->reply_len;
/* we assume all request payload was transferred, residual == 0 */ /* we assume all request payload was transferred, residual == 0 */
req->resid_len = 0; rq->resid_len = 0;
if (rsp) { if (rsp) {
WARN_ON(reply_payload_rcv_len > rsp->resid_len); WARN_ON(reply_payload_rcv_len > scsi_req(rsp)->resid_len);
/* set reply (bidi) residual */ /* set reply (bidi) residual */
rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len); scsi_req(rsp)->resid_len -=
min(reply_payload_rcv_len, scsi_req(rsp)->resid_len);
} }
blk_complete_request(req); blk_complete_request(req);
} }
@ -113,6 +115,7 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
if (!buf->sg_list) if (!buf->sg_list)
return -ENOMEM; return -ENOMEM;
sg_init_table(buf->sg_list, req->nr_phys_segments); sg_init_table(buf->sg_list, req->nr_phys_segments);
scsi_req(req)->resid_len = blk_rq_bytes(req);
buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
buf->payload_len = blk_rq_bytes(req); buf->payload_len = blk_rq_bytes(req);
return 0; return 0;
@ -127,6 +130,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
{ {
struct request *rsp = req->next_rq; struct request *rsp = req->next_rq;
struct request_queue *q = req->q; struct request_queue *q = req->q;
struct scsi_request *rq = scsi_req(req);
struct bsg_job *job; struct bsg_job *job;
int ret; int ret;
@ -140,9 +144,9 @@ static int bsg_create_job(struct device *dev, struct request *req)
job->req = req; job->req = req;
if (q->bsg_job_size) if (q->bsg_job_size)
job->dd_data = (void *)&job[1]; job->dd_data = (void *)&job[1];
job->request = req->cmd; job->request = rq->cmd;
job->request_len = req->cmd_len; job->request_len = rq->cmd_len;
job->reply = req->sense; job->reply = rq->sense;
job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
* allocated */ * allocated */
if (req->bio) { if (req->bio) {
@ -177,7 +181,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
* *
* Drivers/subsys should pass this to the queue init function. * Drivers/subsys should pass this to the queue init function.
*/ */
void bsg_request_fn(struct request_queue *q) static void bsg_request_fn(struct request_queue *q)
__releases(q->queue_lock) __releases(q->queue_lock)
__acquires(q->queue_lock) __acquires(q->queue_lock)
{ {
@ -214,24 +218,30 @@ void bsg_request_fn(struct request_queue *q)
put_device(dev); put_device(dev);
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
} }
EXPORT_SYMBOL_GPL(bsg_request_fn);
/** /**
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to * @dev: device to attach bsg device to
* @q: request queue setup by caller
* @name: device to give bsg device * @name: device to give bsg device
* @job_fn: bsg job handler * @job_fn: bsg job handler
* @dd_job_size: size of LLD data needed for each job * @dd_job_size: size of LLD data needed for each job
*
* The caller should have set up the request queue with bsg_request_fn
* as the request_fn.
*/ */
int bsg_setup_queue(struct device *dev, struct request_queue *q, struct request_queue *bsg_setup_queue(struct device *dev, char *name,
char *name, bsg_job_fn *job_fn, int dd_job_size) bsg_job_fn *job_fn, int dd_job_size)
{ {
struct request_queue *q;
int ret; int ret;
q = blk_alloc_queue(GFP_KERNEL);
if (!q)
return ERR_PTR(-ENOMEM);
q->cmd_size = sizeof(struct scsi_request);
q->request_fn = bsg_request_fn;
ret = blk_init_allocated_queue(q);
if (ret)
goto out_cleanup_queue;
q->queuedata = dev; q->queuedata = dev;
q->bsg_job_size = dd_job_size; q->bsg_job_size = dd_job_size;
q->bsg_job_fn = job_fn; q->bsg_job_fn = job_fn;
@ -243,9 +253,12 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q,
if (ret) { if (ret) {
printk(KERN_ERR "%s: bsg interface failed to " printk(KERN_ERR "%s: bsg interface failed to "
"initialize - register queue\n", dev->kobj.name); "initialize - register queue\n", dev->kobj.name);
return ret; goto out_cleanup_queue;
} }
return 0; return q;
out_cleanup_queue:
blk_cleanup_queue(q);
return ERR_PTR(ret);
} }
EXPORT_SYMBOL_GPL(bsg_setup_queue); EXPORT_SYMBOL_GPL(bsg_setup_queue);
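
With queue allocation folded into bsg_setup_queue(), a transport driver no longer wires up a request_fn of its own; it passes a job handler and per-job payload size and checks the returned queue. A hedged sketch of a hypothetical caller follows; every example_* name is an assumption:

#include <linux/bsg-lib.h>

struct example_job_data {
	u32 opcode;			/* per-job driver data, sized below */
};

static int example_bsg_job_fn(struct bsg_job *job)
{
	/* hand job->request / job->request_len to the hardware, then
	 * complete with bsg_job_done() from the IRQ path */
	return 0;
}

static int example_bsg_attach(struct device *dev)
{
	struct request_queue *q;

	q = bsg_setup_queue(dev, "example_bsg", example_bsg_job_fn,
			    sizeof(struct example_job_data));
	if (IS_ERR(q))
		return PTR_ERR(q);	/* the queue is already cleaned up on failure */

	/* remember q so it can be torn down with blk_cleanup_queue() later */
	return 0;
}
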

View File

@ -85,7 +85,6 @@ struct bsg_command {
struct bio *bidi_bio; struct bio *bidi_bio;
int err; int err;
struct sg_io_v4 hdr; struct sg_io_v4 hdr;
char sense[SCSI_SENSE_BUFFERSIZE];
}; };
static void bsg_free_command(struct bsg_command *bc) static void bsg_free_command(struct bsg_command *bc)
@ -140,18 +139,20 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, struct bsg_device *bd, struct sg_io_v4 *hdr, struct bsg_device *bd,
fmode_t has_write_perm) fmode_t has_write_perm)
{ {
struct scsi_request *req = scsi_req(rq);
if (hdr->request_len > BLK_MAX_CDB) { if (hdr->request_len > BLK_MAX_CDB) {
rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); req->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
if (!rq->cmd) if (!req->cmd)
return -ENOMEM; return -ENOMEM;
} }
if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request, if (copy_from_user(req->cmd, (void __user *)(unsigned long)hdr->request,
hdr->request_len)) hdr->request_len))
return -EFAULT; return -EFAULT;
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
if (blk_verify_command(rq->cmd, has_write_perm)) if (blk_verify_command(req->cmd, has_write_perm))
return -EPERM; return -EPERM;
} else if (!capable(CAP_SYS_RAWIO)) } else if (!capable(CAP_SYS_RAWIO))
return -EPERM; return -EPERM;
@ -159,7 +160,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
/* /*
* fill in request structure * fill in request structure
*/ */
rq->cmd_len = hdr->request_len; req->cmd_len = hdr->request_len;
rq->timeout = msecs_to_jiffies(hdr->timeout); rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout) if (!rq->timeout)
@ -176,7 +177,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
* Check if sg_io_v4 from user is allowed and valid * Check if sg_io_v4 from user is allowed and valid
*/ */
static int static int
bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw) bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op)
{ {
int ret = 0; int ret = 0;
@ -197,7 +198,7 @@ bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
ret = -EINVAL; ret = -EINVAL;
} }
*rw = hdr->dout_xfer_len ? WRITE : READ; *op = hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN;
return ret; return ret;
} }
@ -205,13 +206,12 @@ bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
* map sg_io_v4 to a request. * map sg_io_v4 to a request.
*/ */
static struct request * static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
u8 *sense)
{ {
struct request_queue *q = bd->queue; struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL; struct request *rq, *next_rq = NULL;
int ret, rw; int ret;
unsigned int dxfer_len; unsigned int op, dxfer_len;
void __user *dxferp = NULL; void __user *dxferp = NULL;
struct bsg_class_device *bcd = &q->bsg_dev; struct bsg_class_device *bcd = &q->bsg_dev;
@ -226,36 +226,35 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
hdr->din_xfer_len); hdr->din_xfer_len);
ret = bsg_validate_sgv4_hdr(hdr, &rw); ret = bsg_validate_sgv4_hdr(hdr, &op);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
/* /*
* map scatter-gather elements separately and string them to request * map scatter-gather elements separately and string them to request
*/ */
rq = blk_get_request(q, rw, GFP_KERNEL); rq = blk_get_request(q, op, GFP_KERNEL);
if (IS_ERR(rq)) if (IS_ERR(rq))
return rq; return rq;
blk_rq_set_block_pc(rq); scsi_req_init(rq);
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
if (ret) if (ret)
goto out; goto out;
if (rw == WRITE && hdr->din_xfer_len) { if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) {
if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
goto out; goto out;
} }
next_rq = blk_get_request(q, READ, GFP_KERNEL); next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(next_rq)) { if (IS_ERR(next_rq)) {
ret = PTR_ERR(next_rq); ret = PTR_ERR(next_rq);
next_rq = NULL; next_rq = NULL;
goto out; goto out;
} }
rq->next_rq = next_rq; rq->next_rq = next_rq;
next_rq->cmd_type = rq->cmd_type;
dxferp = (void __user *)(unsigned long)hdr->din_xferp; dxferp = (void __user *)(unsigned long)hdr->din_xferp;
ret = blk_rq_map_user(q, next_rq, NULL, dxferp, ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
@ -280,13 +279,9 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
goto out; goto out;
} }
rq->sense = sense;
rq->sense_len = 0;
return rq; return rq;
out: out:
if (rq->cmd != rq->__cmd) scsi_req_free_cmd(scsi_req(rq));
kfree(rq->cmd);
blk_put_request(rq); blk_put_request(rq);
if (next_rq) { if (next_rq) {
blk_rq_unmap_user(next_rq->bio); blk_rq_unmap_user(next_rq->bio);
@ -393,6 +388,7 @@ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
struct bio *bio, struct bio *bidi_bio) struct bio *bio, struct bio *bidi_bio)
{ {
struct scsi_request *req = scsi_req(rq);
int ret = 0; int ret = 0;
dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors); dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
@ -407,12 +403,12 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
hdr->info |= SG_INFO_CHECK; hdr->info |= SG_INFO_CHECK;
hdr->response_len = 0; hdr->response_len = 0;
if (rq->sense_len && hdr->response) { if (req->sense_len && hdr->response) {
int len = min_t(unsigned int, hdr->max_response_len, int len = min_t(unsigned int, hdr->max_response_len,
rq->sense_len); req->sense_len);
ret = copy_to_user((void __user *)(unsigned long)hdr->response, ret = copy_to_user((void __user *)(unsigned long)hdr->response,
rq->sense, len); req->sense, len);
if (!ret) if (!ret)
hdr->response_len = len; hdr->response_len = len;
else else
@ -420,14 +416,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
} }
if (rq->next_rq) { if (rq->next_rq) {
hdr->dout_resid = rq->resid_len; hdr->dout_resid = req->resid_len;
hdr->din_resid = rq->next_rq->resid_len; hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
blk_rq_unmap_user(bidi_bio); blk_rq_unmap_user(bidi_bio);
blk_put_request(rq->next_rq); blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ) } else if (rq_data_dir(rq) == READ)
hdr->din_resid = rq->resid_len; hdr->din_resid = req->resid_len;
else else
hdr->dout_resid = rq->resid_len; hdr->dout_resid = req->resid_len;
/* /*
* If the request generated a negative error number, return it * If the request generated a negative error number, return it
@ -439,8 +435,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
ret = rq->errors; ret = rq->errors;
blk_rq_unmap_user(bio); blk_rq_unmap_user(bio);
if (rq->cmd != rq->__cmd) scsi_req_free_cmd(req);
kfree(rq->cmd);
blk_put_request(rq); blk_put_request(rq);
return ret; return ret;
@ -625,7 +620,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
/* /*
* get a request, fill in the blanks, and add to request queue * get a request, fill in the blanks, and add to request queue
*/ */
rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense); rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
ret = PTR_ERR(rq); ret = PTR_ERR(rq);
rq = NULL; rq = NULL;
@ -911,12 +906,11 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct bio *bio, *bidi_bio = NULL; struct bio *bio, *bidi_bio = NULL;
struct sg_io_v4 hdr; struct sg_io_v4 hdr;
int at_head; int at_head;
u8 sense[SCSI_SENSE_BUFFERSIZE];
if (copy_from_user(&hdr, uarg, sizeof(hdr))) if (copy_from_user(&hdr, uarg, sizeof(hdr)))
return -EFAULT; return -EFAULT;
rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense); rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
if (IS_ERR(rq)) if (IS_ERR(rq))
return PTR_ERR(rq); return PTR_ERR(rq);

View File

@ -2528,7 +2528,7 @@ static void cfq_remove_request(struct request *rq)
} }
} }
static int cfq_merge(struct request_queue *q, struct request **req, static enum elv_merge cfq_merge(struct request_queue *q, struct request **req,
struct bio *bio) struct bio *bio)
{ {
struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_data *cfqd = q->elevator->elevator_data;
@ -2544,7 +2544,7 @@ static int cfq_merge(struct request_queue *q, struct request **req,
} }
static void cfq_merged_request(struct request_queue *q, struct request *req, static void cfq_merged_request(struct request_queue *q, struct request *req,
int type) enum elv_merge type)
{ {
if (type == ELEVATOR_FRONT_MERGE) { if (type == ELEVATOR_FRONT_MERGE) {
struct cfq_queue *cfqq = RQ_CFQQ(req); struct cfq_queue *cfqq = RQ_CFQQ(req);

View File

@ -661,7 +661,6 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
struct block_device *bdev = inode->i_bdev; struct block_device *bdev = inode->i_bdev;
struct gendisk *disk = bdev->bd_disk; struct gendisk *disk = bdev->bd_disk;
fmode_t mode = file->f_mode; fmode_t mode = file->f_mode;
struct backing_dev_info *bdi;
loff_t size; loff_t size;
unsigned int max_sectors; unsigned int max_sectors;
@ -708,9 +707,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKFRAGET: case BLKFRAGET:
if (!arg) if (!arg)
return -EINVAL; return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
return compat_put_long(arg, return compat_put_long(arg,
(bdi->ra_pages * PAGE_SIZE) / 512); (bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET: /* compatible */ case BLKROGET: /* compatible */
return compat_put_int(arg, bdev_read_only(bdev) != 0); return compat_put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@ -728,8 +726,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKFRASET: case BLKFRASET:
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EACCES; return -EACCES;
bdi = blk_get_backing_dev_info(bdev); bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0; return 0;
case BLKGETSIZE: case BLKGETSIZE:
size = i_size_read(bdev->bd_inode); size = i_size_read(bdev->bd_inode);

View File

@ -120,12 +120,11 @@ static void deadline_remove_request(struct request_queue *q, struct request *rq)
deadline_del_rq_rb(dd, rq); deadline_del_rq_rb(dd, rq);
} }
static int static enum elv_merge
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{ {
struct deadline_data *dd = q->elevator->elevator_data; struct deadline_data *dd = q->elevator->elevator_data;
struct request *__rq; struct request *__rq;
int ret;
/* /*
* check for front merge * check for front merge
@ -138,20 +137,17 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
BUG_ON(sector != blk_rq_pos(__rq)); BUG_ON(sector != blk_rq_pos(__rq));
if (elv_bio_merge_ok(__rq, bio)) { if (elv_bio_merge_ok(__rq, bio)) {
ret = ELEVATOR_FRONT_MERGE; *req = __rq;
goto out; return ELEVATOR_FRONT_MERGE;
} }
} }
} }
return ELEVATOR_NO_MERGE; return ELEVATOR_NO_MERGE;
out:
*req = __rq;
return ret;
} }
static void deadline_merged_request(struct request_queue *q, static void deadline_merged_request(struct request_queue *q,
struct request *req, int type) struct request *req, enum elv_merge type)
{ {
struct deadline_data *dd = q->elevator->elevator_data; struct deadline_data *dd = q->elevator->elevator_data;

View File

@ -428,11 +428,11 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
} }
EXPORT_SYMBOL(elv_dispatch_add_tail); EXPORT_SYMBOL(elv_dispatch_add_tail);
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) enum elv_merge elv_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{ {
struct elevator_queue *e = q->elevator; struct elevator_queue *e = q->elevator;
struct request *__rq; struct request *__rq;
int ret;
/* /*
* Levels of merges: * Levels of merges:
@ -447,7 +447,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
* First try one-hit cache. * First try one-hit cache.
*/ */
if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) { if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
ret = blk_try_merge(q->last_merge, bio); enum elv_merge ret = blk_try_merge(q->last_merge, bio);
if (ret != ELEVATOR_NO_MERGE) { if (ret != ELEVATOR_NO_MERGE) {
*req = q->last_merge; *req = q->last_merge;
return ret; return ret;
@ -515,7 +516,8 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
return ret; return ret;
} }
void elv_merged_request(struct request_queue *q, struct request *rq, int type) void elv_merged_request(struct request_queue *q, struct request *rq,
enum elv_merge type)
{ {
struct elevator_queue *e = q->elevator; struct elevator_queue *e = q->elevator;
@ -539,7 +541,7 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
if (e->uses_mq && e->type->ops.mq.requests_merged) if (e->uses_mq && e->type->ops.mq.requests_merged)
e->type->ops.mq.requests_merged(q, rq, next); e->type->ops.mq.requests_merged(q, rq, next);
else if (e->type->ops.sq.elevator_merge_req_fn) { else if (e->type->ops.sq.elevator_merge_req_fn) {
next_sorted = next->rq_flags & RQF_SORTED; next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
if (next_sorted) if (next_sorted)
e->type->ops.sq.elevator_merge_req_fn(q, rq, next); e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
} }
@ -635,7 +637,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
if (rq->rq_flags & RQF_SOFTBARRIER) { if (rq->rq_flags & RQF_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */ /* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS) { if (!blk_rq_is_passthrough(rq)) {
q->end_sector = rq_end_sector(rq); q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq; q->boundary_rq = rq;
} }
@ -677,7 +679,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
if (elv_attempt_insert_merge(q, rq)) if (elv_attempt_insert_merge(q, rq))
break; break;
case ELEVATOR_INSERT_SORT: case ELEVATOR_INSERT_SORT:
BUG_ON(rq->cmd_type != REQ_TYPE_FS); BUG_ON(blk_rq_is_passthrough(rq));
rq->rq_flags |= RQF_SORTED; rq->rq_flags |= RQF_SORTED;
q->nr_sorted++; q->nr_sorted++;
if (rq_mergeable(rq)) { if (rq_mergeable(rq)) {

View File

@ -572,6 +572,20 @@ static void register_disk(struct device *parent, struct gendisk *disk)
disk_part_iter_exit(&piter); disk_part_iter_exit(&piter);
} }
void put_disk_devt(struct disk_devt *disk_devt)
{
if (disk_devt && atomic_dec_and_test(&disk_devt->count))
disk_devt->release(disk_devt);
}
EXPORT_SYMBOL(put_disk_devt);
void get_disk_devt(struct disk_devt *disk_devt)
{
if (disk_devt)
atomic_inc(&disk_devt->count);
}
EXPORT_SYMBOL(get_disk_devt);
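
get_disk_devt()/put_disk_devt() are a plain reference count around a disk's devt; device_add_disk() below takes one on behalf of the queue so the dev_t region cannot be reallocated while the bdi is still registered against it. A hedged sketch of the owner side; the wrapper structure and release callback are assumptions, only the struct disk_devt fields used here (count, release) appear in this patch:

#include <linux/genhd.h>
#include <linux/slab.h>

/* Hedged sketch: an owner embedding struct disk_devt. */
struct example_disk_devt {
	struct disk_devt disk_devt;
	dev_t devt;
};

static void example_disk_devt_release(struct disk_devt *disk_devt)
{
	struct example_disk_devt *p =
		container_of(disk_devt, struct example_disk_devt, disk_devt);

	/* the final put_disk_devt() landed here: free the dev_t region and wrapper */
	kfree(p);
}

static struct example_disk_devt *example_disk_devt_alloc(void)
{
	struct example_disk_devt *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	atomic_set(&p->disk_devt.count, 1);	/* reference held by the disk */
	p->disk_devt.release = example_disk_devt_release;
	return p;
}

Every additional holder pairs get_disk_devt() with put_disk_devt(); the release callback only runs once the last reference is dropped.
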
/** /**
* device_add_disk - add partitioning information to kernel list * device_add_disk - add partitioning information to kernel list
* @parent: parent device for the disk * @parent: parent device for the disk
@ -612,8 +626,15 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
disk_alloc_events(disk); disk_alloc_events(disk);
/*
* Take a reference on the devt and assign it to queue since it
* must not be reallocated while the bdi is registered
*/
disk->queue->disk_devt = disk->disk_devt;
get_disk_devt(disk->disk_devt);
/* Register BDI before referencing it from bdev */ /* Register BDI before referencing it from bdev */
bdi = &disk->queue->backing_dev_info; bdi = disk->queue->backing_dev_info;
bdi_register_owner(bdi, disk_to_dev(disk)); bdi_register_owner(bdi, disk_to_dev(disk));
blk_register_region(disk_devt(disk), disk->minors, NULL, blk_register_region(disk_devt(disk), disk->minors, NULL,
@ -648,6 +669,8 @@ void del_gendisk(struct gendisk *disk)
disk_part_iter_init(&piter, disk, disk_part_iter_init(&piter, disk,
DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
while ((part = disk_part_iter_next(&piter))) { while ((part = disk_part_iter_next(&piter))) {
bdev_unhash_inode(MKDEV(disk->major,
disk->first_minor + part->partno));
invalidate_partition(disk, part->partno); invalidate_partition(disk, part->partno);
delete_partition(disk, part->partno); delete_partition(disk, part->partno);
} }

View File

@ -505,7 +505,6 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg) unsigned long arg)
{ {
struct backing_dev_info *bdi;
void __user *argp = (void __user *)arg; void __user *argp = (void __user *)arg;
loff_t size; loff_t size;
unsigned int max_sectors; unsigned int max_sectors;
@ -532,8 +531,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKFRAGET: case BLKFRAGET:
if (!arg) if (!arg)
return -EINVAL; return -EINVAL;
bdi = blk_get_backing_dev_info(bdev); return put_long(arg, (bdev->bd_bdi->ra_pages*PAGE_SIZE) / 512);
return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET: case BLKROGET:
return put_int(arg, bdev_read_only(bdev) != 0); return put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */ case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@ -560,8 +558,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKFRASET: case BLKFRASET:
if(!capable(CAP_SYS_ADMIN)) if(!capable(CAP_SYS_ADMIN))
return -EACCES; return -EACCES;
bdi = blk_get_backing_dev_info(bdev); bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0; return 0;
case BLKBSZSET: case BLKBSZSET:
return blkdev_bszset(bdev, mode, argp); return blkdev_bszset(bdev, mode, argp);

View File

@ -121,7 +121,7 @@ static void deadline_remove_request(struct request_queue *q, struct request *rq)
} }
static void dd_request_merged(struct request_queue *q, struct request *req, static void dd_request_merged(struct request_queue *q, struct request *req,
int type) enum elv_merge type)
{ {
struct deadline_data *dd = q->elevator->elevator_data; struct deadline_data *dd = q->elevator->elevator_data;
@ -371,12 +371,16 @@ static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{ {
struct request_queue *q = hctx->queue; struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data; struct deadline_data *dd = q->elevator->elevator_data;
int ret; struct request *free = NULL;
bool ret;
spin_lock(&dd->lock); spin_lock(&dd->lock);
ret = blk_mq_sched_try_merge(q, bio); ret = blk_mq_sched_try_merge(q, bio, &free);
spin_unlock(&dd->lock); spin_unlock(&dd->lock);
if (free)
blk_mq_free_request(free);
return ret; return ret;
} }
@ -395,10 +399,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
blk_mq_sched_request_inserted(rq); blk_mq_sched_request_inserted(rq);
if (blk_mq_sched_bypass_insert(hctx, rq)) if (at_head || blk_rq_is_passthrough(rq)) {
return;
if (at_head || rq->cmd_type != REQ_TYPE_FS) {
if (at_head) if (at_head)
list_add(&rq->queuelist, &dd->dispatch); list_add(&rq->queuelist, &dd->dispatch);
else else

View File

@ -230,15 +230,17 @@ EXPORT_SYMBOL(blk_verify_command);
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_hdr *hdr, fmode_t mode) struct sg_io_hdr *hdr, fmode_t mode)
{ {
if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) struct scsi_request *req = scsi_req(rq);
if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT; return -EFAULT;
if (blk_verify_command(rq->cmd, mode & FMODE_WRITE)) if (blk_verify_command(req->cmd, mode & FMODE_WRITE))
return -EPERM; return -EPERM;
/* /*
* fill in request structure * fill in request structure
*/ */
rq->cmd_len = hdr->cmd_len; req->cmd_len = hdr->cmd_len;
rq->timeout = msecs_to_jiffies(hdr->timeout); rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout) if (!rq->timeout)
@ -254,6 +256,7 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
struct bio *bio) struct bio *bio)
{ {
struct scsi_request *req = scsi_req(rq);
int r, ret = 0; int r, ret = 0;
/* /*
@ -267,13 +270,13 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
hdr->info = 0; hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status) if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK; hdr->info |= SG_INFO_CHECK;
hdr->resid = rq->resid_len; hdr->resid = req->resid_len;
hdr->sb_len_wr = 0; hdr->sb_len_wr = 0;
if (rq->sense_len && hdr->sbp) { if (req->sense_len && hdr->sbp) {
int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len); int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
if (!copy_to_user(hdr->sbp, rq->sense, len)) if (!copy_to_user(hdr->sbp, req->sense, len))
hdr->sb_len_wr = len; hdr->sb_len_wr = len;
else else
ret = -EFAULT; ret = -EFAULT;
@ -294,7 +297,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
int writing = 0; int writing = 0;
int at_head = 0; int at_head = 0;
struct request *rq; struct request *rq;
char sense[SCSI_SENSE_BUFFERSIZE]; struct scsi_request *req;
struct bio *bio; struct bio *bio;
if (hdr->interface_id != 'S') if (hdr->interface_id != 'S')
@ -318,14 +321,16 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
at_head = 1; at_head = 1;
ret = -ENOMEM; ret = -ENOMEM;
rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
GFP_KERNEL);
if (IS_ERR(rq)) if (IS_ERR(rq))
return PTR_ERR(rq); return PTR_ERR(rq);
blk_rq_set_block_pc(rq); req = scsi_req(rq);
scsi_req_init(rq);
if (hdr->cmd_len > BLK_MAX_CDB) { if (hdr->cmd_len > BLK_MAX_CDB) {
rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL); req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
if (!rq->cmd) if (!req->cmd)
goto out_put_request; goto out_put_request;
} }
@ -357,9 +362,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
goto out_free_cdb; goto out_free_cdb;
bio = rq->bio; bio = rq->bio;
memset(sense, 0, sizeof(sense));
rq->sense = sense;
rq->sense_len = 0;
rq->retries = 0; rq->retries = 0;
start_time = jiffies; start_time = jiffies;
@ -375,8 +377,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
ret = blk_complete_sghdr_rq(rq, hdr, bio); ret = blk_complete_sghdr_rq(rq, hdr, bio);
out_free_cdb: out_free_cdb:
if (rq->cmd != rq->__cmd) scsi_req_free_cmd(req);
kfree(rq->cmd);
out_put_request: out_put_request:
blk_put_request(rq); blk_put_request(rq);
return ret; return ret;
@ -420,9 +421,10 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
struct scsi_ioctl_command __user *sic) struct scsi_ioctl_command __user *sic)
{ {
struct request *rq; struct request *rq;
struct scsi_request *req;
int err; int err;
unsigned int in_len, out_len, bytes, opcode, cmdlen; unsigned int in_len, out_len, bytes, opcode, cmdlen;
char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE]; char *buffer = NULL;
if (!sic) if (!sic)
return -EINVAL; return -EINVAL;
@ -447,12 +449,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
} }
rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM); rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
__GFP_RECLAIM);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
goto error_free_buffer; goto error_free_buffer;
} }
blk_rq_set_block_pc(rq); req = scsi_req(rq);
scsi_req_init(rq);
cmdlen = COMMAND_SIZE(opcode); cmdlen = COMMAND_SIZE(opcode);
@ -460,14 +464,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
* get command and data to send to device, if any * get command and data to send to device, if any
*/ */
err = -EFAULT; err = -EFAULT;
rq->cmd_len = cmdlen; req->cmd_len = cmdlen;
if (copy_from_user(rq->cmd, sic->data, cmdlen)) if (copy_from_user(req->cmd, sic->data, cmdlen))
goto error; goto error;
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error; goto error;
err = blk_verify_command(rq->cmd, mode & FMODE_WRITE); err = blk_verify_command(req->cmd, mode & FMODE_WRITE);
if (err) if (err)
goto error; goto error;
@ -503,18 +507,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
goto error; goto error;
} }
memset(sense, 0, sizeof(sense));
rq->sense = sense;
rq->sense_len = 0;
blk_execute_rq(q, disk, rq, 0); blk_execute_rq(q, disk, rq, 0);
err = rq->errors & 0xff; /* only 8 bit SCSI status */ err = rq->errors & 0xff; /* only 8 bit SCSI status */
if (err) { if (err) {
if (rq->sense_len && rq->sense) { if (req->sense_len && req->sense) {
bytes = (OMAX_SB_LEN > rq->sense_len) ? bytes = (OMAX_SB_LEN > req->sense_len) ?
rq->sense_len : OMAX_SB_LEN; req->sense_len : OMAX_SB_LEN;
if (copy_to_user(sic->data, rq->sense, bytes)) if (copy_to_user(sic->data, req->sense, bytes))
err = -EFAULT; err = -EFAULT;
} }
} else { } else {
@ -539,14 +539,14 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq; struct request *rq;
int err; int err;
rq = blk_get_request(q, WRITE, __GFP_RECLAIM); rq = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM);
if (IS_ERR(rq)) if (IS_ERR(rq))
return PTR_ERR(rq); return PTR_ERR(rq);
blk_rq_set_block_pc(rq); scsi_req_init(rq);
rq->timeout = BLK_DEFAULT_SG_TIMEOUT; rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq->cmd[0] = cmd; scsi_req(rq)->cmd[0] = cmd;
rq->cmd[4] = data; scsi_req(rq)->cmd[4] = data;
rq->cmd_len = 6; scsi_req(rq)->cmd_len = 6;
err = blk_execute_rq(q, bd_disk, rq, 0); err = blk_execute_rq(q, bd_disk, rq, 0);
blk_put_request(rq); blk_put_request(rq);
@ -743,6 +743,17 @@ int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
} }
EXPORT_SYMBOL(scsi_cmd_blk_ioctl); EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
void scsi_req_init(struct request *rq)
{
struct scsi_request *req = scsi_req(rq);
memset(req->__cmd, 0, sizeof(req->__cmd));
req->cmd = req->__cmd;
req->cmd_len = BLK_MAX_CDB;
req->sense_len = 0;
}
EXPORT_SYMBOL(scsi_req_init);
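
Passthrough users now reset the embedded struct scsi_request right after allocating the request and fill the CDB through scsi_req(), exactly as __blk_send_generic() above does. A hedged sketch of that sequence for a no-data TEST UNIT READY; the helper name is illustrative:

#include <scsi/scsi_request.h>
#include <scsi/scsi_proto.h>

/* Hedged sketch: issue a 6-byte TEST UNIT READY through the new accessors. */
static int example_test_unit_ready(struct request_queue *q,
				   struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, REQ_OP_SCSI_OUT, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scsi_req_init(rq);		/* cmd = __cmd, cmd_len = BLK_MAX_CDB, sense_len = 0 */

	scsi_req(rq)->cmd[0] = TEST_UNIT_READY;
	scsi_req(rq)->cmd_len = 6;
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	err = blk_execute_rq(q, disk, rq, 0);
	blk_put_request(rq);
	return err;
}
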
static int __init blk_scsi_ioctl_init(void) static int __init blk_scsi_ioctl_init(void)
{ {
blk_set_cmd_filter_defaults(&blk_default_cmd_filter); blk_set_cmd_filter_defaults(&blk_default_cmd_filter);

View File

@ -1265,13 +1265,13 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
*/ */
static int atapi_drain_needed(struct request *rq) static int atapi_drain_needed(struct request *rq)
{ {
if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) if (likely(!blk_rq_is_passthrough(rq)))
return 0; return 0;
if (!blk_rq_bytes(rq) || op_is_write(req_op(rq))) if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
return 0; return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
} }
static int ata_scsi_dev_config(struct scsi_device *sdev, static int ata_scsi_dev_config(struct scsi_device *sdev,

View File

@ -69,6 +69,7 @@ config AMIGA_Z2RAM
config GDROM config GDROM
tristate "SEGA Dreamcast GD-ROM drive" tristate "SEGA Dreamcast GD-ROM drive"
depends on SH_DREAMCAST depends on SH_DREAMCAST
select BLK_SCSI_REQUEST # only for the generic cdrom code
help help
A standard SEGA Dreamcast comes with a modified CD ROM drive called a A standard SEGA Dreamcast comes with a modified CD ROM drive called a
"GD-ROM" by SEGA to signify it is capable of reading special disks "GD-ROM" by SEGA to signify it is capable of reading special disks
@ -114,6 +115,7 @@ config BLK_CPQ_CISS_DA
tristate "Compaq Smart Array 5xxx support" tristate "Compaq Smart Array 5xxx support"
depends on PCI depends on PCI
select CHECK_SIGNATURE select CHECK_SIGNATURE
select BLK_SCSI_REQUEST
help help
This is the driver for Compaq Smart Array 5xxx controllers. This is the driver for Compaq Smart Array 5xxx controllers.
Everyone using these boards should say Y here. Everyone using these boards should say Y here.
@ -386,6 +388,7 @@ config BLK_DEV_RAM_DAX
config CDROM_PKTCDVD config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)" tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML depends on !UML
select BLK_SCSI_REQUEST
help help
Note: This driver is deprecated and will be removed from the Note: This driver is deprecated and will be removed from the
kernel in the near future! kernel in the near future!
@ -501,6 +504,16 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with This is the virtual block driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
config VIRTIO_BLK_SCSI
bool "SCSI passthrough request for the Virtio block driver"
depends on VIRTIO_BLK
select BLK_SCSI_REQUEST
---help---
Enable support for SCSI passthrough (e.g. the SG_IO ioctl) on
virtio-blk devices. This is only supported for the legacy
virtio protocol and not enabled by default by any hypervisor.
You probably want to use virtio-scsi instead.
config BLK_DEV_HD config BLK_DEV_HD
bool "Very old hard disk (MFM/RLL/IDE) driver" bool "Very old hard disk (MFM/RLL/IDE) driver"
depends on HAVE_IDE depends on HAVE_IDE

View File

@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->gd); WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP); WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS); blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
q->backing_dev_info.name = "aoe"; q->backing_dev_info->name = "aoe";
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE; q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp; d->bufpool = mp;
d->blkq = gd->queue = q; d->blkq = gd->queue = q;
q->queuedata = d; q->queuedata = d;

View File

@ -52,6 +52,7 @@
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/sg.h> #include <scsi/sg.h>
#include <scsi/scsi_ioctl.h> #include <scsi/scsi_ioctl.h>
#include <scsi/scsi_request.h>
#include <linux/cdrom.h> #include <linux/cdrom.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/kthread.h> #include <linux/kthread.h>
@ -1853,8 +1854,8 @@ static void cciss_softirq_done(struct request *rq)
dev_dbg(&h->pdev->dev, "Done with %p\n", rq); dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
/* set the residual count for pc requests */ /* set the residual count for pc requests */
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) if (blk_rq_is_passthrough(rq))
rq->resid_len = c->err_info->ResidualCnt; scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
@ -1941,9 +1942,16 @@ static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
int drv_index) int drv_index)
{ {
disk->queue = blk_init_queue(do_cciss_request, &h->lock); disk->queue = blk_alloc_queue(GFP_KERNEL);
if (!disk->queue) if (!disk->queue)
goto init_queue_failure; goto init_queue_failure;
disk->queue->cmd_size = sizeof(struct scsi_request);
disk->queue->request_fn = do_cciss_request;
disk->queue->queue_lock = &h->lock;
if (blk_init_allocated_queue(disk->queue) < 0)
goto cleanup_queue;
sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
disk->major = h->major; disk->major = h->major;
disk->first_minor = drv_index << NWD_SHIFT; disk->first_minor = drv_index << NWD_SHIFT;
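The hunk above is the first user of the new setup pattern for legacy request_fn drivers: allocate the queue, size the per-request payload for a struct scsi_request, then initialize the allocated queue. A hedged sketch of that shape with illustrative names; it assumes, as the rest of the series does, that scsi_req() returns the scsi_request embedded at the start of the request payload.

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

/* Illustrative helper, not from the patch. */
static struct request_queue *example_alloc_scsi_queue(spinlock_t *lock,
                                                      request_fn_proc *fn)
{
        struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

        if (!q)
                return NULL;
        /* reserve per-request space that scsi_req(rq) will point into */
        q->cmd_size = sizeof(struct scsi_request);
        q->request_fn = fn;
        q->queue_lock = lock;
        if (blk_init_allocated_queue(q) < 0) {
                blk_cleanup_queue(q);
                return NULL;
        }
        return q;
}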
@ -3075,7 +3083,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
driver_byte = DRIVER_OK; driver_byte = DRIVER_OK;
msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) if (blk_rq_is_passthrough(cmd->rq))
host_byte = DID_PASSTHROUGH; host_byte = DID_PASSTHROUGH;
else else
host_byte = DID_OK; host_byte = DID_OK;
@ -3084,7 +3092,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
host_byte, driver_byte); host_byte, driver_byte);
if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) if (!blk_rq_is_passthrough(cmd->rq))
dev_warn(&h->pdev->dev, "cmd %p " dev_warn(&h->pdev->dev, "cmd %p "
"has SCSI Status 0x%x\n", "has SCSI Status 0x%x\n",
cmd, cmd->err_info->ScsiStatus); cmd, cmd->err_info->ScsiStatus);
@ -3095,31 +3103,23 @@ static inline int evaluate_target_status(ctlr_info_t *h,
sense_key = 0xf & cmd->err_info->SenseInfo[2]; sense_key = 0xf & cmd->err_info->SenseInfo[2];
/* no status or recovered error */ /* no status or recovered error */
if (((sense_key == 0x0) || (sense_key == 0x1)) && if (((sense_key == 0x0) || (sense_key == 0x1)) &&
(cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)) !blk_rq_is_passthrough(cmd->rq))
error_value = 0; error_value = 0;
if (check_for_unit_attention(h, cmd)) { if (check_for_unit_attention(h, cmd)) {
*retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC); *retry_cmd = !blk_rq_is_passthrough(cmd->rq);
return 0; return 0;
} }
/* Not SG_IO or similar? */ /* Not SG_IO or similar? */
if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { if (!blk_rq_is_passthrough(cmd->rq)) {
if (error_value != 0) if (error_value != 0)
dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION" dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
" sense key = 0x%x\n", cmd, sense_key); " sense key = 0x%x\n", cmd, sense_key);
return error_value; return error_value;
} }
/* SG_IO or similar, copy sense data back */ scsi_req(cmd->rq)->sense_len = cmd->err_info->SenseLen;
if (cmd->rq->sense) {
if (cmd->rq->sense_len > cmd->err_info->SenseLen)
cmd->rq->sense_len = cmd->err_info->SenseLen;
memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
cmd->rq->sense_len);
} else
cmd->rq->sense_len = 0;
return error_value; return error_value;
} }
@ -3146,15 +3146,14 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
rq->errors = evaluate_target_status(h, cmd, &retry_cmd); rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
break; break;
case CMD_DATA_UNDERRUN: case CMD_DATA_UNDERRUN:
if (cmd->rq->cmd_type == REQ_TYPE_FS) { if (!blk_rq_is_passthrough(cmd->rq)) {
dev_warn(&h->pdev->dev, "cmd %p has" dev_warn(&h->pdev->dev, "cmd %p has"
" completed with data underrun " " completed with data underrun "
"reported\n", cmd); "reported\n", cmd);
cmd->rq->resid_len = cmd->err_info->ResidualCnt;
} }
break; break;
case CMD_DATA_OVERRUN: case CMD_DATA_OVERRUN:
if (cmd->rq->cmd_type == REQ_TYPE_FS) if (!blk_rq_is_passthrough(cmd->rq))
dev_warn(&h->pdev->dev, "cciss: cmd %p has" dev_warn(&h->pdev->dev, "cciss: cmd %p has"
" completed with data overrun " " completed with data overrun "
"reported\n", cmd); "reported\n", cmd);
@ -3164,7 +3163,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"reported invalid\n", cmd); "reported invalid\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD, rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK, cmd->err_info->CommandStatus, DRIVER_OK,
(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR); DID_PASSTHROUGH : DID_ERROR);
break; break;
case CMD_PROTOCOL_ERR: case CMD_PROTOCOL_ERR:
@ -3172,7 +3171,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"protocol error\n", cmd); "protocol error\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD, rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK, cmd->err_info->CommandStatus, DRIVER_OK,
(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR); DID_PASSTHROUGH : DID_ERROR);
break; break;
case CMD_HARDWARE_ERR: case CMD_HARDWARE_ERR:
@ -3180,7 +3179,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
" hardware error\n", cmd); " hardware error\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD, rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK, cmd->err_info->CommandStatus, DRIVER_OK,
(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR); DID_PASSTHROUGH : DID_ERROR);
break; break;
case CMD_CONNECTION_LOST: case CMD_CONNECTION_LOST:
@ -3188,7 +3187,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"connection lost\n", cmd); "connection lost\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD, rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK, cmd->err_info->CommandStatus, DRIVER_OK,
(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR); DID_PASSTHROUGH : DID_ERROR);
break; break;
case CMD_ABORTED: case CMD_ABORTED:
@ -3196,7 +3195,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"aborted\n", cmd); "aborted\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD, rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK, cmd->err_info->CommandStatus, DRIVER_OK,
(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ABORT); DID_PASSTHROUGH : DID_ABORT);
break; break;
case CMD_ABORT_FAILED: case CMD_ABORT_FAILED:
@ -3204,7 +3203,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"abort failed\n", cmd); "abort failed\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD, rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK, cmd->err_info->CommandStatus, DRIVER_OK,
(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR); DID_PASSTHROUGH : DID_ERROR);
break; break;
case CMD_UNSOLICITED_ABORT: case CMD_UNSOLICITED_ABORT:
@ -3219,21 +3218,21 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"%p retried too many times\n", cmd); "%p retried too many times\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD, rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK, cmd->err_info->CommandStatus, DRIVER_OK,
(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ABORT); DID_PASSTHROUGH : DID_ABORT);
break; break;
case CMD_TIMEOUT: case CMD_TIMEOUT:
dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd); dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD, rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK, cmd->err_info->CommandStatus, DRIVER_OK,
(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR); DID_PASSTHROUGH : DID_ERROR);
break; break;
case CMD_UNABORTABLE: case CMD_UNABORTABLE:
dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd); dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD, rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK, cmd->err_info->CommandStatus, DRIVER_OK,
cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ? blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR); DID_PASSTHROUGH : DID_ERROR);
break; break;
default: default:
@ -3242,7 +3241,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
cmd->err_info->CommandStatus); cmd->err_info->CommandStatus);
rq->errors = make_status_bytes(SAM_STAT_GOOD, rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK, cmd->err_info->CommandStatus, DRIVER_OK,
(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR); DID_PASSTHROUGH : DID_ERROR);
} }
@ -3395,7 +3394,9 @@ static void do_cciss_request(struct request_queue *q)
c->Header.SGList = h->max_cmd_sgentries; c->Header.SGList = h->max_cmd_sgentries;
set_performant_mode(h, c); set_performant_mode(h, c);
if (likely(creq->cmd_type == REQ_TYPE_FS)) { switch (req_op(creq)) {
case REQ_OP_READ:
case REQ_OP_WRITE:
if(h->cciss_read == CCISS_READ_10) { if(h->cciss_read == CCISS_READ_10) {
c->Request.CDB[1] = 0; c->Request.CDB[1] = 0;
c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
@ -3425,12 +3426,16 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
c->Request.CDB[14] = c->Request.CDB[15] = 0; c->Request.CDB[14] = c->Request.CDB[15] = 0;
} }
} else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) { break;
c->Request.CDBLen = creq->cmd_len; case REQ_OP_SCSI_IN:
memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); case REQ_OP_SCSI_OUT:
} else { c->Request.CDBLen = scsi_req(creq)->cmd_len;
memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB);
scsi_req(creq)->sense = c->err_info->SenseInfo;
break;
default:
dev_warn(&h->pdev->dev, "bad request type %d\n", dev_warn(&h->pdev->dev, "bad request type %d\n",
creq->cmd_type); creq->cmd_flags);
BUG(); BUG();
} }
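This is the conversion that recurs through the cciss hunks above and the driver hunks that follow: tests for REQ_TYPE_BLOCK_PC become blk_rq_is_passthrough(), and the sense and residual bookkeeping moves from the request itself into the per-request scsi_request. A hedged completion-side sketch; the hw_* parameters stand in for whatever the hardware reports and are not names from the patch.

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>

/* Illustrative completion helper, not from the patch. */
static void example_complete(struct request *rq, unsigned int hw_resid,
                             const u8 *hw_sense, unsigned int hw_sense_len)
{
        if (blk_rq_is_passthrough(rq)) {
                struct scsi_request *sreq = scsi_req(rq);

                sreq->resid_len = hw_resid;
                sreq->sense_len = min_t(unsigned int, hw_sense_len,
                                        SCSI_SENSE_BUFFERSIZE);
                if (sreq->sense && sreq->sense_len)
                        memcpy(sreq->sense, hw_sense, sreq->sense_len);
        }
        blk_end_request_all(rq, rq->errors ? -EIO : 0);
}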

View File

@ -2462,7 +2462,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
if (get_ldev(device)) { if (get_ldev(device)) {
q = bdev_get_queue(device->ldev->backing_bdev); q = bdev_get_queue(device->ldev->backing_bdev);
r = bdi_congested(&q->backing_dev_info, bdi_bits); r = bdi_congested(q->backing_dev_info, bdi_bits);
put_ldev(device); put_ldev(device);
if (r) if (r)
reason = 'b'; reason = 'b';
@ -2834,8 +2834,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
/* we have no partitions. we contain only ourselves. */ /* we have no partitions. we contain only ourselves. */
device->this_bdev->bd_contains = device->this_bdev; device->this_bdev->bd_contains = device->this_bdev;
q->backing_dev_info.congested_fn = drbd_congested; q->backing_dev_info->congested_fn = drbd_congested;
q->backing_dev_info.congested_data = device; q->backing_dev_info->congested_data = device;
blk_queue_make_request(q, drbd_make_request); blk_queue_make_request(q, drbd_make_request);
blk_queue_write_cache(q, true, true); blk_queue_write_cache(q, true, true);

View File

@ -1328,11 +1328,13 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
if (b) { if (b) {
blk_queue_stack_limits(q, b); blk_queue_stack_limits(q, b);
if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { if (q->backing_dev_info->ra_pages !=
b->backing_dev_info->ra_pages) {
drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
q->backing_dev_info.ra_pages, q->backing_dev_info->ra_pages,
b->backing_dev_info.ra_pages); b->backing_dev_info->ra_pages);
q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; q->backing_dev_info->ra_pages =
b->backing_dev_info->ra_pages;
} }
} }
fixup_discard_if_not_supported(q); fixup_discard_if_not_supported(q);
@ -3345,7 +3347,7 @@ static void device_to_statistics(struct device_statistics *s,
s->dev_disk_flags = md->flags; s->dev_disk_flags = md->flags;
q = bdev_get_queue(device->ldev->backing_bdev); q = bdev_get_queue(device->ldev->backing_bdev);
s->dev_lower_blocked = s->dev_lower_blocked =
bdi_congested(&q->backing_dev_info, bdi_congested(q->backing_dev_info,
(1 << WB_async_congested) | (1 << WB_async_congested) |
(1 << WB_sync_congested)); (1 << WB_sync_congested));
put_ldev(device); put_ldev(device);

View File

@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%2d: cs:Unconfigured\n", i); seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else { } else {
/* reset device->congestion_reason */ /* reset device->congestion_reason */
bdi_rw_congested(&device->rq_queue->backing_dev_info); bdi_rw_congested(device->rq_queue->backing_dev_info);
nc = rcu_dereference(first_peer_device(device)->connection->net_conf); nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' '; wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';

View File

@ -938,7 +938,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
switch (rbm) { switch (rbm) {
case RB_CONGESTED_REMOTE: case RB_CONGESTED_REMOTE:
bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info; bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
return bdi_read_congested(bdi); return bdi_read_congested(bdi);
case RB_LEAST_PENDING: case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) > return atomic_read(&device->local_cnt) >

View File

@ -2900,8 +2900,8 @@ static void do_fd_request(struct request_queue *q)
return; return;
if (WARN(atomic_read(&usage_count) == 0, if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n", "warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
current_req, (long)blk_rq_pos(current_req), current_req->cmd_type, current_req, (long)blk_rq_pos(current_req),
(unsigned long long) current_req->cmd_flags)) (unsigned long long) current_req->cmd_flags))
return; return;

View File

@ -626,30 +626,29 @@ static void hd_request(void)
req_data_dir(req) == READ ? "read" : "writ", req_data_dir(req) == READ ? "read" : "writ",
cyl, head, sec, nsect, bio_data(req->bio)); cyl, head, sec, nsect, bio_data(req->bio));
#endif #endif
if (req->cmd_type == REQ_TYPE_FS) {
switch (rq_data_dir(req)) { switch (req_op(req)) {
case READ: case REQ_OP_READ:
hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ, hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
&read_intr); &read_intr);
if (reset) if (reset)
goto repeat; goto repeat;
break; break;
case WRITE: case REQ_OP_WRITE:
hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE, hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
&write_intr); &write_intr);
if (reset) if (reset)
goto repeat; goto repeat;
if (wait_DRQ()) { if (wait_DRQ()) {
bad_rw_intr(); bad_rw_intr();
goto repeat; goto repeat;
}
outsw(HD_DATA, bio_data(req->bio), 256);
break;
default:
printk("unknown hd-command\n");
hd_end_request_cur(-EIO);
break;
} }
outsw(HD_DATA, bio_data(req->bio), 256);
break;
default:
printk("unknown hd-command\n");
hd_end_request_cur(-EIO);
break;
} }
} }

View File

@ -670,15 +670,17 @@ static void mg_request_poll(struct request_queue *q)
break; break;
} }
if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { switch (req_op(host->req)) {
mg_end_request_cur(host, -EIO); case REQ_OP_READ:
continue;
}
if (rq_data_dir(host->req) == READ)
mg_read(host->req); mg_read(host->req);
else break;
case REQ_OP_WRITE:
mg_write(host->req); mg_write(host->req);
break;
default:
mg_end_request_cur(host, -EIO);
break;
}
} }
} }
@ -687,13 +689,15 @@ static unsigned int mg_issue_req(struct request *req,
unsigned int sect_num, unsigned int sect_num,
unsigned int sect_cnt) unsigned int sect_cnt)
{ {
if (rq_data_dir(req) == READ) { switch (req_op(host->req)) {
case REQ_OP_READ:
if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr) if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
!= MG_ERR_NONE) { != MG_ERR_NONE) {
mg_bad_rw_intr(host); mg_bad_rw_intr(host);
return host->error; return host->error;
} }
} else { break;
case REQ_OP_WRITE:
/* TODO : handler */ /* TODO : handler */
outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
@ -712,6 +716,10 @@ static unsigned int mg_issue_req(struct request *req,
mod_timer(&host->timer, jiffies + 3 * HZ); mod_timer(&host->timer, jiffies + 3 * HZ);
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND); MG_REG_COMMAND);
break;
default:
mg_end_request_cur(host, -EIO);
break;
} }
return MG_ERR_NONE; return MG_ERR_NONE;
} }
@ -753,11 +761,6 @@ static void mg_request(struct request_queue *q)
continue; continue;
} }
if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
mg_end_request_cur(host, -EIO);
continue;
}
if (!mg_issue_req(req, host, sect_num, sect_cnt)) if (!mg_issue_req(req, host, sect_num, sect_cnt))
return; return;
} }

View File

@ -41,6 +41,9 @@
#include <linux/nbd.h> #include <linux/nbd.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
struct nbd_sock { struct nbd_sock {
struct socket *sock; struct socket *sock;
struct mutex tx_lock; struct mutex tx_lock;
@ -89,8 +92,9 @@ static struct dentry *nbd_dbg_dir;
#define NBD_MAGIC 0x68797548 #define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16; static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part; static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;
static inline struct device *nbd_to_dev(struct nbd_device *nbd) static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{ {
@ -193,13 +197,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
req->errors++; req->errors++;
/*
* If our disconnect packet times out then we're already holding the
* config_lock and could deadlock here, so just set an error and return,
* we'll handle shutting everything down later.
*/
if (req->cmd_type == REQ_TYPE_DRV_PRIV)
return BLK_EH_HANDLED;
mutex_lock(&nbd->config_lock); mutex_lock(&nbd->config_lock);
sock_shutdown(nbd); sock_shutdown(nbd);
mutex_unlock(&nbd->config_lock); mutex_unlock(&nbd->config_lock);
@ -278,14 +275,29 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
u32 type; u32 type;
u32 tag = blk_mq_unique_tag(req); u32 tag = blk_mq_unique_tag(req);
if (req_op(req) == REQ_OP_DISCARD) switch (req_op(req)) {
case REQ_OP_DISCARD:
type = NBD_CMD_TRIM; type = NBD_CMD_TRIM;
else if (req_op(req) == REQ_OP_FLUSH) break;
case REQ_OP_FLUSH:
type = NBD_CMD_FLUSH; type = NBD_CMD_FLUSH;
else if (rq_data_dir(req) == WRITE) break;
case REQ_OP_WRITE:
type = NBD_CMD_WRITE; type = NBD_CMD_WRITE;
else break;
case REQ_OP_READ:
type = NBD_CMD_READ; type = NBD_CMD_READ;
break;
default:
return -EIO;
}
if (rq_data_dir(req) == WRITE &&
(nbd->flags & NBD_FLAG_READ_ONLY)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Write on read-only\n");
return -EIO;
}
memset(&request, 0, sizeof(request)); memset(&request, 0, sizeof(request));
request.magic = htonl(NBD_REQUEST_MAGIC); request.magic = htonl(NBD_REQUEST_MAGIC);
@ -510,18 +522,6 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
goto error_out; goto error_out;
} }
if (req->cmd_type != REQ_TYPE_FS &&
req->cmd_type != REQ_TYPE_DRV_PRIV)
goto error_out;
if (req->cmd_type == REQ_TYPE_FS &&
rq_data_dir(req) == WRITE &&
(nbd->flags & NBD_FLAG_READ_ONLY)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Write on read-only\n");
goto error_out;
}
req->errors = 0; req->errors = 0;
nsock = nbd->socks[index]; nsock = nbd->socks[index];
@ -785,7 +785,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
INIT_WORK(&args[i].work, recv_work); INIT_WORK(&args[i].work, recv_work);
args[i].nbd = nbd; args[i].nbd = nbd;
args[i].index = i; args[i].index = i;
queue_work(system_long_wq, &args[i].work); queue_work(recv_workqueue, &args[i].work);
} }
wait_event_interruptible(nbd->recv_wq, wait_event_interruptible(nbd->recv_wq,
atomic_read(&nbd->recv_threads) == 0); atomic_read(&nbd->recv_threads) == 0);
@ -996,6 +996,103 @@ static struct blk_mq_ops nbd_mq_ops = {
.timeout = nbd_xmit_timeout, .timeout = nbd_xmit_timeout,
}; };
static void nbd_dev_remove(struct nbd_device *nbd)
{
struct gendisk *disk = nbd->disk;
nbd->magic = 0;
if (disk) {
del_gendisk(disk);
blk_cleanup_queue(disk->queue);
blk_mq_free_tag_set(&nbd->tag_set);
put_disk(disk);
}
kfree(nbd);
}
static int nbd_dev_add(int index)
{
struct nbd_device *nbd;
struct gendisk *disk;
struct request_queue *q;
int err = -ENOMEM;
nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
if (!nbd)
goto out;
disk = alloc_disk(1 << part_shift);
if (!disk)
goto out_free_nbd;
if (index >= 0) {
err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
GFP_KERNEL);
if (err == -ENOSPC)
err = -EEXIST;
} else {
err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
if (err >= 0)
index = err;
}
if (err < 0)
goto out_free_disk;
nbd->disk = disk;
nbd->tag_set.ops = &nbd_mq_ops;
nbd->tag_set.nr_hw_queues = 1;
nbd->tag_set.queue_depth = 128;
nbd->tag_set.numa_node = NUMA_NO_NODE;
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
err = blk_mq_alloc_tag_set(&nbd->tag_set);
if (err)
goto out_free_idr;
q = blk_mq_init_queue(&nbd->tag_set);
if (IS_ERR(q)) {
err = PTR_ERR(q);
goto out_free_tags;
}
disk->queue = q;
/*
* Tell the block layer that we are not a rotational device
*/
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 512;
blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
disk->queue->limits.discard_zeroes_data = 0;
blk_queue_max_hw_sectors(disk->queue, 65536);
disk->queue->limits.max_sectors = 256;
nbd->magic = NBD_MAGIC;
mutex_init(&nbd->config_lock);
disk->major = NBD_MAJOR;
disk->first_minor = index << part_shift;
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
init_waitqueue_head(&nbd->recv_wq);
nbd_reset(nbd);
add_disk(disk);
return index;
out_free_tags:
blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
idr_remove(&nbd_index_idr, index);
out_free_disk:
put_disk(disk);
out_free_nbd:
kfree(nbd);
out:
return err;
}
/* /*
* And here should be modules and kernel interface * And here should be modules and kernel interface
* (Just smiley confuses emacs :-) * (Just smiley confuses emacs :-)
@ -1003,9 +1100,7 @@ static struct blk_mq_ops nbd_mq_ops = {
static int __init nbd_init(void) static int __init nbd_init(void)
{ {
int err = -ENOMEM;
int i; int i;
int part_shift;
BUILD_BUG_ON(sizeof(struct nbd_request) != 28); BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
@ -1034,111 +1129,38 @@ static int __init nbd_init(void)
if (nbds_max > 1UL << (MINORBITS - part_shift)) if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL; return -EINVAL;
recv_workqueue = alloc_workqueue("knbd-recv",
nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!nbd_dev) if (!recv_workqueue)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < nbds_max; i++) { if (register_blkdev(NBD_MAJOR, "nbd"))
struct request_queue *q; return -EIO;
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
goto out;
nbd_dev[i].disk = disk;
nbd_dev[i].tag_set.ops = &nbd_mq_ops;
nbd_dev[i].tag_set.nr_hw_queues = 1;
nbd_dev[i].tag_set.queue_depth = 128;
nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
nbd_dev[i].tag_set.driver_data = &nbd_dev[i];
err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
if (err) {
put_disk(disk);
goto out;
}
/*
* The new linux 2.5 block layer implementation requires
* every gendisk to have its very own request_queue struct.
* These structs are big so we dynamically allocate them.
*/
q = blk_mq_init_queue(&nbd_dev[i].tag_set);
if (IS_ERR(q)) {
blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
goto out;
}
disk->queue = q;
/*
* Tell the block layer that we are not a rotational device
*/
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 512;
blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
disk->queue->limits.discard_zeroes_data = 0;
blk_queue_max_hw_sectors(disk->queue, 65536);
disk->queue->limits.max_sectors = 256;
}
if (register_blkdev(NBD_MAJOR, "nbd")) {
err = -EIO;
goto out;
}
printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
nbd_dbg_init(); nbd_dbg_init();
for (i = 0; i < nbds_max; i++) { mutex_lock(&nbd_index_mutex);
struct gendisk *disk = nbd_dev[i].disk; for (i = 0; i < nbds_max; i++)
nbd_dev[i].magic = NBD_MAGIC; nbd_dev_add(i);
mutex_init(&nbd_dev[i].config_lock); mutex_unlock(&nbd_index_mutex);
disk->major = NBD_MAJOR; return 0;
disk->first_minor = i << part_shift; }
disk->fops = &nbd_fops;
disk->private_data = &nbd_dev[i]; static int nbd_exit_cb(int id, void *ptr, void *data)
sprintf(disk->disk_name, "nbd%d", i); {
init_waitqueue_head(&nbd_dev[i].recv_wq); struct nbd_device *nbd = ptr;
nbd_reset(&nbd_dev[i]); nbd_dev_remove(nbd);
add_disk(disk);
}
return 0; return 0;
out:
while (i--) {
blk_mq_free_tag_set(&nbd_dev[i].tag_set);
blk_cleanup_queue(nbd_dev[i].disk->queue);
put_disk(nbd_dev[i].disk);
}
kfree(nbd_dev);
return err;
} }
static void __exit nbd_cleanup(void) static void __exit nbd_cleanup(void)
{ {
int i;
nbd_dbg_close(); nbd_dbg_close();
for (i = 0; i < nbds_max; i++) { idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
struct gendisk *disk = nbd_dev[i].disk; idr_destroy(&nbd_index_idr);
nbd_dev[i].magic = 0; destroy_workqueue(recv_workqueue);
if (disk) {
del_gendisk(disk);
blk_cleanup_queue(disk->queue);
blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
}
}
unregister_blkdev(NBD_MAJOR, "nbd"); unregister_blkdev(NBD_MAJOR, "nbd");
kfree(nbd_dev);
printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
} }
module_init(nbd_init); module_init(nbd_init);

View File

@ -432,11 +432,11 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct request *rq; struct request *rq;
struct bio *bio = rqd->bio; struct bio *bio = rqd->bio;
rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0); rq = blk_mq_alloc_request(q,
op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq)) if (IS_ERR(rq))
return -ENOMEM; return -ENOMEM;
rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->__sector = bio->bi_iter.bi_sector; rq->__sector = bio->bi_iter.bi_sector;
rq->ioprio = bio_prio(bio); rq->ioprio = bio_prio(bio);

View File

@ -308,12 +308,6 @@ static void osdblk_rq_fn(struct request_queue *q)
if (!rq) if (!rq)
break; break;
/* filter out block requests we don't understand */
if (rq->cmd_type != REQ_TYPE_FS) {
blk_end_request_all(rq, 0);
continue;
}
/* deduce our operation (read, write, flush) */ /* deduce our operation (read, write, flush) */
/* I wish the block layer simplified cmd_type/cmd_flags/cmd[] /* I wish the block layer simplified cmd_type/cmd_flags/cmd[]
* into a clearly defined set of RPC commands: * into a clearly defined set of RPC commands:

View File

@ -25,6 +25,7 @@ config PARIDE_PD
config PARIDE_PCD config PARIDE_PCD
tristate "Parallel port ATAPI CD-ROMs" tristate "Parallel port ATAPI CD-ROMs"
depends on PARIDE depends on PARIDE
select BLK_SCSI_REQUEST # only for the generic cdrom code
---help--- ---help---
This option enables the high-level driver for ATAPI CD-ROM devices This option enables the high-level driver for ATAPI CD-ROM devices
connected through a parallel port. If you chose to build PARIDE connected through a parallel port. If you chose to build PARIDE

View File

@ -439,18 +439,16 @@ static int pd_retries = 0; /* i/o error retry count */
static int pd_block; /* address of next requested block */ static int pd_block; /* address of next requested block */
static int pd_count; /* number of blocks still to do */ static int pd_count; /* number of blocks still to do */
static int pd_run; /* sectors in current cluster */ static int pd_run; /* sectors in current cluster */
static int pd_cmd; /* current command READ/WRITE */
static char *pd_buf; /* buffer for request in progress */ static char *pd_buf; /* buffer for request in progress */
static enum action do_pd_io_start(void) static enum action do_pd_io_start(void)
{ {
if (pd_req->cmd_type == REQ_TYPE_DRV_PRIV) { switch (req_op(pd_req)) {
case REQ_OP_DRV_IN:
phase = pd_special; phase = pd_special;
return pd_special(); return pd_special();
} case REQ_OP_READ:
case REQ_OP_WRITE:
pd_cmd = rq_data_dir(pd_req);
if (pd_cmd == READ || pd_cmd == WRITE) {
pd_block = blk_rq_pos(pd_req); pd_block = blk_rq_pos(pd_req);
pd_count = blk_rq_cur_sectors(pd_req); pd_count = blk_rq_cur_sectors(pd_req);
if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
@ -458,7 +456,7 @@ static enum action do_pd_io_start(void)
pd_run = blk_rq_sectors(pd_req); pd_run = blk_rq_sectors(pd_req);
pd_buf = bio_data(pd_req->bio); pd_buf = bio_data(pd_req->bio);
pd_retries = 0; pd_retries = 0;
if (pd_cmd == READ) if (req_op(pd_req) == REQ_OP_READ)
return do_pd_read_start(); return do_pd_read_start();
else else
return do_pd_write_start(); return do_pd_write_start();
@ -723,11 +721,10 @@ static int pd_special_command(struct pd_unit *disk,
struct request *rq; struct request *rq;
int err = 0; int err = 0;
rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM); rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
if (IS_ERR(rq)) if (IS_ERR(rq))
return PTR_ERR(rq); return PTR_ERR(rq);
rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->special = func; rq->special = func;
err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0); err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);

View File

@ -704,10 +704,10 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
int ret = 0; int ret = 0;
rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
WRITE : READ, __GFP_RECLAIM); REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(rq)) if (IS_ERR(rq))
return PTR_ERR(rq); return PTR_ERR(rq);
blk_rq_set_block_pc(rq); scsi_req_init(rq);
if (cgc->buflen) { if (cgc->buflen) {
ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@ -716,8 +716,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
goto out; goto out;
} }
rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]); scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);
rq->timeout = 60*HZ; rq->timeout = 60*HZ;
if (cgc->quiet) if (cgc->quiet)
@ -1243,7 +1243,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
&& pd->bio_queue_size <= pd->write_congestion_off); && pd->bio_queue_size <= pd->write_congestion_off);
spin_unlock(&pd->lock); spin_unlock(&pd->lock);
if (wakeup) { if (wakeup) {
clear_bdi_congested(&pd->disk->queue->backing_dev_info, clear_bdi_congested(pd->disk->queue->backing_dev_info,
BLK_RW_ASYNC); BLK_RW_ASYNC);
} }
@ -2370,7 +2370,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock); spin_lock(&pd->lock);
if (pd->write_congestion_on > 0 if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) { && pd->bio_queue_size >= pd->write_congestion_on) {
set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC); set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
do { do {
spin_unlock(&pd->lock); spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ); congestion_wait(BLK_RW_ASYNC, HZ);

View File

@ -196,16 +196,19 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = blk_fetch_request(q))) { while ((req = blk_fetch_request(q))) {
if (req_op(req) == REQ_OP_FLUSH) { switch (req_op(req)) {
case REQ_OP_FLUSH:
if (ps3disk_submit_flush_request(dev, req)) if (ps3disk_submit_flush_request(dev, req))
break; return;
} else if (req->cmd_type == REQ_TYPE_FS) { break;
case REQ_OP_READ:
case REQ_OP_WRITE:
if (ps3disk_submit_request_sg(dev, req)) if (ps3disk_submit_request_sg(dev, req))
break; return;
} else { break;
default:
blk_dump_rq_flags(req, DEVICE_NAME " bad request"); blk_dump_rq_flags(req, DEVICE_NAME " bad request");
__blk_end_request_all(req, -EIO); __blk_end_request_all(req, -EIO);
continue;
} }
} }
} }

View File

@ -4099,20 +4099,22 @@ static void rbd_queue_workfn(struct work_struct *work)
bool must_be_locked; bool must_be_locked;
int result; int result;
if (rq->cmd_type != REQ_TYPE_FS) { switch (req_op(rq)) {
dout("%s: non-fs request type %d\n", __func__, case REQ_OP_DISCARD:
(int) rq->cmd_type); op_type = OBJ_OP_DISCARD;
break;
case REQ_OP_WRITE:
op_type = OBJ_OP_WRITE;
break;
case REQ_OP_READ:
op_type = OBJ_OP_READ;
break;
default:
dout("%s: non-fs request type %d\n", __func__, req_op(rq));
result = -EIO; result = -EIO;
goto err; goto err;
} }
if (req_op(rq) == REQ_OP_DISCARD)
op_type = OBJ_OP_DISCARD;
else if (req_op(rq) == REQ_OP_WRITE)
op_type = OBJ_OP_WRITE;
else
op_type = OBJ_OP_READ;
/* Ignore/skip any zero-length requests */ /* Ignore/skip any zero-length requests */
if (!length) { if (!length) {
@ -4524,7 +4526,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.discard_zeroes_data = 1; q->limits.discard_zeroes_data = 1;
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES; q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
disk->queue = q; disk->queue = q;

View File

@ -1204,10 +1204,11 @@ static void skd_complete_special(struct skd_device *skdev,
static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode, static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
uint cmd_in, ulong arg) uint cmd_in, ulong arg)
{ {
int rc = 0; static const int sg_version_num = 30527;
int rc = 0, timeout;
struct gendisk *disk = bdev->bd_disk; struct gendisk *disk = bdev->bd_disk;
struct skd_device *skdev = disk->private_data; struct skd_device *skdev = disk->private_data;
void __user *p = (void *)arg; int __user *p = (int __user *)arg;
pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n", pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
skdev->name, __func__, __LINE__, skdev->name, __func__, __LINE__,
@ -1218,12 +1219,18 @@ static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
switch (cmd_in) { switch (cmd_in) {
case SG_SET_TIMEOUT: case SG_SET_TIMEOUT:
rc = get_user(timeout, p);
if (!rc)
disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
break;
case SG_GET_TIMEOUT: case SG_GET_TIMEOUT:
rc = jiffies_to_clock_t(disk->queue->sg_timeout);
break;
case SG_GET_VERSION_NUM: case SG_GET_VERSION_NUM:
rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p); rc = put_user(sg_version_num, p);
break; break;
case SG_IO: case SG_IO:
rc = skd_ioctl_sg_io(skdev, mode, p); rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
break; break;
default: default:

View File

@ -567,7 +567,7 @@ static struct carm_request *carm_get_special(struct carm_host *host)
if (!crq) if (!crq)
return NULL; return NULL;
rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL); rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, GFP_KERNEL);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
spin_lock_irqsave(&host->lock, flags); spin_lock_irqsave(&host->lock, flags);
carm_put_request(host, crq); carm_put_request(host, crq);
@ -620,7 +620,6 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock); spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
crq->rq->special = crq; crq->rq->special = crq;
blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
@ -661,7 +660,6 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc; crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
crq->rq->special = crq; crq->rq->special = crq;
blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);

View File

@ -52,11 +52,13 @@ struct virtio_blk {
}; };
struct virtblk_req { struct virtblk_req {
struct request *req; #ifdef CONFIG_VIRTIO_BLK_SCSI
struct virtio_blk_outhdr out_hdr; struct scsi_request sreq; /* for SCSI passthrough, must be first */
struct virtio_scsi_inhdr in_hdr;
u8 status;
u8 sense[SCSI_SENSE_BUFFERSIZE]; u8 sense[SCSI_SENSE_BUFFERSIZE];
struct virtio_scsi_inhdr in_hdr;
#endif
struct virtio_blk_outhdr out_hdr;
u8 status;
struct scatterlist sg[]; struct scatterlist sg[];
}; };
@ -72,28 +74,23 @@ static inline int virtblk_result(struct virtblk_req *vbr)
} }
} }
static int __virtblk_add_req(struct virtqueue *vq, /*
struct virtblk_req *vbr, * If this is a packet command we need a couple of additional headers. Behind
struct scatterlist *data_sg, * the normal outhdr we put a segment with the scsi command block, and before
bool have_data) * the normal inhdr we put the sense data and the inhdr with additional status
* information.
*/
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
struct scatterlist *data_sg, bool have_data)
{ {
struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6]; struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
unsigned int num_out = 0, num_in = 0; unsigned int num_out = 0, num_in = 0;
__virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);
sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr; sgs[num_out++] = &hdr;
sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
/* sgs[num_out++] = &cmd;
* If this is a packet command we need a couple of additional headers.
* Behind the normal outhdr we put a segment with the scsi command
* block, and before the normal inhdr we put the sense data and the
* inhdr with additional status information.
*/
if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
sgs[num_out++] = &cmd;
}
if (have_data) { if (have_data) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT)) if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
@ -102,12 +99,69 @@ static int __virtblk_add_req(struct virtqueue *vq,
sgs[num_out + num_in++] = data_sg; sgs[num_out + num_in++] = data_sg;
} }
if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) { sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE); sgs[num_out + num_in++] = &sense;
sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE); sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
sgs[num_out + num_in++] = &sense; sgs[num_out + num_in++] = &inhdr;
sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); sg_init_one(&status, &vbr->status, sizeof(vbr->status));
sgs[num_out + num_in++] = &inhdr; sgs[num_out + num_in++] = &status;
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
static inline void virtblk_scsi_reques_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
struct virtio_blk *vblk = req->q->queuedata;
struct scsi_request *sreq = &vbr->sreq;
sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
return -ENOTTY;
return scsi_cmd_blk_ioctl(bdev, mode, cmd,
(void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
struct virtblk_req *vbr, struct scatterlist *data_sg,
bool have_data)
{
return -EIO;
}
static inline void virtblk_scsi_reques_done(struct request *req)
{
}
#define virtblk_ioctl NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
struct scatterlist *data_sg, bool have_data)
{
struct scatterlist hdr, status, *sgs[3];
unsigned int num_out = 0, num_in = 0;
sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr;
if (have_data) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
sgs[num_out++] = data_sg;
else
sgs[num_out + num_in++] = data_sg;
} }
sg_init_one(&status, &vbr->status, sizeof(vbr->status)); sg_init_one(&status, &vbr->status, sizeof(vbr->status));
@ -119,15 +173,16 @@ static int __virtblk_add_req(struct virtqueue *vq,
static inline void virtblk_request_done(struct request *req) static inline void virtblk_request_done(struct request *req)
{ {
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
struct virtio_blk *vblk = req->q->queuedata;
int error = virtblk_result(vbr); int error = virtblk_result(vbr);
if (req->cmd_type == REQ_TYPE_BLOCK_PC) { switch (req_op(req)) {
req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual); case REQ_OP_SCSI_IN:
req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len); case REQ_OP_SCSI_OUT:
req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors); virtblk_scsi_reques_done(req);
} else if (req->cmd_type == REQ_TYPE_DRV_PRIV) { break;
case REQ_OP_DRV_IN:
req->errors = (error != 0); req->errors = (error != 0);
break;
} }
blk_mq_end_request(req, error); blk_mq_end_request(req, error);
@ -146,7 +201,9 @@ static void virtblk_done(struct virtqueue *vq)
do { do {
virtqueue_disable_cb(vq); virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
blk_mq_complete_request(vbr->req, vbr->req->errors); struct request *req = blk_mq_rq_from_pdu(vbr);
blk_mq_complete_request(req, req->errors);
req_done = true; req_done = true;
} }
if (unlikely(virtqueue_is_broken(vq))) if (unlikely(virtqueue_is_broken(vq)))
@ -170,49 +227,50 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
int qid = hctx->queue_num; int qid = hctx->queue_num;
int err; int err;
bool notify = false; bool notify = false;
u32 type;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
vbr->req = req; switch (req_op(req)) {
if (req_op(req) == REQ_OP_FLUSH) { case REQ_OP_READ:
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH); case REQ_OP_WRITE:
vbr->out_hdr.sector = 0; type = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); break;
} else { case REQ_OP_FLUSH:
switch (req->cmd_type) { type = VIRTIO_BLK_T_FLUSH;
case REQ_TYPE_FS: break;
vbr->out_hdr.type = 0; case REQ_OP_SCSI_IN:
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req)); case REQ_OP_SCSI_OUT:
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); type = VIRTIO_BLK_T_SCSI_CMD;
break; break;
case REQ_TYPE_BLOCK_PC: case REQ_OP_DRV_IN:
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD); type = VIRTIO_BLK_T_GET_ID;
vbr->out_hdr.sector = 0; break;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); default:
break; WARN_ON_ONCE(1);
case REQ_TYPE_DRV_PRIV: return BLK_MQ_RQ_QUEUE_ERROR;
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
default:
/* We don't put anything else in the queue. */
BUG();
}
} }
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
vbr->out_hdr.sector = type ?
0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
blk_mq_start_request(req); blk_mq_start_request(req);
num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg); num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
if (num) { if (num) {
if (rq_data_dir(vbr->req) == WRITE) if (rq_data_dir(req) == WRITE)
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT); vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
else else
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN); vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
} }
spin_lock_irqsave(&vblk->vqs[qid].lock, flags); spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
else
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) { if (err) {
virtqueue_kick(vblk->vqs[qid].vq); virtqueue_kick(vblk->vqs[qid].vq);
blk_mq_stop_hw_queue(hctx); blk_mq_stop_hw_queue(hctx);
@ -242,10 +300,9 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
struct request *req; struct request *req;
int err; int err;
req = blk_get_request(q, READ, GFP_KERNEL); req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
if (IS_ERR(req)) if (IS_ERR(req))
return PTR_ERR(req); return PTR_ERR(req);
req->cmd_type = REQ_TYPE_DRV_PRIV;
err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err) if (err)
@ -257,22 +314,6 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
return err; return err;
} }
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
return -ENOTTY;
return scsi_cmd_blk_ioctl(bdev, mode, cmd,
(void __user *)data);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */ /* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{ {
@ -538,6 +579,9 @@ static int virtblk_init_request(void *data, struct request *rq,
struct virtio_blk *vblk = data; struct virtio_blk *vblk = data;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
#ifdef CONFIG_VIRTIO_BLK_SCSI
vbr->sreq.sense = vbr->sense;
#endif
sg_init_table(vbr->sg, vblk->sg_elems); sg_init_table(vbr->sg, vblk->sg_elems);
return 0; return 0;
} }
@ -821,7 +865,10 @@ static const struct virtio_device_id id_table[] = {
static unsigned int features_legacy[] = { static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
VIRTIO_BLK_F_SCSI,
#endif
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_MQ,
} }

View File

@ -865,7 +865,7 @@ static inline void flush_requests(struct blkfront_ring_info *rinfo)
static inline bool blkif_request_flush_invalid(struct request *req, static inline bool blkif_request_flush_invalid(struct request *req,
struct blkfront_info *info) struct blkfront_info *info)
{ {
return ((req->cmd_type != REQ_TYPE_FS) || return (blk_rq_is_passthrough(req) ||
((req_op(req) == REQ_OP_FLUSH) && ((req_op(req) == REQ_OP_FLUSH) &&
!info->feature_flush) || !info->feature_flush) ||
((req->cmd_flags & REQ_FUA) && ((req->cmd_flags & REQ_FUA) &&

View File

@ -468,7 +468,7 @@ static struct request *ace_get_next_request(struct request_queue *q)
struct request *req; struct request *req;
while ((req = blk_peek_request(q)) != NULL) { while ((req = blk_peek_request(q)) != NULL) {
if (req->cmd_type == REQ_TYPE_FS) if (!blk_rq_is_passthrough(req))
break; break;
blk_start_request(req); blk_start_request(req);
__blk_end_request_all(req, -EIO); __blk_end_request_all(req, -EIO);

View File

@ -117,7 +117,7 @@ static void zram_revalidate_disk(struct zram *zram)
{ {
revalidate_disk(zram->disk); revalidate_disk(zram->disk);
/* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */ /* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
zram->disk->queue->backing_dev_info.capabilities |= zram->disk->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES; BDI_CAP_STABLE_WRITES;
} }

View File

@ -281,8 +281,8 @@
#include <linux/fcntl.h> #include <linux/fcntl.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/times.h> #include <linux/times.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <scsi/scsi_request.h>
/* used to tell the module to turn on full debugging messages */ /* used to tell the module to turn on full debugging messages */
static bool debug; static bool debug;
@ -2170,6 +2170,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
{ {
struct request_queue *q = cdi->disk->queue; struct request_queue *q = cdi->disk->queue;
struct request *rq; struct request *rq;
struct scsi_request *req;
struct bio *bio; struct bio *bio;
unsigned int len; unsigned int len;
int nr, ret = 0; int nr, ret = 0;
@ -2188,12 +2189,13 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
len = nr * CD_FRAMESIZE_RAW; len = nr * CD_FRAMESIZE_RAW;
rq = blk_get_request(q, READ, GFP_KERNEL); rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
ret = PTR_ERR(rq); ret = PTR_ERR(rq);
break; break;
} }
blk_rq_set_block_pc(rq); req = scsi_req(rq);
scsi_req_init(rq);
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL); ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret) { if (ret) {
@ -2201,23 +2203,23 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
break; break;
} }
rq->cmd[0] = GPCMD_READ_CD; req->cmd[0] = GPCMD_READ_CD;
rq->cmd[1] = 1 << 2; req->cmd[1] = 1 << 2;
rq->cmd[2] = (lba >> 24) & 0xff; req->cmd[2] = (lba >> 24) & 0xff;
rq->cmd[3] = (lba >> 16) & 0xff; req->cmd[3] = (lba >> 16) & 0xff;
rq->cmd[4] = (lba >> 8) & 0xff; req->cmd[4] = (lba >> 8) & 0xff;
rq->cmd[5] = lba & 0xff; req->cmd[5] = lba & 0xff;
rq->cmd[6] = (nr >> 16) & 0xff; req->cmd[6] = (nr >> 16) & 0xff;
rq->cmd[7] = (nr >> 8) & 0xff; req->cmd[7] = (nr >> 8) & 0xff;
rq->cmd[8] = nr & 0xff; req->cmd[8] = nr & 0xff;
rq->cmd[9] = 0xf8; req->cmd[9] = 0xf8;
rq->cmd_len = 12; req->cmd_len = 12;
rq->timeout = 60 * HZ; rq->timeout = 60 * HZ;
bio = rq->bio; bio = rq->bio;
if (blk_execute_rq(q, cdi->disk, rq, 0)) { if (blk_execute_rq(q, cdi->disk, rq, 0)) {
struct request_sense *s = rq->sense; struct request_sense *s = req->sense;
ret = -EIO; ret = -EIO;
cdi->last_sense = s->sense_key; cdi->last_sense = s->sense_key;
} }
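The cdrom_read_cdda_bpc() hunk above is the template the rest of the series follows for issuing a CDB through the block layer. Condensed into a hedged helper, with the CDB, buffer and timeout as caller-supplied placeholders rather than values from the patch:

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

/* Illustrative helper, not from the patch; cdb_len must be <= BLK_MAX_CDB. */
static int example_issue_cdb(struct request_queue *q, struct gendisk *disk,
                             const unsigned char *cdb, unsigned int cdb_len,
                             void *buf, unsigned int len)
{
        struct request *rq;
        struct scsi_request *req;
        int ret;

        rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        scsi_req_init(rq);
        req = scsi_req(rq);

        ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (ret)
                goto out;

        memcpy(req->cmd, cdb, cdb_len);
        req->cmd_len = cdb_len;
        rq->timeout = 60 * HZ;

        ret = blk_execute_rq(q, disk, rq, 0);   /* non-zero on error */
out:
        blk_put_request(rq);
        return ret;
}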

View File

@ -659,23 +659,24 @@ static void gdrom_request(struct request_queue *rq)
struct request *req; struct request *req;
while ((req = blk_fetch_request(rq)) != NULL) { while ((req = blk_fetch_request(rq)) != NULL) {
if (req->cmd_type != REQ_TYPE_FS) { switch (req_op(req)) {
printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); case REQ_OP_READ:
__blk_end_request_all(req, -EIO); /*
continue; * Add to list of deferred work and then schedule
} * workqueue.
if (rq_data_dir(req) != READ) { */
list_add_tail(&req->queuelist, &gdrom_deferred);
schedule_work(&work);
break;
case REQ_OP_WRITE:
pr_notice("Read only device - write request ignored\n"); pr_notice("Read only device - write request ignored\n");
__blk_end_request_all(req, -EIO); __blk_end_request_all(req, -EIO);
continue; break;
default:
printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
__blk_end_request_all(req, -EIO);
break;
} }
/*
* Add to list of deferred work and then schedule
* workqueue.
*/
list_add_tail(&req->queuelist, &gdrom_deferred);
schedule_work(&work);
} }
} }

View File

@ -10,6 +10,7 @@ menuconfig IDE
tristate "ATA/ATAPI/MFM/RLL support (DEPRECATED)" tristate "ATA/ATAPI/MFM/RLL support (DEPRECATED)"
depends on HAVE_IDE depends on HAVE_IDE
depends on BLOCK depends on BLOCK
select BLK_SCSI_REQUEST
---help--- ---help---
If you say Y here, your kernel will be able to manage ATA/(E)IDE and If you say Y here, your kernel will be able to manage ATA/(E)IDE and
ATAPI units. The most common cases are IDE hard drives and ATAPI ATAPI units. The most common cases are IDE hard drives and ATAPI

View File

@ -92,8 +92,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
struct request *rq; struct request *rq;
int error; int error;
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_DRV_PRIV; scsi_req_init(rq);
ide_req(rq)->type = ATA_PRIV_MISC;
rq->special = (char *)pc; rq->special = (char *)pc;
if (buf && bufflen) { if (buf && bufflen) {
@ -103,9 +104,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
goto put_req; goto put_req;
} }
memcpy(rq->cmd, pc->c, 12); memcpy(scsi_req(rq)->cmd, pc->c, 12);
if (drive->media == ide_tape) if (drive->media == ide_tape)
rq->cmd[13] = REQ_IDETAPE_PC1; scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
error = blk_execute_rq(drive->queue, disk, rq, 0); error = blk_execute_rq(drive->queue, disk, rq, 0);
put_req: put_req:
blk_put_request(rq); blk_put_request(rq);
@ -171,7 +172,8 @@ EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
void ide_prep_sense(ide_drive_t *drive, struct request *rq) void ide_prep_sense(ide_drive_t *drive, struct request *rq)
{ {
struct request_sense *sense = &drive->sense_data; struct request_sense *sense = &drive->sense_data;
struct request *sense_rq = &drive->sense_rq; struct request *sense_rq = drive->sense_rq;
struct scsi_request *req = scsi_req(sense_rq);
unsigned int cmd_len, sense_len; unsigned int cmd_len, sense_len;
int err; int err;
@ -191,12 +193,13 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
BUG_ON(sense_len > sizeof(*sense)); BUG_ON(sense_len > sizeof(*sense));
if (rq->cmd_type == REQ_TYPE_ATA_SENSE || drive->sense_rq_armed) if (ata_sense_request(rq) || drive->sense_rq_armed)
return; return;
memset(sense, 0, sizeof(*sense)); memset(sense, 0, sizeof(*sense));
blk_rq_init(rq->q, sense_rq); blk_rq_init(rq->q, sense_rq);
scsi_req_init(sense_rq);
err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len, err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
GFP_NOIO); GFP_NOIO);
@ -208,13 +211,14 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
} }
sense_rq->rq_disk = rq->rq_disk; sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd[0] = GPCMD_REQUEST_SENSE; sense_rq->cmd_flags = REQ_OP_DRV_IN;
sense_rq->cmd[4] = cmd_len; ide_req(sense_rq)->type = ATA_PRIV_SENSE;
sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
sense_rq->rq_flags |= RQF_PREEMPT; sense_rq->rq_flags |= RQF_PREEMPT;
req->cmd[0] = GPCMD_REQUEST_SENSE;
req->cmd[4] = cmd_len;
if (drive->media == ide_tape) if (drive->media == ide_tape)
sense_rq->cmd[13] = REQ_IDETAPE_PC1; req->cmd[13] = REQ_IDETAPE_PC1;
drive->sense_rq_armed = true; drive->sense_rq_armed = true;
} }
@ -229,12 +233,12 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
return -ENOMEM; return -ENOMEM;
} }
drive->sense_rq.special = special; drive->sense_rq->special = special;
drive->sense_rq_armed = false; drive->sense_rq_armed = false;
drive->hwif->rq = NULL; drive->hwif->rq = NULL;
elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT); elv_add_request(drive->queue, drive->sense_rq, ELEVATOR_INSERT_FRONT);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(ide_queue_sense_rq); EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
@ -247,14 +251,14 @@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
void ide_retry_pc(ide_drive_t *drive) void ide_retry_pc(ide_drive_t *drive)
{ {
struct request *failed_rq = drive->hwif->rq; struct request *failed_rq = drive->hwif->rq;
struct request *sense_rq = &drive->sense_rq; struct request *sense_rq = drive->sense_rq;
struct ide_atapi_pc *pc = &drive->request_sense_pc; struct ide_atapi_pc *pc = &drive->request_sense_pc;
(void)ide_read_error(drive); (void)ide_read_error(drive);
/* init pc from sense_rq */ /* init pc from sense_rq */
ide_init_pc(pc); ide_init_pc(pc);
memcpy(pc->c, sense_rq->cmd, 12); memcpy(pc->c, scsi_req(sense_rq)->cmd, 12);
if (drive->media == ide_tape) if (drive->media == ide_tape)
drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
@ -286,7 +290,7 @@ int ide_cd_expiry(ide_drive_t *drive)
* commands/drives support that. Let ide_timer_expiry keep polling us * commands/drives support that. Let ide_timer_expiry keep polling us
* for these. * for these.
*/ */
switch (rq->cmd[0]) { switch (scsi_req(rq)->cmd[0]) {
case GPCMD_BLANK: case GPCMD_BLANK:
case GPCMD_FORMAT_UNIT: case GPCMD_FORMAT_UNIT:
case GPCMD_RESERVE_RZONE_TRACK: case GPCMD_RESERVE_RZONE_TRACK:
@ -297,7 +301,7 @@ int ide_cd_expiry(ide_drive_t *drive)
default: default:
if (!(rq->rq_flags & RQF_QUIET)) if (!(rq->rq_flags & RQF_QUIET))
printk(KERN_INFO PFX "cmd 0x%x timed out\n", printk(KERN_INFO PFX "cmd 0x%x timed out\n",
rq->cmd[0]); scsi_req(rq)->cmd[0]);
wait = 0; wait = 0;
break; break;
} }
@ -307,15 +311,21 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry);
int ide_cd_get_xferlen(struct request *rq) int ide_cd_get_xferlen(struct request *rq)
{ {
-    switch (rq->cmd_type) {
-    case REQ_TYPE_FS:
-        return 32768;
-    case REQ_TYPE_ATA_SENSE:
-    case REQ_TYPE_BLOCK_PC:
-    case REQ_TYPE_ATA_PC:
-        return blk_rq_bytes(rq);
+    switch (req_op(rq)) {
     default:
-        return 0;
+        return 32768;
+    case REQ_OP_SCSI_IN:
+    case REQ_OP_SCSI_OUT:
+        return blk_rq_bytes(rq);
+    case REQ_OP_DRV_IN:
+    case REQ_OP_DRV_OUT:
+        switch (ide_req(rq)->type) {
+        case ATA_PRIV_PC:
+        case ATA_PRIV_SENSE:
+            return blk_rq_bytes(rq);
+        default:
+            return 0;
+        }
     }
 }
EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
@ -374,7 +384,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
drive->name, __func__, ireason); drive->name, __func__, ireason);
} }
if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC) if (dev_is_idecd(drive) && ata_pc_request(rq))
rq->rq_flags |= RQF_FAILED; rq->rq_flags |= RQF_FAILED;
return 1; return 1;
@ -420,7 +430,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
? "write" : "read"); ? "write" : "read");
pc->flags |= PC_FLAG_DMA_ERROR; pc->flags |= PC_FLAG_DMA_ERROR;
} else } else
rq->resid_len = 0; scsi_req(rq)->resid_len = 0;
debug_log("%s: DMA finished\n", drive->name); debug_log("%s: DMA finished\n", drive->name);
} }
@ -436,7 +446,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
local_irq_enable_in_hardirq(); local_irq_enable_in_hardirq();
if (drive->media == ide_tape && if (drive->media == ide_tape &&
(stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE) (stat & ATA_ERR) && scsi_req(rq)->cmd[0] == REQUEST_SENSE)
stat &= ~ATA_ERR; stat &= ~ATA_ERR;
if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) { if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) {
@ -446,7 +456,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (drive->media != ide_tape) if (drive->media != ide_tape)
pc->rq->errors++; pc->rq->errors++;
if (rq->cmd[0] == REQUEST_SENSE) { if (scsi_req(rq)->cmd[0] == REQUEST_SENSE) {
printk(KERN_ERR PFX "%s: I/O error in request " printk(KERN_ERR PFX "%s: I/O error in request "
"sense command\n", drive->name); "sense command\n", drive->name);
return ide_do_reset(drive); return ide_do_reset(drive);
@ -477,12 +487,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (uptodate == 0) if (uptodate == 0)
drive->failed_pc = NULL; drive->failed_pc = NULL;
if (rq->cmd_type == REQ_TYPE_DRV_PRIV) { if (ata_misc_request(rq)) {
rq->errors = 0; rq->errors = 0;
error = 0; error = 0;
} else { } else {
if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) { if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
if (rq->errors == 0) if (rq->errors == 0)
rq->errors = -EIO; rq->errors = -EIO;
} }
@ -512,7 +522,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pio_bytes(drive, cmd, write, done); ide_pio_bytes(drive, cmd, write, done);
/* Update transferred byte count */ /* Update transferred byte count */
rq->resid_len -= done; scsi_req(rq)->resid_len -= done;
bcount -= done; bcount -= done;
@ -520,7 +530,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pad_transfer(drive, write, bcount); ide_pad_transfer(drive, write, bcount);
debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n", debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
rq->cmd[0], done, bcount, rq->resid_len); rq->cmd[0], done, bcount, scsi_req(rq)->resid_len);
/* And set the interrupt handler again */ /* And set the interrupt handler again */
ide_set_handler(drive, ide_pc_intr, timeout); ide_set_handler(drive, ide_pc_intr, timeout);
@ -603,7 +613,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
if (dev_is_idecd(drive)) { if (dev_is_idecd(drive)) {
/* ATAPI commands get padded out to 12 bytes minimum */ /* ATAPI commands get padded out to 12 bytes minimum */
cmd_len = COMMAND_SIZE(rq->cmd[0]); cmd_len = COMMAND_SIZE(scsi_req(rq)->cmd[0]);
if (cmd_len < ATAPI_MIN_CDB_BYTES) if (cmd_len < ATAPI_MIN_CDB_BYTES)
cmd_len = ATAPI_MIN_CDB_BYTES; cmd_len = ATAPI_MIN_CDB_BYTES;
@ -650,7 +660,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
/* Send the actual packet */ /* Send the actual packet */
if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0) if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len); hwif->tp_ops->output_data(drive, NULL, scsi_req(rq)->cmd, cmd_len);
/* Begin DMA, if necessary */ /* Begin DMA, if necessary */
if (dev_is_idecd(drive)) { if (dev_is_idecd(drive)) {
@ -695,7 +705,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
bytes, 63 * 1024)); bytes, 63 * 1024));
/* We haven't transferred any data yet */ /* We haven't transferred any data yet */
rq->resid_len = bcount; scsi_req(rq)->resid_len = bcount;
if (pc->flags & PC_FLAG_DMA_ERROR) { if (pc->flags & PC_FLAG_DMA_ERROR) {
pc->flags &= ~PC_FLAG_DMA_ERROR; pc->flags &= ~PC_FLAG_DMA_ERROR;


@ -121,7 +121,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
* don't log START_STOP unit with LoEj set, since we cannot * don't log START_STOP unit with LoEj set, since we cannot
* reliably check if drive can auto-close * reliably check if drive can auto-close
*/ */
if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24) if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
break; break;
log = 1; log = 1;
break; break;
@ -163,7 +163,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
* toc has not been recorded yet, it will fail with 05/24/00 (which is a * toc has not been recorded yet, it will fail with 05/24/00 (which is a
* confusing error) * confusing error)
*/ */
if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP) if (failed_command && scsi_req(failed_command)->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
if (sense->sense_key == 0x05 && sense->asc == 0x24) if (sense->sense_key == 0x05 && sense->asc == 0x24)
return; return;
@ -176,7 +176,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
if (!sense->valid) if (!sense->valid)
break; break;
if (failed_command == NULL || if (failed_command == NULL ||
failed_command->cmd_type != REQ_TYPE_FS) blk_rq_is_passthrough(failed_command))
break; break;
sector = (sense->information[0] << 24) | sector = (sense->information[0] << 24) |
(sense->information[1] << 16) | (sense->information[1] << 16) |
@ -210,7 +210,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
{ {
/* /*
* For REQ_TYPE_ATA_SENSE, "rq->special" points to the original * For ATA_PRIV_SENSE, "rq->special" points to the original
* failed request. Also, the sense data should be read * failed request. Also, the sense data should be read
* directly from rq which might be different from the original * directly from rq which might be different from the original
* sense buffer if it got copied during mapping. * sense buffer if it got copied during mapping.
@ -219,15 +219,12 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
void *sense = bio_data(rq->bio); void *sense = bio_data(rq->bio);
if (failed) { if (failed) {
-        if (failed->sense) {
-            /*
-             * Sense is always read into drive->sense_data.
-             * Copy back if the failed request has its
-             * sense pointer set.
-             */
-            memcpy(failed->sense, sense, 18);
-            failed->sense_len = rq->sense_len;
-        }
+        /*
+         * Sense is always read into drive->sense_data, copy back to the
+         * original request.
+         */
+        memcpy(scsi_req(failed)->sense, sense, 18);
+        scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
cdrom_analyze_sense_data(drive, failed); cdrom_analyze_sense_data(drive, failed);
if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed))) if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed)))
@ -285,7 +282,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
"stat 0x%x", "stat 0x%x",
rq->cmd[0], rq->cmd_type, err, stat); rq->cmd[0], rq->cmd_type, err, stat);
if (rq->cmd_type == REQ_TYPE_ATA_SENSE) { if (ata_sense_request(rq)) {
/* /*
* We got an error trying to get sense info from the drive * We got an error trying to get sense info from the drive
* (probably while trying to recover from a former error). * (probably while trying to recover from a former error).
@ -296,7 +293,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
} }
/* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors) if (blk_rq_is_scsi(rq) && !rq->errors)
rq->errors = SAM_STAT_CHECK_CONDITION; rq->errors = SAM_STAT_CHECK_CONDITION;
if (blk_noretry_request(rq)) if (blk_noretry_request(rq))
@ -304,13 +301,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
switch (sense_key) { switch (sense_key) {
case NOT_READY: case NOT_READY:
if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) { if (req_op(rq) == REQ_OP_WRITE) {
if (ide_cd_breathe(drive, rq)) if (ide_cd_breathe(drive, rq))
return 1; return 1;
} else { } else {
cdrom_saw_media_change(drive); cdrom_saw_media_change(drive);
if (rq->cmd_type == REQ_TYPE_FS && if (!blk_rq_is_passthrough(rq) &&
!(rq->rq_flags & RQF_QUIET)) !(rq->rq_flags & RQF_QUIET))
printk(KERN_ERR PFX "%s: tray open\n", printk(KERN_ERR PFX "%s: tray open\n",
drive->name); drive->name);
@ -320,7 +317,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
case UNIT_ATTENTION: case UNIT_ATTENTION:
cdrom_saw_media_change(drive); cdrom_saw_media_change(drive);
if (rq->cmd_type != REQ_TYPE_FS) if (blk_rq_is_passthrough(rq))
return 0; return 0;
/* /*
@ -338,7 +335,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* *
* cdrom_log_sense() knows this! * cdrom_log_sense() knows this!
*/ */
if (rq->cmd[0] == GPCMD_START_STOP_UNIT) if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT)
break; break;
/* fall-through */ /* fall-through */
case DATA_PROTECT: case DATA_PROTECT:
@ -368,7 +365,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
do_end_request = 1; do_end_request = 1;
break; break;
default: default:
if (rq->cmd_type != REQ_TYPE_FS) if (blk_rq_is_passthrough(rq))
break; break;
if (err & ~ATA_ABORTED) { if (err & ~ATA_ABORTED) {
/* go to the default handler for other errors */ /* go to the default handler for other errors */
@ -379,7 +376,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
do_end_request = 1; do_end_request = 1;
} }
if (rq->cmd_type != REQ_TYPE_FS) { if (blk_rq_is_passthrough(rq)) {
rq->rq_flags |= RQF_FAILED; rq->rq_flags |= RQF_FAILED;
do_end_request = 1; do_end_request = 1;
} }
@ -414,7 +411,7 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
* Some of the trailing request sense fields are optional, * Some of the trailing request sense fields are optional,
* and some drives don't send them. Sigh. * and some drives don't send them. Sigh.
*/ */
if (rq->cmd[0] == GPCMD_REQUEST_SENSE && if (scsi_req(rq)->cmd[0] == GPCMD_REQUEST_SENSE &&
cmd->nleft > 0 && cmd->nleft <= 5) cmd->nleft > 0 && cmd->nleft <= 5)
cmd->nleft = 0; cmd->nleft = 0;
} }
@ -425,12 +422,8 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
req_flags_t rq_flags) req_flags_t rq_flags)
{ {
struct cdrom_info *info = drive->driver_data; struct cdrom_info *info = drive->driver_data;
struct request_sense local_sense;
int retries = 10; int retries = 10;
req_flags_t flags = 0; bool failed;
if (!sense)
sense = &local_sense;
ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, " ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
"rq_flags: 0x%x", "rq_flags: 0x%x",
@ -440,12 +433,13 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
do { do {
struct request *rq; struct request *rq;
int error; int error;
bool delay = false;
rq = blk_get_request(drive->queue, write, __GFP_RECLAIM); rq = blk_get_request(drive->queue,
write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
memcpy(rq->cmd, cmd, BLK_MAX_CDB); scsi_req_init(rq);
rq->cmd_type = REQ_TYPE_ATA_PC; memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
rq->sense = sense; ide_req(rq)->type = ATA_PRIV_PC;
rq->rq_flags |= rq_flags; rq->rq_flags |= rq_flags;
rq->timeout = timeout; rq->timeout = timeout;
if (buffer) { if (buffer) {
@ -460,21 +454,21 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
error = blk_execute_rq(drive->queue, info->disk, rq, 0); error = blk_execute_rq(drive->queue, info->disk, rq, 0);
if (buffer) if (buffer)
*bufflen = rq->resid_len; *bufflen = scsi_req(rq)->resid_len;
if (sense)
flags = rq->rq_flags; memcpy(sense, scsi_req(rq)->sense, sizeof(*sense));
blk_put_request(rq);
/* /*
* FIXME: we should probably abort/retry or something in case of * FIXME: we should probably abort/retry or something in case of
* failure. * failure.
*/ */
if (flags & RQF_FAILED) { failed = (rq->rq_flags & RQF_FAILED) != 0;
if (failed) {
/* /*
* The request failed. Retry if it was due to a unit * The request failed. Retry if it was due to a unit
* attention status (usually means media was changed). * attention status (usually means media was changed).
*/ */
struct request_sense *reqbuf = sense; struct request_sense *reqbuf = scsi_req(rq)->sense;
if (reqbuf->sense_key == UNIT_ATTENTION) if (reqbuf->sense_key == UNIT_ATTENTION)
cdrom_saw_media_change(drive); cdrom_saw_media_change(drive);
@ -485,19 +479,20 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
* a disk. Retry, but wait a little to give * a disk. Retry, but wait a little to give
* the drive time to complete the load. * the drive time to complete the load.
*/ */
ssleep(2); delay = true;
} else { } else {
/* otherwise, don't retry */ /* otherwise, don't retry */
retries = 0; retries = 0;
} }
--retries; --retries;
} }
blk_put_request(rq);
/* end of retry loop */ if (delay)
} while ((flags & RQF_FAILED) && retries >= 0); ssleep(2);
} while (failed && retries >= 0);
/* return an error if the command failed */ /* return an error if the command failed */
return (flags & RQF_FAILED) ? -EIO : 0; return failed ? -EIO : 0;
} }
/* /*
@ -526,7 +521,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
ide_expiry_t *expiry = NULL; ide_expiry_t *expiry = NULL;
int dma_error = 0, dma, thislen, uptodate = 0; int dma_error = 0, dma, thislen, uptodate = 0;
int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
int sense = (rq->cmd_type == REQ_TYPE_ATA_SENSE); int sense = ata_sense_request(rq);
unsigned int timeout; unsigned int timeout;
u16 len; u16 len;
u8 ireason, stat; u8 ireason, stat;
@ -569,7 +564,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
ide_read_bcount_and_ireason(drive, &len, &ireason); ide_read_bcount_and_ireason(drive, &len, &ireason);
thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft; thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft;
if (thislen > len) if (thislen > len)
thislen = len; thislen = len;
@ -578,7 +573,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
/* If DRQ is clear, the command has completed. */ /* If DRQ is clear, the command has completed. */
if ((stat & ATA_DRQ) == 0) { if ((stat & ATA_DRQ) == 0) {
if (rq->cmd_type == REQ_TYPE_FS) { switch (req_op(rq)) {
default:
/* /*
* If we're not done reading/writing, complain. * If we're not done reading/writing, complain.
* Otherwise, complete the command normally. * Otherwise, complete the command normally.
@ -592,7 +588,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
rq->rq_flags |= RQF_FAILED; rq->rq_flags |= RQF_FAILED;
uptodate = 0; uptodate = 0;
} }
} else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { goto out_end;
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
ide_cd_request_sense_fixup(drive, cmd); ide_cd_request_sense_fixup(drive, cmd);
uptodate = cmd->nleft ? 0 : 1; uptodate = cmd->nleft ? 0 : 1;
@ -608,8 +606,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
if (!uptodate) if (!uptodate)
rq->rq_flags |= RQF_FAILED; rq->rq_flags |= RQF_FAILED;
goto out_end;
case REQ_OP_SCSI_IN:
case REQ_OP_SCSI_OUT:
goto out_end;
} }
goto out_end;
} }
rc = ide_check_ireason(drive, rq, len, ireason, write); rc = ide_check_ireason(drive, rq, len, ireason, write);
@ -636,12 +637,12 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
len -= blen; len -= blen;
if (sense && write == 0) if (sense && write == 0)
rq->sense_len += blen; scsi_req(rq)->sense_len += blen;
} }
/* pad, if necessary */ /* pad, if necessary */
if (len > 0) { if (len > 0) {
if (rq->cmd_type != REQ_TYPE_FS || write == 0) if (blk_rq_is_passthrough(rq) || write == 0)
ide_pad_transfer(drive, write, len); ide_pad_transfer(drive, write, len);
else { else {
printk(KERN_ERR PFX "%s: confused, missing data\n", printk(KERN_ERR PFX "%s: confused, missing data\n",
@ -650,12 +651,18 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
} }
} }
-    if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+    switch (req_op(rq)) {
+    case REQ_OP_SCSI_IN:
+    case REQ_OP_SCSI_OUT:
         timeout = rq->timeout;
-    } else {
+        break;
+    case REQ_OP_DRV_IN:
+    case REQ_OP_DRV_OUT:
+        expiry = ide_cd_expiry;
+        /*FALLTHRU*/
+    default:
         timeout = ATAPI_WAIT_PC;
-        if (rq->cmd_type != REQ_TYPE_FS)
-            expiry = ide_cd_expiry;
+        break;
     }
hwif->expiry = expiry; hwif->expiry = expiry;
@ -663,15 +670,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
return ide_started; return ide_started;
out_end: out_end:
if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) { if (blk_rq_is_scsi(rq) && rc == 0) {
rq->resid_len = 0; scsi_req(rq)->resid_len = 0;
blk_end_request_all(rq, 0); blk_end_request_all(rq, 0);
hwif->rq = NULL; hwif->rq = NULL;
} else { } else {
if (sense && uptodate) if (sense && uptodate)
ide_cd_complete_failed_rq(drive, rq); ide_cd_complete_failed_rq(drive, rq);
if (rq->cmd_type == REQ_TYPE_FS) { if (!blk_rq_is_passthrough(rq)) {
if (cmd->nleft == 0) if (cmd->nleft == 0)
uptodate = 1; uptodate = 1;
} else { } else {
@ -684,10 +691,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
return ide_stopped; return ide_stopped;
/* make sure it's fully ended */ /* make sure it's fully ended */
if (rq->cmd_type != REQ_TYPE_FS) { if (blk_rq_is_passthrough(rq)) {
rq->resid_len -= cmd->nbytes - cmd->nleft; scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft;
if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
rq->resid_len += cmd->last_xfer_len; scsi_req(rq)->resid_len += cmd->last_xfer_len;
} }
ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq)); ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
@ -744,7 +751,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x", ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
rq->cmd[0], rq->cmd_type); rq->cmd[0], rq->cmd_type);
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) if (blk_rq_is_scsi(rq))
rq->rq_flags |= RQF_QUIET; rq->rq_flags |= RQF_QUIET;
else else
rq->rq_flags &= ~RQF_FAILED; rq->rq_flags &= ~RQF_FAILED;
@ -786,25 +793,31 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
if (drive->debug_mask & IDE_DBG_RQ) if (drive->debug_mask & IDE_DBG_RQ)
blk_dump_rq_flags(rq, "ide_cd_do_request"); blk_dump_rq_flags(rq, "ide_cd_do_request");
-    switch (rq->cmd_type) {
-    case REQ_TYPE_FS:
+    switch (req_op(rq)) {
+    default:
         if (cdrom_start_rw(drive, rq) == ide_stopped)
             goto out_end;
         break;
-    case REQ_TYPE_ATA_SENSE:
-    case REQ_TYPE_BLOCK_PC:
-    case REQ_TYPE_ATA_PC:
+    case REQ_OP_SCSI_IN:
+    case REQ_OP_SCSI_OUT:
+    handle_pc:
         if (!rq->timeout)
             rq->timeout = ATAPI_WAIT_PC;
         cdrom_do_block_pc(drive, rq);
         break;
-    case REQ_TYPE_DRV_PRIV:
-        /* right now this can only be a reset... */
-        uptodate = 1;
-        goto out_end;
-    default:
-        BUG();
+    case REQ_OP_DRV_IN:
+    case REQ_OP_DRV_OUT:
+        switch (ide_req(rq)->type) {
+        case ATA_PRIV_MISC:
+            /* right now this can only be a reset... */
+            uptodate = 1;
+            goto out_end;
+        case ATA_PRIV_SENSE:
+        case ATA_PRIV_PC:
+            goto handle_pc;
+        default:
+            BUG();
+        }
     }
/* prepare sense request for this command */ /* prepare sense request for this command */
@ -817,7 +830,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
cmd.rq = rq; cmd.rq = rq;
if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd); ide_map_sg(drive, &cmd);
} }
@ -1312,28 +1325,29 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
int hard_sect = queue_logical_block_size(q); int hard_sect = queue_logical_block_size(q);
long block = (long)blk_rq_pos(rq) / (hard_sect >> 9); long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
struct scsi_request *req = scsi_req(rq);
memset(rq->cmd, 0, BLK_MAX_CDB); memset(req->cmd, 0, BLK_MAX_CDB);
if (rq_data_dir(rq) == READ) if (rq_data_dir(rq) == READ)
rq->cmd[0] = GPCMD_READ_10; req->cmd[0] = GPCMD_READ_10;
else else
rq->cmd[0] = GPCMD_WRITE_10; req->cmd[0] = GPCMD_WRITE_10;
/* /*
* fill in lba * fill in lba
*/ */
rq->cmd[2] = (block >> 24) & 0xff; req->cmd[2] = (block >> 24) & 0xff;
rq->cmd[3] = (block >> 16) & 0xff; req->cmd[3] = (block >> 16) & 0xff;
rq->cmd[4] = (block >> 8) & 0xff; req->cmd[4] = (block >> 8) & 0xff;
rq->cmd[5] = block & 0xff; req->cmd[5] = block & 0xff;
/* /*
* and transfer length * and transfer length
*/ */
rq->cmd[7] = (blocks >> 8) & 0xff; req->cmd[7] = (blocks >> 8) & 0xff;
rq->cmd[8] = blocks & 0xff; req->cmd[8] = blocks & 0xff;
rq->cmd_len = 10; req->cmd_len = 10;
return BLKPREP_OK; return BLKPREP_OK;
} }
@ -1343,7 +1357,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
*/ */
static int ide_cdrom_prep_pc(struct request *rq) static int ide_cdrom_prep_pc(struct request *rq)
{ {
u8 *c = rq->cmd; u8 *c = scsi_req(rq)->cmd;
/* transform 6-byte read/write commands to the 10-byte version */ /* transform 6-byte read/write commands to the 10-byte version */
if (c[0] == READ_6 || c[0] == WRITE_6) { if (c[0] == READ_6 || c[0] == WRITE_6) {
@ -1354,7 +1368,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
c[2] = 0; c[2] = 0;
c[1] &= 0xe0; c[1] &= 0xe0;
c[0] += (READ_10 - READ_6); c[0] += (READ_10 - READ_6);
rq->cmd_len = 10; scsi_req(rq)->cmd_len = 10;
return BLKPREP_OK; return BLKPREP_OK;
} }
@ -1373,9 +1387,9 @@ static int ide_cdrom_prep_pc(struct request *rq)
static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
{ {
if (rq->cmd_type == REQ_TYPE_FS) if (!blk_rq_is_passthrough(rq))
return ide_cdrom_prep_fs(q, rq); return ide_cdrom_prep_fs(q, rq);
else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) else if (blk_rq_is_scsi(rq))
return ide_cdrom_prep_pc(rq); return ide_cdrom_prep_pc(rq);
return 0; return 0;


@ -303,8 +303,9 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
struct request *rq; struct request *rq;
int ret; int ret;
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_DRV_PRIV; scsi_req_init(rq);
ide_req(rq)->type = ATA_PRIV_MISC;
rq->rq_flags = RQF_QUIET; rq->rq_flags = RQF_QUIET;
ret = blk_execute_rq(drive->queue, cd->disk, rq, 0); ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
blk_put_request(rq); blk_put_request(rq);


@ -315,12 +315,12 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
while (hi > lo) { while (hi > lo) {
mid = (lo + hi) / 2; mid = (lo + hi) / 2;
if (packet_command_texts[mid].packet_command == if (packet_command_texts[mid].packet_command ==
failed_command->cmd[0]) { scsi_req(failed_command)->cmd[0]) {
s = packet_command_texts[mid].text; s = packet_command_texts[mid].text;
break; break;
} }
if (packet_command_texts[mid].packet_command > if (packet_command_texts[mid].packet_command >
failed_command->cmd[0]) scsi_req(failed_command)->cmd[0])
hi = mid; hi = mid;
else else
lo = mid + 1; lo = mid + 1;
@ -329,7 +329,7 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
printk(KERN_ERR " The failed \"%s\" packet command " printk(KERN_ERR " The failed \"%s\" packet command "
"was: \n \"", s); "was: \n \"", s);
for (i = 0; i < BLK_MAX_CDB; i++) for (i = 0; i < BLK_MAX_CDB; i++)
printk(KERN_CONT "%02x ", failed_command->cmd[i]); printk(KERN_CONT "%02x ", scsi_req(failed_command)->cmd[i]);
printk(KERN_CONT "\"\n"); printk(KERN_CONT "\"\n");
} }


@ -165,11 +165,12 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
if (!(setting->flags & DS_SYNC)) if (!(setting->flags & DS_SYNC))
return setting->set(drive, arg); return setting->set(drive, arg);
rq = blk_get_request(q, READ, __GFP_RECLAIM); rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_DRV_PRIV; scsi_req_init(rq);
rq->cmd_len = 5; ide_req(rq)->type = ATA_PRIV_MISC;
rq->cmd[0] = REQ_DEVSET_EXEC; scsi_req(rq)->cmd_len = 5;
*(int *)&rq->cmd[1] = arg; scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
*(int *)&scsi_req(rq)->cmd[1] = arg;
rq->special = setting->set; rq->special = setting->set;
if (blk_execute_rq(q, NULL, rq, 0)) if (blk_execute_rq(q, NULL, rq, 0))
@ -183,7 +184,7 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
{ {
int err, (*setfunc)(ide_drive_t *, int) = rq->special; int err, (*setfunc)(ide_drive_t *, int) = rq->special;
err = setfunc(drive, *(int *)&rq->cmd[1]); err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
if (err) if (err)
rq->errors = err; rq->errors = err;
ide_complete_rq(drive, err, blk_rq_bytes(rq)); ide_complete_rq(drive, err, blk_rq_bytes(rq));


@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ide_hwif_t *hwif = drive->hwif; ide_hwif_t *hwif = drive->hwif;
BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED); BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
BUG_ON(rq->cmd_type != REQ_TYPE_FS); BUG_ON(blk_rq_is_passthrough(rq));
ledtrig_disk_activity(); ledtrig_disk_activity();
@ -452,8 +452,9 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd->tf_flags = IDE_TFLAG_DYN; cmd->tf_flags = IDE_TFLAG_DYN;
cmd->protocol = ATA_PROT_NODATA; cmd->protocol = ATA_PROT_NODATA;
rq->cmd_flags &= ~REQ_OP_MASK;
rq->cmd_type = REQ_TYPE_ATA_TASKFILE; rq->cmd_flags |= REQ_OP_DRV_OUT;
ide_req(rq)->type = ATA_PRIV_TASKFILE;
rq->special = cmd; rq->special = cmd;
cmd->rq = rq; cmd->rq = rq;
@ -477,8 +478,9 @@ static int set_multcount(ide_drive_t *drive, int arg)
if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
return -EBUSY; return -EBUSY;
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_ATA_TASKFILE; scsi_req_init(rq);
ide_req(rq)->type = ATA_PRIV_TASKFILE;
drive->mult_req = arg; drive->mult_req = arg;
drive->special_flags |= IDE_SFLAG_SET_MULTMODE; drive->special_flags |= IDE_SFLAG_SET_MULTMODE;


@ -123,8 +123,8 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
return ide_stopped; return ide_stopped;
/* retry only "normal" I/O: */ /* retry only "normal" I/O: */
if (rq->cmd_type != REQ_TYPE_FS) { if (blk_rq_is_passthrough(rq)) {
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { if (ata_taskfile_request(rq)) {
struct ide_cmd *cmd = rq->special; struct ide_cmd *cmd = rq->special;
if (cmd) if (cmd)
@ -147,8 +147,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
{ {
struct request *rq = drive->hwif->rq; struct request *rq = drive->hwif->rq;
if (rq && rq->cmd_type == REQ_TYPE_DRV_PRIV && if (rq && ata_misc_request(rq) &&
rq->cmd[0] == REQ_DRIVE_RESET) { scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
if (err <= 0 && rq->errors == 0) if (err <= 0 && rq->errors == 0)
rq->errors = -EIO; rq->errors = -EIO;
ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq)); ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));


@ -72,7 +72,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
drive->failed_pc = NULL; drive->failed_pc = NULL;
if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
rq->cmd_type == REQ_TYPE_BLOCK_PC) (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT))
uptodate = 1; /* FIXME */ uptodate = 1; /* FIXME */
else if (pc->c[0] == GPCMD_REQUEST_SENSE) { else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
@ -97,7 +97,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
"Aborting request!\n"); "Aborting request!\n");
} }
if (rq->cmd_type == REQ_TYPE_DRV_PRIV) if (ata_misc_request(rq))
rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
return uptodate; return uptodate;
@ -203,7 +203,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]); put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
memcpy(rq->cmd, pc->c, 12); memcpy(scsi_req(rq)->cmd, pc->c, 12);
pc->rq = rq; pc->rq = rq;
if (cmd == WRITE) if (cmd == WRITE)
@ -216,7 +216,7 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
struct ide_atapi_pc *pc, struct request *rq) struct ide_atapi_pc *pc, struct request *rq)
{ {
ide_init_pc(pc); ide_init_pc(pc);
memcpy(pc->c, rq->cmd, sizeof(pc->c)); memcpy(pc->c, scsi_req(rq)->cmd, sizeof(pc->c));
pc->rq = rq; pc->rq = rq;
if (blk_rq_bytes(rq)) { if (blk_rq_bytes(rq)) {
pc->flags |= PC_FLAG_DMA_OK; pc->flags |= PC_FLAG_DMA_OK;
@ -246,7 +246,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
} else } else
printk(KERN_ERR PFX "%s: I/O error\n", drive->name); printk(KERN_ERR PFX "%s: I/O error\n", drive->name);
if (rq->cmd_type == REQ_TYPE_DRV_PRIV) { if (ata_misc_request(rq)) {
rq->errors = 0; rq->errors = 0;
ide_complete_rq(drive, 0, blk_rq_bytes(rq)); ide_complete_rq(drive, 0, blk_rq_bytes(rq));
return ide_stopped; return ide_stopped;
@ -254,8 +254,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
goto out_end; goto out_end;
} }
switch (rq->cmd_type) { switch (req_op(rq)) {
case REQ_TYPE_FS: default:
if (((long)blk_rq_pos(rq) % floppy->bs_factor) || if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
(blk_rq_sectors(rq) % floppy->bs_factor)) { (blk_rq_sectors(rq) % floppy->bs_factor)) {
printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
@ -265,16 +265,21 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
pc = &floppy->queued_pc; pc = &floppy->queued_pc;
idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
break; break;
-    case REQ_TYPE_DRV_PRIV:
-    case REQ_TYPE_ATA_SENSE:
-        pc = (struct ide_atapi_pc *)rq->special;
-        break;
-    case REQ_TYPE_BLOCK_PC:
+    case REQ_OP_SCSI_IN:
+    case REQ_OP_SCSI_OUT:
         pc = &floppy->queued_pc;
         idefloppy_blockpc_cmd(floppy, pc, rq);
         break;
-    default:
-        BUG();
+    case REQ_OP_DRV_IN:
+    case REQ_OP_DRV_OUT:
+        switch (ide_req(rq)->type) {
+        case ATA_PRIV_MISC:
+        case ATA_PRIV_SENSE:
+            pc = (struct ide_atapi_pc *)rq->special;
+            break;
+        default:
+            BUG();
+        }
     }
ide_prep_sense(drive, rq); ide_prep_sense(drive, rq);
@ -286,7 +291,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
cmd.rq = rq; cmd.rq = rq;
if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd); ide_map_sg(drive, &cmd);
} }
@ -296,7 +301,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
return ide_floppy_issue_pc(drive, &cmd, pc); return ide_floppy_issue_pc(drive, &cmd, pc);
out_end: out_end:
drive->failed_pc = NULL; drive->failed_pc = NULL;
if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) if (blk_rq_is_passthrough(rq) && rq->errors == 0)
rq->errors = -EIO; rq->errors = -EIO;
ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
return ide_stopped; return ide_stopped;


@ -102,7 +102,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
drive->dev_flags |= IDE_DFLAG_PARKED; drive->dev_flags |= IDE_DFLAG_PARKED;
} }
if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { if (rq && ata_taskfile_request(rq)) {
struct ide_cmd *orig_cmd = rq->special; struct ide_cmd *orig_cmd = rq->special;
if (cmd->tf_flags & IDE_TFLAG_DYN) if (cmd->tf_flags & IDE_TFLAG_DYN)
@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq);
void ide_kill_rq(ide_drive_t *drive, struct request *rq) void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{ {
u8 drv_req = (rq->cmd_type == REQ_TYPE_DRV_PRIV) && rq->rq_disk; u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
u8 media = drive->media; u8 media = drive->media;
drive->failed_pc = NULL; drive->failed_pc = NULL;
@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
} else { } else {
if (media == ide_tape) if (media == ide_tape)
rq->errors = IDE_DRV_ERROR_GENERAL; rq->errors = IDE_DRV_ERROR_GENERAL;
else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) else if (blk_rq_is_passthrough(rq) && rq->errors == 0)
rq->errors = -EIO; rq->errors = -EIO;
} }
@ -279,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq) static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{ {
u8 cmd = rq->cmd[0]; u8 cmd = scsi_req(rq)->cmd[0];
switch (cmd) { switch (cmd) {
case REQ_PARK_HEADS: case REQ_PARK_HEADS:
@ -340,7 +340,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
if (drive->current_speed == 0xff) if (drive->current_speed == 0xff)
ide_config_drive_speed(drive, drive->desired_speed); ide_config_drive_speed(drive, drive->desired_speed);
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) if (ata_taskfile_request(rq))
return execute_drive_cmd(drive, rq); return execute_drive_cmd(drive, rq);
else if (ata_pm_request(rq)) { else if (ata_pm_request(rq)) {
struct ide_pm_state *pm = rq->special; struct ide_pm_state *pm = rq->special;
@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
pm->pm_step == IDE_PM_COMPLETED) pm->pm_step == IDE_PM_COMPLETED)
ide_complete_pm_rq(drive, rq); ide_complete_pm_rq(drive, rq);
return startstop; return startstop;
} else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_DRV_PRIV) } else if (!rq->rq_disk && ata_misc_request(rq))
/* /*
* TODO: Once all ULDs have been modified to * TODO: Once all ULDs have been modified to
* check for specific op codes rather than * check for specific op codes rather than
@ -545,6 +545,7 @@ void do_ide_request(struct request_queue *q)
goto plug_device; goto plug_device;
} }
scsi_req(rq)->resid_len = blk_rq_bytes(rq);
hwif->rq = rq; hwif->rq = rq;
spin_unlock_irq(&hwif->lock); spin_unlock_irq(&hwif->lock);


@ -125,8 +125,9 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
if (NULL == (void *) arg) { if (NULL == (void *) arg) {
struct request *rq; struct request *rq;
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_ATA_TASKFILE; scsi_req_init(rq);
ide_req(rq)->type = ATA_PRIV_TASKFILE;
err = blk_execute_rq(drive->queue, NULL, rq, 0); err = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq); blk_put_request(rq);
@ -221,10 +222,11 @@ static int generic_drive_reset(ide_drive_t *drive)
struct request *rq; struct request *rq;
int ret = 0; int ret = 0;
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_DRV_PRIV; scsi_req_init(rq);
rq->cmd_len = 1; ide_req(rq)->type = ATA_PRIV_MISC;
rq->cmd[0] = REQ_DRIVE_RESET; scsi_req(rq)->cmd_len = 1;
scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
if (blk_execute_rq(drive->queue, NULL, rq, 1)) if (blk_execute_rq(drive->queue, NULL, rq, 1))
ret = rq->errors; ret = rq->errors;
blk_put_request(rq); blk_put_request(rq);


@ -31,10 +31,11 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
} }
spin_unlock_irq(&hwif->lock); spin_unlock_irq(&hwif->lock);
rq = blk_get_request(q, READ, __GFP_RECLAIM); rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
rq->cmd[0] = REQ_PARK_HEADS; scsi_req_init(rq);
rq->cmd_len = 1; scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
rq->cmd_type = REQ_TYPE_DRV_PRIV; scsi_req(rq)->cmd_len = 1;
ide_req(rq)->type = ATA_PRIV_MISC;
rq->special = &timeout; rq->special = &timeout;
rc = blk_execute_rq(q, NULL, rq, 1); rc = blk_execute_rq(q, NULL, rq, 1);
blk_put_request(rq); blk_put_request(rq);
@ -45,13 +46,14 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
* Make sure that *some* command is sent to the drive after the * Make sure that *some* command is sent to the drive after the
* timeout has expired, so power management will be reenabled. * timeout has expired, so power management will be reenabled.
*/ */
rq = blk_get_request(q, READ, GFP_NOWAIT); rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT);
scsi_req_init(rq);
if (IS_ERR(rq)) if (IS_ERR(rq))
goto out; goto out;
rq->cmd[0] = REQ_UNPARK_HEADS; scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
rq->cmd_len = 1; scsi_req(rq)->cmd_len = 1;
rq->cmd_type = REQ_TYPE_DRV_PRIV; ide_req(rq)->type = ATA_PRIV_MISC;
elv_add_request(q, rq, ELEVATOR_INSERT_FRONT); elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
out: out:
@ -64,7 +66,7 @@ ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
struct ide_taskfile *tf = &cmd.tf; struct ide_taskfile *tf = &cmd.tf;
memset(&cmd, 0, sizeof(cmd)); memset(&cmd, 0, sizeof(cmd));
if (rq->cmd[0] == REQ_PARK_HEADS) { if (scsi_req(rq)->cmd[0] == REQ_PARK_HEADS) {
drive->sleep = *(unsigned long *)rq->special; drive->sleep = *(unsigned long *)rq->special;
drive->dev_flags |= IDE_DFLAG_SLEEPING; drive->dev_flags |= IDE_DFLAG_SLEEPING;
tf->command = ATA_CMD_IDLEIMMEDIATE; tf->command = ATA_CMD_IDLEIMMEDIATE;


@ -18,8 +18,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
} }
memset(&rqpm, 0, sizeof(rqpm)); memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND; scsi_req_init(rq);
ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
rq->special = &rqpm; rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_SUSPEND; rqpm.pm_step = IDE_PM_START_SUSPEND;
if (mesg.event == PM_EVENT_PRETHAW) if (mesg.event == PM_EVENT_PRETHAW)
@ -88,8 +89,9 @@ int generic_ide_resume(struct device *dev)
} }
memset(&rqpm, 0, sizeof(rqpm)); memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_ATA_PM_RESUME; scsi_req_init(rq);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
rq->rq_flags |= RQF_PREEMPT; rq->rq_flags |= RQF_PREEMPT;
rq->special = &rqpm; rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME; rqpm.pm_step = IDE_PM_START_RESUME;
@ -221,10 +223,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
#ifdef DEBUG_PM #ifdef DEBUG_PM
printk("%s: completing PM request, %s\n", drive->name, printk("%s: completing PM request, %s\n", drive->name,
(rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) ? "suspend" : "resume"); (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif #endif
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
blk_stop_queue(q); blk_stop_queue(q);
else else
drive->dev_flags &= ~IDE_DFLAG_BLOCKED; drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
@ -240,11 +242,13 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{ {
struct ide_pm_state *pm = rq->special; struct ide_pm_state *pm = rq->special;
if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND && if (blk_rq_is_private(rq) &&
ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
pm->pm_step == IDE_PM_START_SUSPEND) pm->pm_step == IDE_PM_START_SUSPEND)
/* Mark drive blocked when starting the suspend sequence. */ /* Mark drive blocked when starting the suspend sequence. */
drive->dev_flags |= IDE_DFLAG_BLOCKED; drive->dev_flags |= IDE_DFLAG_BLOCKED;
else if (rq->cmd_type == REQ_TYPE_ATA_PM_RESUME && else if (blk_rq_is_private(rq) &&
ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
pm->pm_step == IDE_PM_START_RESUME) { pm->pm_step == IDE_PM_START_RESUME) {
/* /*
* The first thing we do on wakeup is to wait for BSY bit to * The first thing we do on wakeup is to wait for BSY bit to


@ -741,6 +741,14 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
} }
} }
static int ide_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
struct ide_request *req = blk_mq_rq_to_pdu(rq);
req->sreq.sense = req->sense;
return 0;
}
/* /*
* init request queue * init request queue
*/ */
@ -758,11 +766,18 @@ static int ide_init_queue(ide_drive_t *drive)
* limits and LBA48 we could raise it but as yet * limits and LBA48 we could raise it but as yet
* do not. * do not.
*/ */
q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif));
q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif));
if (!q) if (!q)
return 1; return 1;
q->request_fn = do_ide_request;
q->init_rq_fn = ide_init_rq;
q->cmd_size = sizeof(struct ide_request);
if (blk_init_allocated_queue(q) < 0) {
blk_cleanup_queue(q);
return 1;
}
q->queuedata = drive; q->queuedata = drive;
blk_queue_segment_boundary(q, 0xffff); blk_queue_segment_boundary(q, 0xffff);
@ -1131,10 +1146,12 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
ide_port_for_each_dev(i, drive, hwif) { ide_port_for_each_dev(i, drive, hwif) {
u8 j = (hwif->index * MAX_DRIVES) + i; u8 j = (hwif->index * MAX_DRIVES) + i;
u16 *saved_id = drive->id; u16 *saved_id = drive->id;
struct request *saved_sense_rq = drive->sense_rq;
memset(drive, 0, sizeof(*drive)); memset(drive, 0, sizeof(*drive));
memset(saved_id, 0, SECTOR_SIZE); memset(saved_id, 0, SECTOR_SIZE);
drive->id = saved_id; drive->id = saved_id;
drive->sense_rq = saved_sense_rq;
drive->media = ide_disk; drive->media = ide_disk;
drive->select = (i << 4) | ATA_DEVICE_OBS; drive->select = (i << 4) | ATA_DEVICE_OBS;
@ -1241,6 +1258,7 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
int i; int i;
ide_port_for_each_dev(i, drive, hwif) { ide_port_for_each_dev(i, drive, hwif) {
kfree(drive->sense_rq);
kfree(drive->id); kfree(drive->id);
kfree(drive); kfree(drive);
} }
@ -1248,11 +1266,10 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
{ {
ide_drive_t *drive;
int i; int i;
for (i = 0; i < MAX_DRIVES; i++) { for (i = 0; i < MAX_DRIVES; i++) {
ide_drive_t *drive;
drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node); drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
if (drive == NULL) if (drive == NULL)
goto out_nomem; goto out_nomem;
@ -1267,12 +1284,21 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
*/ */
drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node); drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
if (drive->id == NULL) if (drive->id == NULL)
goto out_nomem; goto out_free_drive;
drive->sense_rq = kmalloc(sizeof(struct request) +
sizeof(struct ide_request), GFP_KERNEL);
if (!drive->sense_rq)
goto out_free_id;
hwif->devices[i] = drive; hwif->devices[i] = drive;
} }
return 0; return 0;
out_free_id:
kfree(drive->id);
out_free_drive:
kfree(drive);
out_nomem: out_nomem:
ide_port_free_devices(hwif); ide_port_free_devices(hwif);
return -ENOMEM; return -ENOMEM;


@ -282,7 +282,7 @@ static void idetape_analyze_error(ide_drive_t *drive)
/* correct remaining bytes to transfer */ /* correct remaining bytes to transfer */
if (pc->flags & PC_FLAG_DMA_ERROR) if (pc->flags & PC_FLAG_DMA_ERROR)
rq->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]); scsi_req(rq)->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
/* /*
* If error was the result of a zero-length read or write command, * If error was the result of a zero-length read or write command,
@ -316,7 +316,7 @@ static void idetape_analyze_error(ide_drive_t *drive)
pc->flags |= PC_FLAG_ABORT; pc->flags |= PC_FLAG_ABORT;
} }
if (!(pc->flags & PC_FLAG_ABORT) && if (!(pc->flags & PC_FLAG_ABORT) &&
(blk_rq_bytes(rq) - rq->resid_len)) (blk_rq_bytes(rq) - scsi_req(rq)->resid_len))
pc->retries = IDETAPE_MAX_PC_RETRIES + 1; pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
} }
} }
@ -348,7 +348,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
"itself - Aborting request!\n"); "itself - Aborting request!\n");
} else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) { } else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
unsigned int blocks = unsigned int blocks =
(blk_rq_bytes(rq) - rq->resid_len) / tape->blk_size; (blk_rq_bytes(rq) - scsi_req(rq)->resid_len) / tape->blk_size;
tape->avg_size += blocks * tape->blk_size; tape->avg_size += blocks * tape->blk_size;
@ -560,7 +560,7 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
pc->flags |= PC_FLAG_WRITING; pc->flags |= PC_FLAG_WRITING;
} }
memcpy(rq->cmd, pc->c, 12); memcpy(scsi_req(rq)->cmd, pc->c, 12);
} }
static ide_startstop_t idetape_do_request(ide_drive_t *drive, static ide_startstop_t idetape_do_request(ide_drive_t *drive,
@ -570,14 +570,16 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
idetape_tape_t *tape = drive->driver_data; idetape_tape_t *tape = drive->driver_data;
struct ide_atapi_pc *pc = NULL; struct ide_atapi_pc *pc = NULL;
struct ide_cmd cmd; struct ide_cmd cmd;
struct scsi_request *req = scsi_req(rq);
u8 stat; u8 stat;
ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, sector: %llu, nr_sectors: %u", ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, sector: %llu, nr_sectors: %u",
rq->cmd[0], (unsigned long long)blk_rq_pos(rq), req->cmd[0], (unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq)); blk_rq_sectors(rq));
BUG_ON(!(rq->cmd_type == REQ_TYPE_DRV_PRIV || BUG_ON(!blk_rq_is_private(rq));
rq->cmd_type == REQ_TYPE_ATA_SENSE)); BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC &&
ide_req(rq)->type != ATA_PRIV_SENSE);
/* Retry a failed packet command */ /* Retry a failed packet command */
if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) { if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
@ -592,7 +594,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
stat = hwif->tp_ops->read_status(hwif); stat = hwif->tp_ops->read_status(hwif);
if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 && if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
(rq->cmd[13] & REQ_IDETAPE_PC2) == 0) (req->cmd[13] & REQ_IDETAPE_PC2) == 0)
drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
if (drive->dev_flags & IDE_DFLAG_POST_RESET) { if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
@ -609,7 +611,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
} else if (time_after(jiffies, tape->dsc_timeout)) { } else if (time_after(jiffies, tape->dsc_timeout)) {
printk(KERN_ERR "ide-tape: %s: DSC timeout\n", printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
tape->name); tape->name);
if (rq->cmd[13] & REQ_IDETAPE_PC2) { if (req->cmd[13] & REQ_IDETAPE_PC2) {
idetape_media_access_finished(drive); idetape_media_access_finished(drive);
return ide_stopped; return ide_stopped;
} else { } else {
@ -626,23 +628,23 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
tape->postponed_rq = false; tape->postponed_rq = false;
} }
if (rq->cmd[13] & REQ_IDETAPE_READ) { if (req->cmd[13] & REQ_IDETAPE_READ) {
pc = &tape->queued_pc; pc = &tape->queued_pc;
ide_tape_create_rw_cmd(tape, pc, rq, READ_6); ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
goto out; goto out;
} }
if (rq->cmd[13] & REQ_IDETAPE_WRITE) { if (req->cmd[13] & REQ_IDETAPE_WRITE) {
pc = &tape->queued_pc; pc = &tape->queued_pc;
ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6); ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
goto out; goto out;
} }
if (rq->cmd[13] & REQ_IDETAPE_PC1) { if (req->cmd[13] & REQ_IDETAPE_PC1) {
pc = (struct ide_atapi_pc *)rq->special; pc = (struct ide_atapi_pc *)rq->special;
rq->cmd[13] &= ~(REQ_IDETAPE_PC1); req->cmd[13] &= ~(REQ_IDETAPE_PC1);
rq->cmd[13] |= REQ_IDETAPE_PC2; req->cmd[13] |= REQ_IDETAPE_PC2;
goto out; goto out;
} }
if (rq->cmd[13] & REQ_IDETAPE_PC2) { if (req->cmd[13] & REQ_IDETAPE_PC2) {
idetape_media_access_finished(drive); idetape_media_access_finished(drive);
return ide_stopped; return ide_stopped;
} }
@ -852,9 +854,10 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE); BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
BUG_ON(size < 0 || size % tape->blk_size); BUG_ON(size < 0 || size % tape->blk_size);
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_DRV_PRIV; scsi_req_init(rq);
rq->cmd[13] = cmd; ide_req(rq)->type = ATA_PRIV_MISC;
scsi_req(rq)->cmd[13] = cmd;
rq->rq_disk = tape->disk; rq->rq_disk = tape->disk;
rq->__sector = tape->first_frame; rq->__sector = tape->first_frame;
@ -868,7 +871,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
blk_execute_rq(drive->queue, tape->disk, rq, 0); blk_execute_rq(drive->queue, tape->disk, rq, 0);
/* calculate the number of transferred bytes and update buffer state */ /* calculate the number of transferred bytes and update buffer state */
size -= rq->resid_len; size -= scsi_req(rq)->resid_len;
tape->cur = tape->buf; tape->cur = tape->buf;
if (cmd == REQ_IDETAPE_READ) if (cmd == REQ_IDETAPE_READ)
tape->valid = size; tape->valid = size;


@ -428,10 +428,12 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
{ {
struct request *rq; struct request *rq;
int error; int error;
int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM); rq = blk_get_request(drive->queue,
rq->cmd_type = REQ_TYPE_ATA_TASKFILE; (cmd->tf_flags & IDE_TFLAG_WRITE) ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
ide_req(rq)->type = ATA_PRIV_TASKFILE;
/* /*
* (ks) We transfer currently only whole sectors. * (ks) We transfer currently only whole sectors.


@ -54,7 +54,7 @@
#define DRV_NAME "sis5513" #define DRV_NAME "sis5513"
/* registers layout and init values are chipset family dependent */ /* registers layout and init values are chipset family dependent */
#undef ATA_16
#define ATA_16 0x01 #define ATA_16 0x01
#define ATA_33 0x02 #define ATA_33 0x02
#define ATA_66 0x03 #define ATA_66 0x03


@ -1009,7 +1009,7 @@ static int cached_dev_congested(void *data, int bits)
struct request_queue *q = bdev_get_queue(dc->bdev); struct request_queue *q = bdev_get_queue(dc->bdev);
int ret = 0; int ret = 0;
if (bdi_congested(&q->backing_dev_info, bits)) if (bdi_congested(q->backing_dev_info, bits))
return 1; return 1;
if (cached_dev_get(dc)) { if (cached_dev_get(dc)) {
@ -1018,7 +1018,7 @@ static int cached_dev_congested(void *data, int bits)
for_each_cache(ca, d->c, i) { for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev); q = bdev_get_queue(ca->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits); ret |= bdi_congested(q->backing_dev_info, bits);
} }
cached_dev_put(dc); cached_dev_put(dc);
@ -1032,7 +1032,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
struct gendisk *g = dc->disk.disk; struct gendisk *g = dc->disk.disk;
g->queue->make_request_fn = cached_dev_make_request; g->queue->make_request_fn = cached_dev_make_request;
g->queue->backing_dev_info.congested_fn = cached_dev_congested; g->queue->backing_dev_info->congested_fn = cached_dev_congested;
dc->disk.cache_miss = cached_dev_cache_miss; dc->disk.cache_miss = cached_dev_cache_miss;
dc->disk.ioctl = cached_dev_ioctl; dc->disk.ioctl = cached_dev_ioctl;
} }
@ -1125,7 +1125,7 @@ static int flash_dev_congested(void *data, int bits)
for_each_cache(ca, d->c, i) { for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev); q = bdev_get_queue(ca->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits); ret |= bdi_congested(q->backing_dev_info, bits);
} }
return ret; return ret;
@ -1136,7 +1136,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
struct gendisk *g = d->disk; struct gendisk *g = d->disk;
g->queue->make_request_fn = flash_dev_make_request; g->queue->make_request_fn = flash_dev_make_request;
g->queue->backing_dev_info.congested_fn = flash_dev_congested; g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss; d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl; d->ioctl = flash_dev_ioctl;
} }


@ -807,7 +807,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
blk_queue_make_request(q, NULL); blk_queue_make_request(q, NULL);
d->disk->queue = q; d->disk->queue = q;
q->queuedata = d; q->queuedata = d;
q->backing_dev_info.congested_data = d; q->backing_dev_info->congested_data = d;
q->limits.max_hw_sectors = UINT_MAX; q->limits.max_hw_sectors = UINT_MAX;
q->limits.max_sectors = UINT_MAX; q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX; q->limits.max_segment_size = UINT_MAX;
@ -1132,9 +1132,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
set_capacity(dc->disk.disk, set_capacity(dc->disk.disk,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset); dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
dc->disk.disk->queue->backing_dev_info.ra_pages = dc->disk.disk->queue->backing_dev_info->ra_pages =
max(dc->disk.disk->queue->backing_dev_info.ra_pages, max(dc->disk.disk->queue->backing_dev_info->ra_pages,
q->backing_dev_info.ra_pages); q->backing_dev_info->ra_pages);
bch_cached_dev_request_init(dc); bch_cached_dev_request_init(dc);
bch_cached_dev_writeback_init(dc); bch_cached_dev_writeback_init(dc);

View File

@ -2284,7 +2284,7 @@ static void do_waker(struct work_struct *ws)
static int is_congested(struct dm_dev *dev, int bdi_bits) static int is_congested(struct dm_dev *dev, int bdi_bits)
{ {
struct request_queue *q = bdev_get_queue(dev->bdev); struct request_queue *q = bdev_get_queue(dev->bdev);
return bdi_congested(&q->backing_dev_info, bdi_bits); return bdi_congested(q->backing_dev_info, bdi_bits);
} }
static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)

View File

@ -92,7 +92,6 @@ struct mapped_device {
* io objects are allocated from here. * io objects are allocated from here.
*/ */
mempool_t *io_pool; mempool_t *io_pool;
mempool_t *rq_pool;
struct bio_set *bs; struct bio_set *bs;

View File

@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
static int dev_is_congested(struct dm_dev *dev, int bdi_bits) static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
{ {
struct request_queue *q = bdev_get_queue(dev->bdev); struct request_queue *q = bdev_get_queue(dev->bdev);
return bdi_congested(&q->backing_dev_info, bdi_bits); return bdi_congested(q->backing_dev_info, bdi_bits);
} }
static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits) static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)

View File

@ -92,12 +92,6 @@ struct multipath {
unsigned queue_mode; unsigned queue_mode;
/*
* We must use a mempool of dm_mpath_io structs so that we
* can resubmit bios on error.
*/
mempool_t *mpio_pool;
struct mutex work_mutex; struct mutex work_mutex;
struct work_struct trigger_event; struct work_struct trigger_event;
@ -115,8 +109,6 @@ struct dm_mpath_io {
typedef int (*action_fn) (struct pgpath *pgpath); typedef int (*action_fn) (struct pgpath *pgpath);
static struct kmem_cache *_mpio_cache;
static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work); static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work); static void activate_path(struct work_struct *work);
@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
init_waitqueue_head(&m->pg_init_wait); init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex); mutex_init(&m->work_mutex);
m->mpio_pool = NULL;
m->queue_mode = DM_TYPE_NONE; m->queue_mode = DM_TYPE_NONE;
m->ti = ti; m->ti = ti;
@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
m->queue_mode = DM_TYPE_MQ_REQUEST_BASED; m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else else
m->queue_mode = DM_TYPE_REQUEST_BASED; m->queue_mode = DM_TYPE_REQUEST_BASED;
} } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
unsigned min_ios = dm_get_reserved_rq_based_ios();
m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
if (!m->mpio_pool)
return -ENOMEM;
}
else if (m->queue_mode == DM_TYPE_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios); INIT_WORK(&m->process_queued_bios, process_queued_bios);
/* /*
* bio-based doesn't support any direct scsi_dh management; * bio-based doesn't support any direct scsi_dh management;
@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m)
kfree(m->hw_handler_name); kfree(m->hw_handler_name);
kfree(m->hw_handler_params); kfree(m->hw_handler_params);
mempool_destroy(m->mpio_pool);
kfree(m); kfree(m);
} }
@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info)
return info->ptr; return info->ptr;
} }
static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
struct dm_mpath_io *mpio;
if (!m->mpio_pool) {
/* Use blk-mq pdu memory requested via per_io_data_size */
mpio = get_mpio(info);
memset(mpio, 0, sizeof(*mpio));
return mpio;
}
mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
if (!mpio)
return NULL;
memset(mpio, 0, sizeof(*mpio));
info->ptr = mpio;
return mpio;
}
static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
/* Only needed for non blk-mq (.request_fn) multipath */
if (m->mpio_pool) {
struct dm_mpath_io *mpio = info->ptr;
info->ptr = NULL;
mempool_free(mpio, m->mpio_pool);
}
}
static size_t multipath_per_bio_data_size(void) static size_t multipath_per_bio_data_size(void)
{ {
return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details); return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m)
/* /*
* Map cloned requests (request-based multipath) * Map cloned requests (request-based multipath)
*/ */
static int __multipath_map(struct dm_target *ti, struct request *clone, static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
union map_info *map_context, union map_info *map_context,
struct request *rq, struct request **__clone) struct request **__clone)
{ {
struct multipath *m = ti->private; struct multipath *m = ti->private;
int r = DM_MAPIO_REQUEUE; int r = DM_MAPIO_REQUEUE;
size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq); size_t nr_bytes = blk_rq_bytes(rq);
struct pgpath *pgpath; struct pgpath *pgpath;
struct block_device *bdev; struct block_device *bdev;
struct dm_mpath_io *mpio; struct dm_mpath_io *mpio = get_mpio(map_context);
struct request *clone;
/* Do we need to select a new pgpath? */ /* Do we need to select a new pgpath? */
pgpath = lockless_dereference(m->current_pgpath); pgpath = lockless_dereference(m->current_pgpath);
@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
return r; return r;
} }
mpio = set_mpio(m, map_context); memset(mpio, 0, sizeof(*mpio));
if (!mpio)
/* ENOMEM, requeue */
return r;
mpio->pgpath = pgpath; mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes; mpio->nr_bytes = nr_bytes;
bdev = pgpath->path.dev->bdev; bdev = pgpath->path.dev->bdev;
if (clone) { clone = blk_get_request(bdev_get_queue(bdev),
/* rq->cmd_flags | REQ_NOMERGE,
* Old request-based interface: allocated clone is passed in. GFP_ATOMIC);
* Used by: .request_fn stacked on .request_fn path(s). if (IS_ERR(clone)) {
*/ /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
clone->q = bdev_get_queue(bdev); return r;
clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
} else {
/*
* blk-mq request-based interface; used by both:
* .request_fn stacked on blk-mq path(s) and
* blk-mq stacked on blk-mq path(s).
*/
clone = blk_mq_alloc_request(bdev_get_queue(bdev),
rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
clear_request_fn_mpio(m, map_context);
return r;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
*__clone = clone;
} }
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
*__clone = clone;
if (pgpath->pg->ps.type->start_io) if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps, pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
static int multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context)
{
return __multipath_map(ti, clone, map_context, NULL, NULL);
}
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
union map_info *map_context,
struct request **clone)
{
return __multipath_map(ti, NULL, map_context, rq, clone);
}
static void multipath_release_clone(struct request *clone) static void multipath_release_clone(struct request *clone)
{ {
blk_mq_free_request(clone); blk_put_request(clone);
} }
/* /*
@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_write_same_bios = 1; ti->num_write_same_bios = 1;
if (m->queue_mode == DM_TYPE_BIO_BASED) if (m->queue_mode == DM_TYPE_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size(); ti->per_io_data_size = multipath_per_bio_data_size();
else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) else
ti->per_io_data_size = sizeof(struct dm_mpath_io); ti->per_io_data_size = sizeof(struct dm_mpath_io);
return 0; return 0;
@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
if (ps->type->end_io) if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
} }
clear_request_fn_mpio(m, map_context);
return r; return r;
} }
@ -2060,7 +1977,6 @@ static struct target_type multipath_target = {
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = multipath_ctr, .ctr = multipath_ctr,
.dtr = multipath_dtr, .dtr = multipath_dtr,
.map_rq = multipath_map,
.clone_and_map_rq = multipath_clone_and_map, .clone_and_map_rq = multipath_clone_and_map,
.release_clone_rq = multipath_release_clone, .release_clone_rq = multipath_release_clone,
.rq_end_io = multipath_end_io, .rq_end_io = multipath_end_io,
@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void)
{ {
int r; int r;
/* allocate a slab for the dm_mpath_ios */
_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
if (!_mpio_cache)
return -ENOMEM;
r = dm_register_target(&multipath_target); r = dm_register_target(&multipath_target);
if (r < 0) { if (r < 0) {
DMERR("request-based register failed %d", r); DMERR("request-based register failed %d", r);
@ -2120,8 +2031,6 @@ static int __init dm_multipath_init(void)
bad_alloc_kmultipathd: bad_alloc_kmultipathd:
dm_unregister_target(&multipath_target); dm_unregister_target(&multipath_target);
bad_register_target: bad_register_target:
kmem_cache_destroy(_mpio_cache);
return r; return r;
} }
@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void)
destroy_workqueue(kmultipathd); destroy_workqueue(kmultipathd);
dm_unregister_target(&multipath_target); dm_unregister_target(&multipath_target);
kmem_cache_destroy(_mpio_cache);
} }
module_init(dm_multipath_init); module_init(dm_multipath_init);

View File

@ -109,28 +109,6 @@ void dm_stop_queue(struct request_queue *q)
dm_mq_stop_queue(q); dm_mq_stop_queue(q);
} }
static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
gfp_t gfp_mask)
{
return mempool_alloc(md->io_pool, gfp_mask);
}
static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
mempool_free(tio, tio->md->io_pool);
}
static struct request *alloc_old_clone_request(struct mapped_device *md,
gfp_t gfp_mask)
{
return mempool_alloc(md->rq_pool, gfp_mask);
}
static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
mempool_free(rq, md->rq_pool);
}
/* /*
* Partial completion handling for request-based dm * Partial completion handling for request-based dm
*/ */
@ -185,7 +163,7 @@ static void end_clone_bio(struct bio *clone)
static struct dm_rq_target_io *tio_from_request(struct request *rq) static struct dm_rq_target_io *tio_from_request(struct request *rq)
{ {
return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); return blk_mq_rq_to_pdu(rq);
} }
static void rq_end_stats(struct mapped_device *md, struct request *orig) static void rq_end_stats(struct mapped_device *md, struct request *orig)
@ -233,31 +211,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
dm_put(md); dm_put(md);
} }
static void free_rq_clone(struct request *clone)
{
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
blk_rq_unprep_clone(clone);
/*
* It is possible for a clone_old_rq() allocated clone to
* get passed in -- it may not yet have a request_queue.
* This is known to occur if the error target replaces
* a multipath target that has a request_fn queue stacked
* on blk-mq queue(s).
*/
if (clone->q && clone->q->mq_ops)
/* stacked on blk-mq queue(s) */
tio->ti->type->release_clone_rq(clone);
else if (!md->queue->mq_ops)
/* request_fn queue stacked on request_fn queue(s) */
free_old_clone_request(md, clone);
if (!md->queue->mq_ops)
free_old_rq_tio(tio);
}
/* /*
* Complete the clone and the original request. * Complete the clone and the original request.
* Must be called without clone's queue lock held, * Must be called without clone's queue lock held,
@ -270,20 +223,9 @@ static void dm_end_request(struct request *clone, int error)
struct mapped_device *md = tio->md; struct mapped_device *md = tio->md;
struct request *rq = tio->orig; struct request *rq = tio->orig;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { blk_rq_unprep_clone(clone);
rq->errors = clone->errors; tio->ti->type->release_clone_rq(clone);
rq->resid_len = clone->resid_len;
if (rq->sense)
/*
* We are using the sense buffer of the original
* request.
* So setting the length of the sense data is enough.
*/
rq->sense_len = clone->sense_len;
}
free_rq_clone(clone);
rq_end_stats(md, rq); rq_end_stats(md, rq);
if (!rq->q->mq_ops) if (!rq->q->mq_ops)
blk_end_request_all(rq, error); blk_end_request_all(rq, error);
@ -292,22 +234,6 @@ static void dm_end_request(struct request *clone, int error)
rq_completed(md, rw, true); rq_completed(md, rw, true);
} }
static void dm_unprep_request(struct request *rq)
{
struct dm_rq_target_io *tio = tio_from_request(rq);
struct request *clone = tio->clone;
if (!rq->q->mq_ops) {
rq->special = NULL;
rq->rq_flags &= ~RQF_DONTPREP;
}
if (clone)
free_rq_clone(clone);
else if (!tio->md->queue->mq_ops)
free_old_rq_tio(tio);
}
/* /*
* Requeue the original request of a clone. * Requeue the original request of a clone.
*/ */
@ -346,7 +272,10 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
int rw = rq_data_dir(rq); int rw = rq_data_dir(rq);
rq_end_stats(md, rq); rq_end_stats(md, rq);
dm_unprep_request(rq); if (tio->clone) {
blk_rq_unprep_clone(tio->clone);
tio->ti->type->release_clone_rq(tio->clone);
}
if (!rq->q->mq_ops) if (!rq->q->mq_ops)
dm_old_requeue_request(rq); dm_old_requeue_request(rq);
@ -401,14 +330,11 @@ static void dm_softirq_done(struct request *rq)
if (!clone) { if (!clone) {
rq_end_stats(tio->md, rq); rq_end_stats(tio->md, rq);
rw = rq_data_dir(rq); rw = rq_data_dir(rq);
if (!rq->q->mq_ops) { if (!rq->q->mq_ops)
blk_end_request_all(rq, tio->error); blk_end_request_all(rq, tio->error);
rq_completed(tio->md, rw, false); else
free_old_rq_tio(tio);
} else {
blk_mq_end_request(rq, tio->error); blk_mq_end_request(rq, tio->error);
rq_completed(tio->md, rw, false); rq_completed(tio->md, rw, false);
}
return; return;
} }
@ -452,16 +378,6 @@ static void end_clone_request(struct request *clone, int error)
{ {
struct dm_rq_target_io *tio = clone->end_io_data; struct dm_rq_target_io *tio = clone->end_io_data;
if (!clone->q->mq_ops) {
/*
* For just cleaning up the information of the queue in which
* the clone was dispatched.
* The clone is *NOT* freed actually here because it is alloced
* from dm own mempool (RQF_ALLOCED isn't set).
*/
__blk_put_request(clone->q, clone);
}
/* /*
* Actual request completion is done in a softirq context which doesn't * Actual request completion is done in a softirq context which doesn't
* hold the clone's queue lock. Otherwise, deadlock could occur because: * hold the clone's queue lock. Otherwise, deadlock could occur because:
@ -511,9 +427,6 @@ static int setup_clone(struct request *clone, struct request *rq,
if (r) if (r)
return r; return r;
clone->cmd = rq->cmd;
clone->cmd_len = rq->cmd_len;
clone->sense = rq->sense;
clone->end_io = end_clone_request; clone->end_io = end_clone_request;
clone->end_io_data = tio; clone->end_io_data = tio;
@ -522,28 +435,6 @@ static int setup_clone(struct request *clone, struct request *rq,
return 0; return 0;
} }
static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
/*
* Create clone for use with .request_fn request_queue
*/
struct request *clone;
clone = alloc_old_clone_request(md, gfp_mask);
if (!clone)
return NULL;
blk_rq_init(NULL, clone);
if (setup_clone(clone, rq, tio, gfp_mask)) {
/* -ENOMEM */
free_old_clone_request(md, clone);
return NULL;
}
return clone;
}
static void map_tio_request(struct kthread_work *work); static void map_tio_request(struct kthread_work *work);
static void init_tio(struct dm_rq_target_io *tio, struct request *rq, static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
@ -565,60 +456,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
kthread_init_work(&tio->work, map_tio_request); kthread_init_work(&tio->work, map_tio_request);
} }
static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
struct mapped_device *md,
gfp_t gfp_mask)
{
struct dm_rq_target_io *tio;
int srcu_idx;
struct dm_table *table;
tio = alloc_old_rq_tio(md, gfp_mask);
if (!tio)
return NULL;
init_tio(tio, rq, md);
table = dm_get_live_table(md, &srcu_idx);
/*
* Must clone a request if this .request_fn DM device
* is stacked on .request_fn device(s).
*/
if (!dm_table_all_blk_mq_devices(table)) {
if (!clone_old_rq(rq, md, tio, gfp_mask)) {
dm_put_live_table(md, srcu_idx);
free_old_rq_tio(tio);
return NULL;
}
}
dm_put_live_table(md, srcu_idx);
return tio;
}
/*
* Called with the queue lock held.
*/
static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
struct mapped_device *md = q->queuedata;
struct dm_rq_target_io *tio;
if (unlikely(rq->special)) {
DMWARN("Already has something in rq->special.");
return BLKPREP_KILL;
}
tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
if (!tio)
return BLKPREP_DEFER;
rq->special = tio;
rq->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
/* /*
* Returns: * Returns:
* DM_MAPIO_* : the request has been processed as indicated * DM_MAPIO_* : the request has been processed as indicated
@ -633,31 +470,18 @@ static int map_request(struct dm_rq_target_io *tio)
struct request *rq = tio->orig; struct request *rq = tio->orig;
struct request *clone = NULL; struct request *clone = NULL;
if (tio->clone) { r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
clone = tio->clone;
r = ti->type->map_rq(ti, clone, &tio->info);
if (r == DM_MAPIO_DELAY_REQUEUE)
return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
} else {
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
if (r < 0) {
/* The target wants to complete the I/O */
dm_kill_unmapped_request(rq, r);
return r;
}
if (r == DM_MAPIO_REMAPPED &&
setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
ti->type->release_clone_rq(clone);
return DM_MAPIO_REQUEUE;
}
}
switch (r) { switch (r) {
case DM_MAPIO_SUBMITTED: case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */ /* The target has taken the I/O to submit by itself later */
break; break;
case DM_MAPIO_REMAPPED: case DM_MAPIO_REMAPPED:
if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
ti->type->release_clone_rq(clone);
return DM_MAPIO_REQUEUE;
}
/* The target has remapped the I/O so dispatch it */ /* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq)); blk_rq_pos(rq));
@ -716,6 +540,29 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
dm_get(md); dm_get(md);
} }
static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
{
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
/*
* Must initialize md member of tio, otherwise it won't
* be available in dm_mq_queue_rq.
*/
tio->md = md;
if (md->init_tio_pdu) {
/* target-specific per-io data is immediately after the tio */
tio->info.ptr = tio + 1;
}
return 0;
}
static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
return __dm_rq_init_rq(q->rq_alloc_data, rq);
}
static void map_tio_request(struct kthread_work *work) static void map_tio_request(struct kthread_work *work)
{ {
struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
@ -814,6 +661,7 @@ static void dm_old_request_fn(struct request_queue *q)
dm_start_request(md, rq); dm_start_request(md, rq);
tio = tio_from_request(rq); tio = tio_from_request(rq);
init_tio(tio, rq, md);
/* Establish tio->ti before queuing work (map_tio_request) */ /* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti; tio->ti = ti;
kthread_queue_work(&md->kworker, &tio->work); kthread_queue_work(&md->kworker, &tio->work);
@ -824,10 +672,23 @@ static void dm_old_request_fn(struct request_queue *q)
/* /*
* Fully initialize a .request_fn request-based queue. * Fully initialize a .request_fn request-based queue.
*/ */
int dm_old_init_request_queue(struct mapped_device *md) int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{ {
struct dm_target *immutable_tgt;
/* Fully initialize the queue */ /* Fully initialize the queue */
if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL)) md->queue->cmd_size = sizeof(struct dm_rq_target_io);
md->queue->rq_alloc_data = md;
md->queue->request_fn = dm_old_request_fn;
md->queue->init_rq_fn = dm_rq_init_rq;
immutable_tgt = dm_table_get_immutable_target(t);
if (immutable_tgt && immutable_tgt->per_io_data_size) {
/* any target-specific per-io data is immediately after the tio */
md->queue->cmd_size += immutable_tgt->per_io_data_size;
md->init_tio_pdu = true;
}
if (blk_init_allocated_queue(md->queue) < 0)
return -EINVAL; return -EINVAL;
/* disable dm_old_request_fn's merge heuristic by default */ /* disable dm_old_request_fn's merge heuristic by default */
@ -835,7 +696,6 @@ int dm_old_init_request_queue(struct mapped_device *md)
dm_init_normal_md_queue(md); dm_init_normal_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done); blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_old_prep_fn);
/* Initialize the request-based DM worker thread */ /* Initialize the request-based DM worker thread */
kthread_init_worker(&md->kworker); kthread_init_worker(&md->kworker);
@ -856,21 +716,7 @@ static int dm_mq_init_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx, unsigned int hctx_idx, unsigned int request_idx,
unsigned int numa_node) unsigned int numa_node)
{ {
struct mapped_device *md = data; return __dm_rq_init_rq(data, rq);
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
/*
* Must initialize md member of tio, otherwise it won't
* be available in dm_mq_queue_rq.
*/
tio->md = md;
if (md->init_tio_pdu) {
/* target-specific per-io data is immediately after the tio */
tio->info.ptr = tio + 1;
}
return 0;
} }
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,

View File

@ -48,7 +48,7 @@ struct dm_rq_clone_bio_info {
bool dm_use_blk_mq_default(void); bool dm_use_blk_mq_default(void);
bool dm_use_blk_mq(struct mapped_device *md); bool dm_use_blk_mq(struct mapped_device *md);
int dm_old_init_request_queue(struct mapped_device *md); int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t); int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
void dm_mq_cleanup_mapped_device(struct mapped_device *md); void dm_mq_cleanup_mapped_device(struct mapped_device *md);

View File

@ -1750,7 +1750,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
if (likely(q)) if (likely(q))
r |= bdi_congested(&q->backing_dev_info, bdi_bits); r |= bdi_congested(q->backing_dev_info, bdi_bits);
else else
DMWARN_LIMIT("%s: any_congested: nonexistent device %s", DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
dm_device_name(t->md), dm_device_name(t->md),

View File

@ -131,12 +131,6 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
return -EIO; return -EIO;
} }
static int io_err_map_rq(struct dm_target *ti, struct request *clone,
union map_info *map_context)
{
return -EIO;
}
static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq, static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
union map_info *map_context, union map_info *map_context,
struct request **clone) struct request **clone)
@ -161,7 +155,6 @@ static struct target_type error_target = {
.ctr = io_err_ctr, .ctr = io_err_ctr,
.dtr = io_err_dtr, .dtr = io_err_dtr,
.map = io_err_map, .map = io_err_map,
.map_rq = io_err_map_rq,
.clone_and_map_rq = io_err_clone_and_map_rq, .clone_and_map_rq = io_err_clone_and_map_rq,
.release_clone_rq = io_err_release_clone_rq, .release_clone_rq = io_err_release_clone_rq,
.direct_access = io_err_direct_access, .direct_access = io_err_direct_access,

View File

@ -2711,7 +2711,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
return 1; return 1;
q = bdev_get_queue(pt->data_dev->bdev); q = bdev_get_queue(pt->data_dev->bdev);
return bdi_congested(&q->backing_dev_info, bdi_bits); return bdi_congested(q->backing_dev_info, bdi_bits);
} }
static void requeue_bios(struct pool *pool) static void requeue_bios(struct pool *pool)

View File

@ -91,7 +91,6 @@ static int dm_numa_node = DM_NUMA_NODE;
*/ */
struct dm_md_mempools { struct dm_md_mempools {
mempool_t *io_pool; mempool_t *io_pool;
mempool_t *rq_pool;
struct bio_set *bs; struct bio_set *bs;
}; };
@ -466,13 +465,16 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (r > 0) { if (r > 0) {
/* /*
* Target determined this ioctl is being issued against * Target determined this ioctl is being issued against a
* a logical partition of the parent bdev; so extra * subset of the parent bdev; require extra privileges.
* validation is needed.
*/ */
r = scsi_verify_blk_ioctl(NULL, cmd); if (!capable(CAP_SYS_RAWIO)) {
if (r) DMWARN_LIMIT(
"%s: sending ioctl %x to DM device without required privilege.",
current->comm, cmd);
r = -ENOIOCTLCMD;
goto out; goto out;
}
} }
r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
@ -1314,7 +1316,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
* With request-based DM we only need to check the * With request-based DM we only need to check the
* top-level queue for congestion. * top-level queue for congestion.
*/ */
r = md->queue->backing_dev_info.wb.state & bdi_bits; r = md->queue->backing_dev_info->wb.state & bdi_bits;
} else { } else {
map = dm_get_live_table_fast(md); map = dm_get_live_table_fast(md);
if (map) if (map)
@ -1397,7 +1399,7 @@ void dm_init_md_queue(struct mapped_device *md)
* - must do so here (in alloc_dev callchain) before queue is used * - must do so here (in alloc_dev callchain) before queue is used
*/ */
md->queue->queuedata = md; md->queue->queuedata = md;
md->queue->backing_dev_info.congested_data = md; md->queue->backing_dev_info->congested_data = md;
} }
void dm_init_normal_md_queue(struct mapped_device *md) void dm_init_normal_md_queue(struct mapped_device *md)
@ -1408,7 +1410,7 @@ void dm_init_normal_md_queue(struct mapped_device *md)
/* /*
* Initialize aspects of queue that aren't relevant for blk-mq * Initialize aspects of queue that aren't relevant for blk-mq
*/ */
md->queue->backing_dev_info.congested_fn = dm_any_congested; md->queue->backing_dev_info->congested_fn = dm_any_congested;
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
} }
@ -1419,7 +1421,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->kworker_task) if (md->kworker_task)
kthread_stop(md->kworker_task); kthread_stop(md->kworker_task);
mempool_destroy(md->io_pool); mempool_destroy(md->io_pool);
mempool_destroy(md->rq_pool);
if (md->bs) if (md->bs)
bioset_free(md->bs); bioset_free(md->bs);
@ -1595,12 +1596,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
goto out; goto out;
} }
BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); BUG_ON(!p || md->io_pool || md->bs);
md->io_pool = p->io_pool; md->io_pool = p->io_pool;
p->io_pool = NULL; p->io_pool = NULL;
md->rq_pool = p->rq_pool;
p->rq_pool = NULL;
md->bs = p->bs; md->bs = p->bs;
p->bs = NULL; p->bs = NULL;
@ -1777,7 +1776,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
switch (type) { switch (type) {
case DM_TYPE_REQUEST_BASED: case DM_TYPE_REQUEST_BASED:
r = dm_old_init_request_queue(md); r = dm_old_init_request_queue(md, t);
if (r) { if (r) {
DMERR("Cannot initialize queue for request-based mapped device"); DMERR("Cannot initialize queue for request-based mapped device");
return r; return r;
@ -2493,7 +2492,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
unsigned integrity, unsigned per_io_data_size) unsigned integrity, unsigned per_io_data_size)
{ {
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
struct kmem_cache *cachep = NULL;
unsigned int pool_size = 0; unsigned int pool_size = 0;
unsigned int front_pad; unsigned int front_pad;
@ -2503,20 +2501,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
switch (type) { switch (type) {
case DM_TYPE_BIO_BASED: case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED: case DM_TYPE_DAX_BIO_BASED:
cachep = _io_cache;
pool_size = dm_get_reserved_bio_based_ios(); pool_size = dm_get_reserved_bio_based_ios();
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
if (!pools->io_pool)
goto out;
break; break;
case DM_TYPE_REQUEST_BASED: case DM_TYPE_REQUEST_BASED:
cachep = _rq_tio_cache;
pool_size = dm_get_reserved_rq_based_ios();
pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
if (!pools->rq_pool)
goto out;
/* fall through to setup remaining rq-based pools */
case DM_TYPE_MQ_REQUEST_BASED: case DM_TYPE_MQ_REQUEST_BASED:
if (!pool_size) pool_size = dm_get_reserved_rq_based_ios();
pool_size = dm_get_reserved_rq_based_ios();
front_pad = offsetof(struct dm_rq_clone_bio_info, clone); front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_io_data_size is used for blk-mq pdu at queue allocation */ /* per_io_data_size is used for blk-mq pdu at queue allocation */
break; break;
@ -2524,12 +2518,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
BUG(); BUG();
} }
if (cachep) {
pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
if (!pools->io_pool)
goto out;
}
pools->bs = bioset_create_nobvec(pool_size, front_pad); pools->bs = bioset_create_nobvec(pool_size, front_pad);
if (!pools->bs) if (!pools->bs)
goto out; goto out;
@ -2551,7 +2539,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
return; return;
mempool_destroy(pools->io_pool); mempool_destroy(pools->io_pool);
mempool_destroy(pools->rq_pool);
if (pools->bs) if (pools->bs)
bioset_free(pools->bs); bioset_free(pools->bs);

View File

@ -95,8 +95,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
/* /*
* To check whether the target type is request-based or not (bio-based). * To check whether the target type is request-based or not (bio-based).
*/ */
#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \ #define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)
((t)->type->clone_and_map_rq != NULL))
/* /*
* To check whether the target type is a hybrid (capable of being * To check whether the target type is a hybrid (capable of being

View File

@ -62,7 +62,7 @@ static int linear_congested(struct mddev *mddev, int bits)
for (i = 0; i < mddev->raid_disks && !ret ; i++) { for (i = 0; i < mddev->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits); ret |= bdi_congested(q->backing_dev_info, bits);
} }
return ret; return ret;

View File

@ -5346,8 +5346,8 @@ int md_run(struct mddev *mddev)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
else else
queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue); queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info->congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = md_congested; mddev->queue->backing_dev_info->congested_fn = md_congested;
} }
if (pers->sync_request) { if (pers->sync_request) {
if (mddev->kobj.sd && if (mddev->kobj.sd &&
@ -5704,7 +5704,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
__md_stop_writes(mddev); __md_stop_writes(mddev);
__md_stop(mddev); __md_stop(mddev);
mddev->queue->backing_dev_info.congested_fn = NULL; mddev->queue->backing_dev_info->congested_fn = NULL;
/* tell userspace to handle 'inactive' */ /* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state); sysfs_notify_dirent_safe(mddev->sysfs_state);

View File

@ -169,7 +169,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
if (rdev && !test_bit(Faulty, &rdev->flags)) { if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev); struct request_queue *q = bdev_get_queue(rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits); ret |= bdi_congested(q->backing_dev_info, bits);
/* Just like multipath_map, we just check the /* Just like multipath_map, we just check the
* first available device * first available device
*/ */

View File

@ -41,7 +41,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
for (i = 0; i < raid_disks && !ret ; i++) { for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev); struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits); ret |= bdi_congested(q->backing_dev_info, bits);
} }
return ret; return ret;
} }
@ -420,8 +420,8 @@ static int raid0_run(struct mddev *mddev)
*/ */
int stripe = mddev->raid_disks * int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE; (mddev->chunk_sectors << 9) / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe; mddev->queue->backing_dev_info->ra_pages = 2* stripe;
} }
dump_zones(mddev); dump_zones(mddev);

View File

@ -744,9 +744,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
* non-congested targets, it can be removed * non-congested targets, it can be removed
*/ */
if ((bits & (1 << WB_async_congested)) || 1) if ((bits & (1 << WB_async_congested)) || 1)
ret |= bdi_congested(&q->backing_dev_info, bits); ret |= bdi_congested(q->backing_dev_info, bits);
else else
ret &= bdi_congested(&q->backing_dev_info, bits); ret &= bdi_congested(q->backing_dev_info, bits);
} }
} }
rcu_read_unlock(); rcu_read_unlock();
@ -1170,10 +1170,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
int i, disks; int i, disks;
struct bitmap *bitmap = mddev->bitmap; struct bitmap *bitmap = mddev->bitmap;
unsigned long flags; unsigned long flags;
const int op = bio_op(bio);
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_opf &
(REQ_PREFLUSH | REQ_FUA));
struct md_rdev *blocked_rdev; struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb; struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL; struct raid1_plug_cb *plug = NULL;
@ -1389,7 +1385,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
conf->mirrors[i].rdev->data_offset); conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request; mbio->bi_end_io = raid1_end_write_request;
bio_set_op_attrs(mbio, op, do_flush_fua | do_sync); mbio->bi_opf = bio_op(bio) |
(bio->bi_opf & (REQ_SYNC | REQ_PREFLUSH | REQ_FUA));
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
!test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) && !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
conf->raid_disks - mddev->degraded > 1) conf->raid_disks - mddev->degraded > 1)

View File

@ -860,7 +860,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
if (rdev && !test_bit(Faulty, &rdev->flags)) { if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev); struct request_queue *q = bdev_get_queue(rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits); ret |= bdi_congested(q->backing_dev_info, bits);
} }
} }
rcu_read_unlock(); rcu_read_unlock();
@ -3841,8 +3841,8 @@ static int raid10_run(struct mddev *mddev)
* maybe... * maybe...
*/ */
stripe /= conf->geo.near_copies; stripe /= conf->geo.near_copies;
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe; mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
} }
if (md_integrity_register(mddev)) if (md_integrity_register(mddev))
@ -4643,8 +4643,8 @@ static void end_reshape(struct r10conf *conf)
int stripe = conf->geo.raid_disks * int stripe = conf->geo.raid_disks *
((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
stripe /= conf->geo.near_copies; stripe /= conf->geo.near_copies;
if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
} }
conf->fullsync = 0; conf->fullsync = 0;
} }

View File

@ -6331,10 +6331,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
mddev_suspend(mddev); mddev_suspend(mddev);
conf->skip_copy = new; conf->skip_copy = new;
if (new) if (new)
mddev->queue->backing_dev_info.capabilities |= mddev->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES; BDI_CAP_STABLE_WRITES;
else else
mddev->queue->backing_dev_info.capabilities &= mddev->queue->backing_dev_info->capabilities &=
~BDI_CAP_STABLE_WRITES; ~BDI_CAP_STABLE_WRITES;
mddev_resume(mddev); mddev_resume(mddev);
} }
@ -7153,8 +7153,8 @@ static int raid5_run(struct mddev *mddev)
int data_disks = conf->previous_raid_disks - conf->max_degraded; int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks * int stripe = data_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE); ((mddev->chunk_sectors << 9) / PAGE_SIZE);
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe; mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
chunk_size = mddev->chunk_sectors << 9; chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size); blk_queue_io_min(mddev->queue, chunk_size);
@ -7763,8 +7763,8 @@ static void end_reshape(struct r5conf *conf)
int data_disks = conf->raid_disks - conf->max_degraded; int data_disks = conf->raid_disks - conf->max_degraded;
int stripe = data_disks * ((conf->chunk_sectors << 9) int stripe = data_disks * ((conf->chunk_sectors << 9)
/ PAGE_SIZE); / PAGE_SIZE);
if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
} }
} }
} }

View File

@ -2000,16 +2000,6 @@ static int msb_bd_getgeo(struct block_device *bdev,
return 0; return 0;
} }
static int msb_prepare_req(struct request_queue *q, struct request *req)
{
if (req->cmd_type != REQ_TYPE_FS) {
blk_dump_rq_flags(req, "MS unsupported request");
return BLKPREP_KILL;
}
req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
static void msb_submit_req(struct request_queue *q) static void msb_submit_req(struct request_queue *q)
{ {
struct memstick_dev *card = q->queuedata; struct memstick_dev *card = q->queuedata;
@ -2132,7 +2122,6 @@ static int msb_init_disk(struct memstick_dev *card)
} }
msb->queue->queuedata = card; msb->queue->queuedata = card;
blk_queue_prep_rq(msb->queue, msb_prepare_req);
blk_queue_bounce_limit(msb->queue, limit); blk_queue_bounce_limit(msb->queue, limit);
blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES); blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);

View File

@ -827,18 +827,6 @@ static void mspro_block_start(struct memstick_dev *card)
spin_unlock_irqrestore(&msb->q_lock, flags); spin_unlock_irqrestore(&msb->q_lock, flags);
} }
static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
{
if (req->cmd_type != REQ_TYPE_FS) {
blk_dump_rq_flags(req, "MSPro unsupported request");
return BLKPREP_KILL;
}
req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
static void mspro_block_submit_req(struct request_queue *q) static void mspro_block_submit_req(struct request_queue *q)
{ {
struct memstick_dev *card = q->queuedata; struct memstick_dev *card = q->queuedata;
@ -1228,7 +1216,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
} }
msb->queue->queuedata = card; msb->queue->queuedata = card;
blk_queue_prep_rq(msb->queue, mspro_block_prepare_req);
blk_queue_bounce_limit(msb->queue, limit); blk_queue_bounce_limit(msb->queue, limit);
blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);

View File

@ -2320,10 +2320,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
SmpPassthroughReply_t *smprep; SmpPassthroughReply_t *smprep;
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
memcpy(req->sense, smprep, sizeof(*smprep)); memcpy(scsi_req(req)->sense, smprep, sizeof(*smprep));
req->sense_len = sizeof(*smprep); scsi_req(req)->sense_len = sizeof(*smprep);
req->resid_len = 0; scsi_req(req)->resid_len = 0;
rsp->resid_len -= smprep->ResponseDataLength; scsi_req(rsp)->resid_len -= smprep->ResponseDataLength;
} else { } else {
printk(MYIOC_s_ERR_FMT printk(MYIOC_s_ERR_FMT
"%s: smp passthru reply failed to be returned\n", "%s: smp passthru reply failed to be returned\n",

View File

@ -30,15 +30,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
{ {
struct mmc_queue *mq = q->queuedata; struct mmc_queue *mq = q->queuedata;
/*
* We only like normal block requests and discards.
*/
if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
req_op(req) != REQ_OP_SECURE_ERASE) {
blk_dump_rq_flags(req, "MMC bad request");
return BLKPREP_KILL;
}
if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
return BLKPREP_KILL; return BLKPREP_KILL;

View File

@ -84,9 +84,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
nsect = blk_rq_cur_bytes(req) >> tr->blkshift; nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
buf = bio_data(req->bio); buf = bio_data(req->bio);
if (req->cmd_type != REQ_TYPE_FS)
return -EIO;
if (req_op(req) == REQ_OP_FLUSH) if (req_op(req) == REQ_OP_FLUSH)
return tr->flush(dev); return tr->flush(dev);
@ -94,16 +91,16 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
get_capacity(req->rq_disk)) get_capacity(req->rq_disk))
return -EIO; return -EIO;
if (req_op(req) == REQ_OP_DISCARD) switch (req_op(req)) {
case REQ_OP_DISCARD:
return tr->discard(dev, block, nsect); return tr->discard(dev, block, nsect);
case REQ_OP_READ:
if (rq_data_dir(req) == READ) {
for (; nsect > 0; nsect--, block++, buf += tr->blksize) for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf)) if (tr->readsect(dev, block, buf))
return -EIO; return -EIO;
rq_flush_dcache_pages(req); rq_flush_dcache_pages(req);
return 0; return 0;
} else { case REQ_OP_WRITE:
if (!tr->writesect) if (!tr->writesect)
return -EIO; return -EIO;
@ -112,6 +109,8 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (tr->writesect(dev, block, buf)) if (tr->writesect(dev, block, buf))
return -EIO; return -EIO;
return 0; return 0;
default:
return -EIO;
} }
} }

Some files were not shown because too many files have changed in this diff.