mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 09:40:58 +07:00

commit e26feff647
Merge branch 'for-2.6.28' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.28' of git://git.kernel.dk/linux-2.6-block: (132 commits)
  doc/cdrom: Trvial documentation error, file not present
  block_dev: fix kernel-doc in new functions
  block: add some comments around the bio read-write flags
  block: mark bio_split_pool static
  block: Find bio sector offset given idx and offset
  block: gendisk integrity wrapper
  block: Switch blk_integrity_compare from bdev to gendisk
  block: Fix double put in blk_integrity_unregister
  block: Introduce integrity data ownership flag
  block: revert part of d7533ad0e132f92e75c1b2eb7c26387b25a583c1
  bio.h: Remove unused conditional code
  block: remove end_{queued|dequeued}_request()
  block: change elevator to use __blk_end_request()
  gdrom: change to use __blk_end_request()
  memstick: change to use __blk_end_request()
  virtio_blk: change to use __blk_end_request()
  blktrace: use BLKTRACE_BDEV_SIZE as the name size for setup structure
  block: add lld busy state exporting interface
  block: Fix blk_start_queueing() to not kick a stopped queue
  include blktrace_api.h in headers_install
  ...
@@ -337,7 +337,7 @@ With scatterlists, you use the resulting mapping like this:
 	int i, count = dma_map_sg(dev, sglist, nents, direction);
 	struct scatterlist *sg;
 
-	for (i = 0, sg = sglist; i < count; i++, sg++) {
+	for_each_sg(sglist, sg, count, i) {
 		hw_address[i] = sg_dma_address(sg);
 		hw_len[i] = sg_dma_len(sg);
 	}
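For context, here is a minimal sketch (my illustration, not part of the patch) of the full map/iterate/unmap cycle around the for_each_sg() form shown above. The hw_address[] and hw_len[] arrays are the same placeholders the documentation uses, and the DMA direction is only an example.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: map the scatterlist, program one hardware descriptor per mapped
 * segment, and later unmap with the original nents (not the mapped count). */
static int mydrv_map_and_program(struct device *dev, struct scatterlist *sglist,
				 int nents)
{
	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
	if (count == 0)
		return -ENOMEM;		/* mapping failed */

	for_each_sg(sglist, sg, count, i) {
		hw_address[i] = sg_dma_address(sg);
		hw_len[i] = sg_dma_len(sg);
	}
	return 0;
}

/* once the transfer has completed:
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */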
@@ -364,6 +364,10 @@ X!Edrivers/pnp/system.c
!Eblock/blk-barrier.c
!Eblock/blk-tag.c
!Iblock/blk-tag.c
!Eblock/blk-integrity.c
!Iblock/blktrace.c
!Iblock/genhd.c
!Eblock/genhd.c
     </chapter>

     <chapter id="chrdev">
@@ -30,12 +30,18 @@ write_expire	(in ms)
 	Similar to read_expire mentioned above, but for writes.
 
 
-fifo_batch
+fifo_batch	(number of requests)
 ----------
 
-When a read request expires its deadline, we must move some requests from
-the sorted io scheduler list to the block device dispatch queue. fifo_batch
-controls how many requests we move.
+Requests are grouped into ``batches'' of a particular data direction (read or
+write) which are serviced in increasing sector order. To limit extra seeking,
+deadline expiries are only checked between batches. fifo_batch controls the
+maximum number of requests per batch.
+
+This parameter tunes the balance between per-request latency and aggregate
+throughput. When low latency is the primary concern, smaller is better (where
+a value of 1 yields first-come first-served behaviour). Increasing fifo_batch
+generally improves throughput, at the cost of latency variation.
 
 
 writes_starved	(number of dispatches)
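As an illustration of tuning the knob described above, a small userspace sketch (my addition, not part of the patch): it writes a new fifo_batch value through sysfs. The disk name "sda" and the value 1 are placeholders, and the deadline scheduler is assumed to be active on that disk.

#include <stdio.h>

/* Hypothetical tuning helper; adjust the path and value for the real system. */
int main(void)
{
	FILE *f = fopen("/sys/block/sda/queue/iosched/fifo_batch", "w");

	if (!f)
		return 1;
	/* 1 gives roughly first-come first-served behaviour, per the text above */
	fprintf(f, "1\n");
	return fclose(f) ? 1 : 0;
}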
@@ -145,8 +145,7 @@ useful for reading photocds.
 
 To play an audio CD, you should first unmount and remove any data
 CDROM. Any of the CDROM player programs should then work (workman,
-workbone, cdplayer, etc.). Lacking anything else, you could use the
-cdtester program in Documentation/cdrom/sbpcd.
+workbone, cdplayer, etc.).
 
 On a few drives, you can read digital audio directly using a program
 such as cdda2wav. The only types of drive which I've heard support
@@ -4,8 +4,8 @@
 
 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
-			blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o \
-			cmd-filter.o
+			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
+			ioctl.o genhd.o scsi_ioctl.o cmd-filter.o
 
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
@ -462,7 +462,7 @@ static void as_antic_stop(struct as_data *ad)
|
||||
del_timer(&ad->antic_timer);
|
||||
ad->antic_status = ANTIC_FINISHED;
|
||||
/* see as_work_handler */
|
||||
kblockd_schedule_work(&ad->antic_work);
|
||||
kblockd_schedule_work(ad->q, &ad->antic_work);
|
||||
}
|
||||
}
|
||||
|
||||
@ -483,7 +483,7 @@ static void as_antic_timeout(unsigned long data)
|
||||
aic = ad->io_context->aic;
|
||||
|
||||
ad->antic_status = ANTIC_FINISHED;
|
||||
kblockd_schedule_work(&ad->antic_work);
|
||||
kblockd_schedule_work(q, &ad->antic_work);
|
||||
|
||||
if (aic->ttime_samples == 0) {
|
||||
/* process anticipated on has exited or timed out*/
|
||||
@ -745,6 +745,14 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
|
||||
*/
|
||||
static int as_can_anticipate(struct as_data *ad, struct request *rq)
|
||||
{
|
||||
#if 0 /* disable for now, we need to check tag level as well */
|
||||
/*
|
||||
* SSD device without seek penalty, disable idling
|
||||
*/
|
||||
if (blk_queue_nonrot(ad->q))
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
if (!ad->io_context)
|
||||
/*
|
||||
* Last request submitted was a write
|
||||
@ -844,7 +852,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
|
||||
if (ad->changed_batch && ad->nr_dispatched == 1) {
|
||||
ad->current_batch_expires = jiffies +
|
||||
ad->batch_expire[ad->batch_data_dir];
|
||||
kblockd_schedule_work(&ad->antic_work);
|
||||
kblockd_schedule_work(q, &ad->antic_work);
|
||||
ad->changed_batch = 0;
|
||||
|
||||
if (ad->batch_data_dir == REQ_SYNC)
|
||||
|
@ -293,7 +293,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
|
||||
bio->bi_end_io = bio_end_empty_barrier;
|
||||
bio->bi_private = &wait;
|
||||
bio->bi_bdev = bdev;
|
||||
submit_bio(1 << BIO_RW_BARRIER, bio);
|
||||
submit_bio(WRITE_BARRIER, bio);
|
||||
|
||||
wait_for_completion(&wait);
|
||||
|
||||
@ -315,3 +315,73 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(blkdev_issue_flush);
|
||||
|
||||
static void blkdev_discard_end_io(struct bio *bio, int err)
|
||||
{
|
||||
if (err) {
|
||||
if (err == -EOPNOTSUPP)
|
||||
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
|
||||
clear_bit(BIO_UPTODATE, &bio->bi_flags);
|
||||
}
|
||||
|
||||
bio_put(bio);
|
||||
}
|
||||
|
||||
/**
|
||||
* blkdev_issue_discard - queue a discard
|
||||
* @bdev: blockdev to issue discard for
|
||||
* @sector: start sector
|
||||
* @nr_sects: number of sectors to discard
|
||||
* @gfp_mask: memory allocation flags (for bio_alloc)
|
||||
*
|
||||
* Description:
|
||||
* Issue a discard request for the sectors in question. Does not wait.
|
||||
*/
|
||||
int blkdev_issue_discard(struct block_device *bdev,
|
||||
sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
|
||||
{
|
||||
struct request_queue *q;
|
||||
struct bio *bio;
|
||||
int ret = 0;
|
||||
|
||||
if (bdev->bd_disk == NULL)
|
||||
return -ENXIO;
|
||||
|
||||
q = bdev_get_queue(bdev);
|
||||
if (!q)
|
||||
return -ENXIO;
|
||||
|
||||
if (!q->prepare_discard_fn)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
while (nr_sects && !ret) {
|
||||
bio = bio_alloc(gfp_mask, 0);
|
||||
if (!bio)
|
||||
return -ENOMEM;
|
||||
|
||||
bio->bi_end_io = blkdev_discard_end_io;
|
||||
bio->bi_bdev = bdev;
|
||||
|
||||
bio->bi_sector = sector;
|
||||
|
||||
if (nr_sects > q->max_hw_sectors) {
|
||||
bio->bi_size = q->max_hw_sectors << 9;
|
||||
nr_sects -= q->max_hw_sectors;
|
||||
sector += q->max_hw_sectors;
|
||||
} else {
|
||||
bio->bi_size = nr_sects << 9;
|
||||
nr_sects = 0;
|
||||
}
|
||||
bio_get(bio);
|
||||
submit_bio(DISCARD_BARRIER, bio);
|
||||
|
||||
/* Check if it failed immediately */
|
||||
if (bio_flagged(bio, BIO_EOPNOTSUPP))
|
||||
ret = -EOPNOTSUPP;
|
||||
else if (!bio_flagged(bio, BIO_UPTODATE))
|
||||
ret = -EIO;
|
||||
bio_put(bio);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(blkdev_issue_discard);
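A hedged usage sketch of the new helper (my illustration, not part of the patch): a filesystem-level caller issuing a discard and treating "device cannot discard" as a no-op. The bdev, start and nr_sects values are placeholders.

#include <linux/blkdev.h>

/* Sketch: discard a block range, ignoring devices without discard support. */
static int mydrv_trim_range(struct block_device *bdev, sector_t start,
			    sector_t nr_sects)
{
	int err = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);

	if (err == -EOPNOTSUPP)
		err = 0;	/* no discard support: nothing to do */
	return err;
}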
|
||||
|
block/blk-core.c (629 lines changed): file diff suppressed because it is too large
@ -16,7 +16,7 @@
|
||||
/**
|
||||
* blk_end_sync_rq - executes a completion event on a request
|
||||
* @rq: request to complete
|
||||
* @error: end io status of the request
|
||||
* @error: end I/O status of the request
|
||||
*/
|
||||
static void blk_end_sync_rq(struct request *rq, int error)
|
||||
{
|
||||
@ -41,7 +41,7 @@ static void blk_end_sync_rq(struct request *rq, int error)
|
||||
* @done: I/O completion handler
|
||||
*
|
||||
* Description:
|
||||
* Insert a fully prepared request at the back of the io scheduler queue
|
||||
* Insert a fully prepared request at the back of the I/O scheduler queue
|
||||
* for execution. Don't wait for completion.
|
||||
*/
|
||||
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
|
||||
@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
|
||||
* @at_head: insert request at head or tail of queue
|
||||
*
|
||||
* Description:
|
||||
* Insert a fully prepared request at the back of the io scheduler queue
|
||||
* Insert a fully prepared request at the back of the I/O scheduler queue
|
||||
* for execution and wait for completion.
|
||||
*/
|
||||
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
|
||||
|
@ -108,51 +108,51 @@ int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
|
||||
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
|
||||
|
||||
/**
|
||||
* blk_integrity_compare - Compare integrity profile of two block devices
|
||||
* @b1: Device to compare
|
||||
* @b2: Device to compare
|
||||
* blk_integrity_compare - Compare integrity profile of two disks
|
||||
* @gd1: Disk to compare
|
||||
* @gd2: Disk to compare
|
||||
*
|
||||
* Description: Meta-devices like DM and MD need to verify that all
|
||||
* sub-devices use the same integrity format before advertising to
|
||||
* upper layers that they can send/receive integrity metadata. This
|
||||
* function can be used to check whether two block devices have
|
||||
* function can be used to check whether two gendisk devices have
|
||||
* compatible integrity formats.
|
||||
*/
|
||||
int blk_integrity_compare(struct block_device *bd1, struct block_device *bd2)
|
||||
int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
|
||||
{
|
||||
struct blk_integrity *b1 = bd1->bd_disk->integrity;
|
||||
struct blk_integrity *b2 = bd2->bd_disk->integrity;
|
||||
struct blk_integrity *b1 = gd1->integrity;
|
||||
struct blk_integrity *b2 = gd2->integrity;
|
||||
|
||||
BUG_ON(bd1->bd_disk == NULL);
|
||||
BUG_ON(bd2->bd_disk == NULL);
|
||||
if (!b1 && !b2)
|
||||
return 0;
|
||||
|
||||
if (!b1 || !b2)
|
||||
return 0;
|
||||
return -1;
|
||||
|
||||
if (b1->sector_size != b2->sector_size) {
|
||||
printk(KERN_ERR "%s: %s/%s sector sz %u != %u\n", __func__,
|
||||
bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
|
||||
gd1->disk_name, gd2->disk_name,
|
||||
b1->sector_size, b2->sector_size);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (b1->tuple_size != b2->tuple_size) {
|
||||
printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
|
||||
bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
|
||||
gd1->disk_name, gd2->disk_name,
|
||||
b1->tuple_size, b2->tuple_size);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
|
||||
printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
|
||||
bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
|
||||
gd1->disk_name, gd2->disk_name,
|
||||
b1->tag_size, b2->tag_size);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (strcmp(b1->name, b2->name)) {
|
||||
printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
|
||||
bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
|
||||
gd1->disk_name, gd2->disk_name,
|
||||
b1->name, b2->name);
|
||||
return -1;
|
||||
}
|
||||
@ -331,7 +331,8 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
|
||||
return -1;
|
||||
|
||||
if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
|
||||
&disk->dev.kobj, "%s", "integrity")) {
|
||||
&disk_to_dev(disk)->kobj,
|
||||
"%s", "integrity")) {
|
||||
kmem_cache_free(integrity_cachep, bi);
|
||||
return -1;
|
||||
}
|
||||
@ -375,7 +376,7 @@ void blk_integrity_unregister(struct gendisk *disk)
|
||||
|
||||
kobject_uevent(&bi->kobj, KOBJ_REMOVE);
|
||||
kobject_del(&bi->kobj);
|
||||
kobject_put(&disk->dev.kobj);
|
||||
kmem_cache_free(integrity_cachep, bi);
|
||||
disk->integrity = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(blk_integrity_unregister);
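A brief sketch (my addition, assuming CONFIG_BLK_DEV_INTEGRITY is enabled) of how a stacking driver in the DM/MD style might use the gendisk-based compare when adding a member device; first_gd and new_gd are placeholder gendisk pointers.

/* Sketch: refuse to mix members whose integrity profiles differ. */
static int mydrv_check_member(struct gendisk *first_gd, struct gendisk *new_gd)
{
	if (blk_integrity_compare(first_gd, new_gd) < 0) {
		printk(KERN_ERR "integrity profiles differ, not adding member\n");
		return -EINVAL;
	}
	return 0;
}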
|
||||
|
@ -41,10 +41,10 @@ static int __blk_rq_unmap_user(struct bio *bio)
|
||||
}
|
||||
|
||||
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
|
||||
void __user *ubuf, unsigned int len)
|
||||
struct rq_map_data *map_data, void __user *ubuf,
|
||||
unsigned int len, int null_mapped, gfp_t gfp_mask)
|
||||
{
|
||||
unsigned long uaddr;
|
||||
unsigned int alignment;
|
||||
struct bio *bio, *orig_bio;
|
||||
int reading, ret;
|
||||
|
||||
@ -55,15 +55,17 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
|
||||
* direct dma. else, set up kernel bounce buffers
|
||||
*/
|
||||
uaddr = (unsigned long) ubuf;
|
||||
alignment = queue_dma_alignment(q) | q->dma_pad_mask;
|
||||
if (!(uaddr & alignment) && !(len & alignment))
|
||||
bio = bio_map_user(q, NULL, uaddr, len, reading);
|
||||
if (blk_rq_aligned(q, ubuf, len) && !map_data)
|
||||
bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
|
||||
else
|
||||
bio = bio_copy_user(q, uaddr, len, reading);
|
||||
bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
|
||||
|
||||
if (IS_ERR(bio))
|
||||
return PTR_ERR(bio);
|
||||
|
||||
if (null_mapped)
|
||||
bio->bi_flags |= (1 << BIO_NULL_MAPPED);
|
||||
|
||||
orig_bio = bio;
|
||||
blk_queue_bounce(q, &bio);
|
||||
|
||||
@ -85,17 +87,19 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
|
||||
* blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
|
||||
* @q: request queue where request should be inserted
|
||||
* @rq: request structure to fill
|
||||
* @map_data: pointer to the rq_map_data holding pages (if necessary)
|
||||
* @ubuf: the user buffer
|
||||
* @len: length of user data
|
||||
* @gfp_mask: memory allocation flags
|
||||
*
|
||||
* Description:
|
||||
* Data will be mapped directly for zero copy io, if possible. Otherwise
|
||||
* Data will be mapped directly for zero copy I/O, if possible. Otherwise
|
||||
* a kernel bounce buffer is used.
|
||||
*
|
||||
* A matching blk_rq_unmap_user() must be issued at the end of io, while
|
||||
* A matching blk_rq_unmap_user() must be issued at the end of I/O, while
|
||||
* still in process context.
|
||||
*
|
||||
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
|
||||
@ -105,16 +109,22 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
|
||||
* unmapping.
|
||||
*/
|
||||
int blk_rq_map_user(struct request_queue *q, struct request *rq,
|
||||
void __user *ubuf, unsigned long len)
|
||||
struct rq_map_data *map_data, void __user *ubuf,
|
||||
unsigned long len, gfp_t gfp_mask)
|
||||
{
|
||||
unsigned long bytes_read = 0;
|
||||
struct bio *bio = NULL;
|
||||
int ret;
|
||||
int ret, null_mapped = 0;
|
||||
|
||||
if (len > (q->max_hw_sectors << 9))
|
||||
return -EINVAL;
|
||||
if (!len || !ubuf)
|
||||
if (!len)
|
||||
return -EINVAL;
|
||||
if (!ubuf) {
|
||||
if (!map_data || rq_data_dir(rq) != READ)
|
||||
return -EINVAL;
|
||||
null_mapped = 1;
|
||||
}
|
||||
|
||||
while (bytes_read != len) {
|
||||
unsigned long map_len, end, start;
|
||||
@ -132,7 +142,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
|
||||
if (end - start > BIO_MAX_PAGES)
|
||||
map_len -= PAGE_SIZE;
|
||||
|
||||
ret = __blk_rq_map_user(q, rq, ubuf, map_len);
|
||||
ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
|
||||
null_mapped, gfp_mask);
|
||||
if (ret < 0)
|
||||
goto unmap_rq;
|
||||
if (!bio)
|
||||
@ -154,18 +165,20 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
|
||||
EXPORT_SYMBOL(blk_rq_map_user);
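A hedged sketch of a caller using the new signature (mirroring the bsg update later in this diff); the function and its arguments are placeholders. Passing a NULL rq_map_data lets the block layer map or bounce the user buffer itself, and the original rq->bio is saved for the later unmap, as the kerneldoc above requires.

#include <linux/blkdev.h>

/* Sketch: map a user buffer into a prepared BLOCK_PC request and run it. */
static int mydrv_do_user_cmd(struct request_queue *q, struct gendisk *disk,
			     struct request *rq, void __user *ubuf,
			     unsigned long len)
{
	struct bio *bio;
	int err, ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;
	bio = rq->bio;			/* save: completion may change rq->bio */

	err = blk_execute_rq(q, disk, rq, 0);
	ret = blk_rq_unmap_user(bio);

	return err ? err : ret;
}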
|
||||
|
||||
/**
|
||||
* blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
|
||||
* blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
|
||||
* @q: request queue where request should be inserted
|
||||
* @rq: request to map data to
|
||||
* @map_data: pointer to the rq_map_data holding pages (if necessary)
|
||||
* @iov: pointer to the iovec
|
||||
* @iov_count: number of elements in the iovec
|
||||
* @len: I/O byte count
|
||||
* @gfp_mask: memory allocation flags
|
||||
*
|
||||
* Description:
|
||||
* Data will be mapped directly for zero copy io, if possible. Otherwise
|
||||
* Data will be mapped directly for zero copy I/O, if possible. Otherwise
|
||||
* a kernel bounce buffer is used.
|
||||
*
|
||||
* A matching blk_rq_unmap_user() must be issued at the end of io, while
|
||||
* A matching blk_rq_unmap_user() must be issued at the end of I/O, while
|
||||
* still in process context.
|
||||
*
|
||||
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
|
||||
@ -175,7 +188,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
|
||||
* unmapping.
|
||||
*/
|
||||
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
|
||||
struct sg_iovec *iov, int iov_count, unsigned int len)
|
||||
struct rq_map_data *map_data, struct sg_iovec *iov,
|
||||
int iov_count, unsigned int len, gfp_t gfp_mask)
|
||||
{
|
||||
struct bio *bio;
|
||||
int i, read = rq_data_dir(rq) == READ;
|
||||
@ -193,10 +207,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
|
||||
}
|
||||
}
|
||||
|
||||
if (unaligned || (q->dma_pad_mask & len))
|
||||
bio = bio_copy_user_iov(q, iov, iov_count, read);
|
||||
if (unaligned || (q->dma_pad_mask & len) || map_data)
|
||||
bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
|
||||
gfp_mask);
|
||||
else
|
||||
bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
|
||||
bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
|
||||
|
||||
if (IS_ERR(bio))
|
||||
return PTR_ERR(bio);
|
||||
@ -216,6 +231,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
|
||||
rq->buffer = rq->data = NULL;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(blk_rq_map_user_iov);
|
||||
|
||||
/**
|
||||
* blk_rq_unmap_user - unmap a request with user data
|
||||
@ -224,7 +240,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
|
||||
* Description:
|
||||
* Unmap a rq previously mapped by blk_rq_map_user(). The caller must
|
||||
* supply the original rq->bio from the blk_rq_map_user() return, since
|
||||
* the io completion may have changed rq->bio.
|
||||
* the I/O completion may have changed rq->bio.
|
||||
*/
|
||||
int blk_rq_unmap_user(struct bio *bio)
|
||||
{
|
||||
@ -250,7 +266,7 @@ int blk_rq_unmap_user(struct bio *bio)
|
||||
EXPORT_SYMBOL(blk_rq_unmap_user);
|
||||
|
||||
/**
|
||||
* blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
|
||||
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
|
||||
* @q: request queue where request should be inserted
|
||||
* @rq: request to fill
|
||||
* @kbuf: the kernel buffer
|
||||
@ -264,8 +280,6 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
|
||||
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
|
||||
unsigned int len, gfp_t gfp_mask)
|
||||
{
|
||||
unsigned long kaddr;
|
||||
unsigned int alignment;
|
||||
int reading = rq_data_dir(rq) == READ;
|
||||
int do_copy = 0;
|
||||
struct bio *bio;
|
||||
@ -275,11 +289,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
|
||||
if (!len || !kbuf)
|
||||
return -EINVAL;
|
||||
|
||||
kaddr = (unsigned long)kbuf;
|
||||
alignment = queue_dma_alignment(q) | q->dma_pad_mask;
|
||||
do_copy = ((kaddr & alignment) || (len & alignment) ||
|
||||
object_is_on_stack(kbuf));
|
||||
|
||||
do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
|
||||
if (do_copy)
|
||||
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
|
||||
else
|
||||
|
@ -11,7 +11,7 @@
|
||||
|
||||
void blk_recalc_rq_sectors(struct request *rq, int nsect)
|
||||
{
|
||||
if (blk_fs_request(rq)) {
|
||||
if (blk_fs_request(rq) || blk_discard_rq(rq)) {
|
||||
rq->hard_sector += nsect;
|
||||
rq->hard_nr_sectors -= nsect;
|
||||
|
||||
@ -41,12 +41,9 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
|
||||
void blk_recalc_rq_segments(struct request *rq)
|
||||
{
|
||||
int nr_phys_segs;
|
||||
int nr_hw_segs;
|
||||
unsigned int phys_size;
|
||||
unsigned int hw_size;
|
||||
struct bio_vec *bv, *bvprv = NULL;
|
||||
int seg_size;
|
||||
int hw_seg_size;
|
||||
int cluster;
|
||||
struct req_iterator iter;
|
||||
int high, highprv = 1;
|
||||
@ -56,8 +53,8 @@ void blk_recalc_rq_segments(struct request *rq)
|
||||
return;
|
||||
|
||||
cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
|
||||
hw_seg_size = seg_size = 0;
|
||||
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
|
||||
seg_size = 0;
|
||||
phys_size = nr_phys_segs = 0;
|
||||
rq_for_each_segment(bv, rq, iter) {
|
||||
/*
|
||||
* the trick here is making sure that a high page is never
|
||||
@ -66,7 +63,7 @@ void blk_recalc_rq_segments(struct request *rq)
|
||||
*/
|
||||
high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
|
||||
if (high || highprv)
|
||||
goto new_hw_segment;
|
||||
goto new_segment;
|
||||
if (cluster) {
|
||||
if (seg_size + bv->bv_len > q->max_segment_size)
|
||||
goto new_segment;
|
||||
@ -74,40 +71,19 @@ void blk_recalc_rq_segments(struct request *rq)
|
||||
goto new_segment;
|
||||
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
|
||||
goto new_segment;
|
||||
if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
|
||||
goto new_hw_segment;
|
||||
|
||||
seg_size += bv->bv_len;
|
||||
hw_seg_size += bv->bv_len;
|
||||
bvprv = bv;
|
||||
continue;
|
||||
}
|
||||
new_segment:
|
||||
if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
|
||||
!BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
|
||||
hw_seg_size += bv->bv_len;
|
||||
else {
|
||||
new_hw_segment:
|
||||
if (nr_hw_segs == 1 &&
|
||||
hw_seg_size > rq->bio->bi_hw_front_size)
|
||||
rq->bio->bi_hw_front_size = hw_seg_size;
|
||||
hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
|
||||
nr_hw_segs++;
|
||||
}
|
||||
|
||||
nr_phys_segs++;
|
||||
bvprv = bv;
|
||||
seg_size = bv->bv_len;
|
||||
highprv = high;
|
||||
}
|
||||
|
||||
if (nr_hw_segs == 1 &&
|
||||
hw_seg_size > rq->bio->bi_hw_front_size)
|
||||
rq->bio->bi_hw_front_size = hw_seg_size;
|
||||
if (hw_seg_size > rq->biotail->bi_hw_back_size)
|
||||
rq->biotail->bi_hw_back_size = hw_seg_size;
|
||||
rq->nr_phys_segments = nr_phys_segs;
|
||||
rq->nr_hw_segments = nr_hw_segs;
|
||||
}
|
||||
|
||||
void blk_recount_segments(struct request_queue *q, struct bio *bio)
|
||||
@ -120,7 +96,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
|
||||
blk_recalc_rq_segments(&rq);
|
||||
bio->bi_next = nxt;
|
||||
bio->bi_phys_segments = rq.nr_phys_segments;
|
||||
bio->bi_hw_segments = rq.nr_hw_segments;
|
||||
bio->bi_flags |= (1 << BIO_SEG_VALID);
|
||||
}
|
||||
EXPORT_SYMBOL(blk_recount_segments);
|
||||
@ -131,13 +106,17 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
|
||||
if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
|
||||
return 0;
|
||||
|
||||
if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
|
||||
return 0;
|
||||
if (bio->bi_size + nxt->bi_size > q->max_segment_size)
|
||||
return 0;
|
||||
|
||||
if (!bio_has_data(bio))
|
||||
return 1;
|
||||
|
||||
if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* bio and nxt are contigous in memory, check if the queue allows
|
||||
* bio and nxt are contiguous in memory; check if the queue allows
|
||||
* these two to be merged into one
|
||||
*/
|
||||
if (BIO_SEG_BOUNDARY(q, bio, nxt))
|
||||
@ -146,22 +125,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
|
||||
struct bio *nxt)
|
||||
{
|
||||
if (!bio_flagged(bio, BIO_SEG_VALID))
|
||||
blk_recount_segments(q, bio);
|
||||
if (!bio_flagged(nxt, BIO_SEG_VALID))
|
||||
blk_recount_segments(q, nxt);
|
||||
if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
|
||||
BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
|
||||
return 0;
|
||||
if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* map a request to scatterlist, return number of sg entries setup. Caller
|
||||
* must make sure sg can hold rq->nr_phys_segments entries
|
||||
@ -275,10 +238,9 @@ static inline int ll_new_hw_segment(struct request_queue *q,
|
||||
struct request *req,
|
||||
struct bio *bio)
|
||||
{
|
||||
int nr_hw_segs = bio_hw_segments(q, bio);
|
||||
int nr_phys_segs = bio_phys_segments(q, bio);
|
||||
|
||||
if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
|
||||
if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
|
||||
|| req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
|
||||
req->cmd_flags |= REQ_NOMERGE;
|
||||
if (req == q->last_merge)
|
||||
@ -290,7 +252,6 @@ static inline int ll_new_hw_segment(struct request_queue *q,
|
||||
* This will form the start of a new hw segment. Bump both
|
||||
* counters.
|
||||
*/
|
||||
req->nr_hw_segments += nr_hw_segs;
|
||||
req->nr_phys_segments += nr_phys_segs;
|
||||
return 1;
|
||||
}
|
||||
@ -299,7 +260,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
|
||||
struct bio *bio)
|
||||
{
|
||||
unsigned short max_sectors;
|
||||
int len;
|
||||
|
||||
if (unlikely(blk_pc_request(req)))
|
||||
max_sectors = q->max_hw_sectors;
|
||||
@ -316,19 +276,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
|
||||
blk_recount_segments(q, req->biotail);
|
||||
if (!bio_flagged(bio, BIO_SEG_VALID))
|
||||
blk_recount_segments(q, bio);
|
||||
len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
|
||||
if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
|
||||
&& !BIOVEC_VIRT_OVERSIZE(len)) {
|
||||
int mergeable = ll_new_mergeable(q, req, bio);
|
||||
|
||||
if (mergeable) {
|
||||
if (req->nr_hw_segments == 1)
|
||||
req->bio->bi_hw_front_size = len;
|
||||
if (bio->bi_hw_segments == 1)
|
||||
bio->bi_hw_back_size = len;
|
||||
}
|
||||
return mergeable;
|
||||
}
|
||||
|
||||
return ll_new_hw_segment(q, req, bio);
|
||||
}
|
||||
@ -337,7 +284,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
|
||||
struct bio *bio)
|
||||
{
|
||||
unsigned short max_sectors;
|
||||
int len;
|
||||
|
||||
if (unlikely(blk_pc_request(req)))
|
||||
max_sectors = q->max_hw_sectors;
|
||||
@ -351,23 +297,10 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
|
||||
q->last_merge = NULL;
|
||||
return 0;
|
||||
}
|
||||
len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
|
||||
if (!bio_flagged(bio, BIO_SEG_VALID))
|
||||
blk_recount_segments(q, bio);
|
||||
if (!bio_flagged(req->bio, BIO_SEG_VALID))
|
||||
blk_recount_segments(q, req->bio);
|
||||
if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
|
||||
!BIOVEC_VIRT_OVERSIZE(len)) {
|
||||
int mergeable = ll_new_mergeable(q, req, bio);
|
||||
|
||||
if (mergeable) {
|
||||
if (bio->bi_hw_segments == 1)
|
||||
bio->bi_hw_front_size = len;
|
||||
if (req->nr_hw_segments == 1)
|
||||
req->biotail->bi_hw_back_size = len;
|
||||
}
|
||||
return mergeable;
|
||||
}
|
||||
|
||||
return ll_new_hw_segment(q, req, bio);
|
||||
}
|
||||
@ -376,7 +309,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
|
||||
struct request *next)
|
||||
{
|
||||
int total_phys_segments;
|
||||
int total_hw_segments;
|
||||
|
||||
/*
|
||||
* First check if the either of the requests are re-queued
|
||||
@ -398,26 +330,11 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
|
||||
if (total_phys_segments > q->max_phys_segments)
|
||||
return 0;
|
||||
|
||||
total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
|
||||
if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
|
||||
int len = req->biotail->bi_hw_back_size +
|
||||
next->bio->bi_hw_front_size;
|
||||
/*
|
||||
* propagate the combined length to the end of the requests
|
||||
*/
|
||||
if (req->nr_hw_segments == 1)
|
||||
req->bio->bi_hw_front_size = len;
|
||||
if (next->nr_hw_segments == 1)
|
||||
next->biotail->bi_hw_back_size = len;
|
||||
total_hw_segments--;
|
||||
}
|
||||
|
||||
if (total_hw_segments > q->max_hw_segments)
|
||||
if (total_phys_segments > q->max_hw_segments)
|
||||
return 0;
|
||||
|
||||
/* Merge is OK... */
|
||||
req->nr_phys_segments = total_phys_segments;
|
||||
req->nr_hw_segments = total_hw_segments;
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -470,17 +387,21 @@ static int attempt_merge(struct request_queue *q, struct request *req,
|
||||
elv_merge_requests(q, req, next);
|
||||
|
||||
if (req->rq_disk) {
|
||||
struct hd_struct *part
|
||||
= get_part(req->rq_disk, req->sector);
|
||||
disk_round_stats(req->rq_disk);
|
||||
req->rq_disk->in_flight--;
|
||||
if (part) {
|
||||
part_round_stats(part);
|
||||
part->in_flight--;
|
||||
}
|
||||
struct hd_struct *part;
|
||||
int cpu;
|
||||
|
||||
cpu = part_stat_lock();
|
||||
part = disk_map_sector_rcu(req->rq_disk, req->sector);
|
||||
|
||||
part_round_stats(cpu, part);
|
||||
part_dec_in_flight(part);
|
||||
|
||||
part_stat_unlock();
|
||||
}
|
||||
|
||||
req->ioprio = ioprio_best(req->ioprio, next->ioprio);
|
||||
if (blk_rq_cpu_valid(next))
|
||||
req->cpu = next->cpu;
|
||||
|
||||
__blk_put_request(q, next);
|
||||
return 1;
|
||||
|
@ -32,6 +32,23 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
|
||||
}
|
||||
EXPORT_SYMBOL(blk_queue_prep_rq);
|
||||
|
||||
/**
|
||||
* blk_queue_set_discard - set a discard_sectors function for queue
|
||||
* @q: queue
|
||||
* @dfn: prepare_discard function
|
||||
*
|
||||
* It's possible for a queue to register a discard callback which is used
|
||||
* to transform a discard request into the appropriate type for the
|
||||
* hardware. If none is registered, then discard requests are failed
|
||||
* with %EOPNOTSUPP.
|
||||
*
|
||||
*/
|
||||
void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
|
||||
{
|
||||
q->prepare_discard_fn = dfn;
|
||||
}
|
||||
EXPORT_SYMBOL(blk_queue_set_discard);
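A sketch of a typical caller (my assumption, not part of the patch): a driver wiring up discard support at queue-init time. The mydrv_* names are placeholders, and the callback is assumed to follow the prepare_discard_fn prototype (queue plus request, returning 0 once the request has been prepared).

#include <linux/blkdev.h>

/* Hypothetical callback: turn a discard request into the hardware's command. */
static int mydrv_prepare_discard(struct request_queue *q, struct request *req)
{
	/* fill in driver-specific command data for req here */
	return 0;
}

static void mydrv_setup_queue(struct request_queue *q)
{
	blk_queue_set_discard(q, mydrv_prepare_discard);
}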
|
||||
|
||||
/**
|
||||
* blk_queue_merge_bvec - set a merge_bvec function for queue
|
||||
* @q: queue
|
||||
@ -60,6 +77,24 @@ void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
|
||||
}
|
||||
EXPORT_SYMBOL(blk_queue_softirq_done);
|
||||
|
||||
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
|
||||
{
|
||||
q->rq_timeout = timeout;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
|
||||
|
||||
void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
|
||||
{
|
||||
q->rq_timed_out_fn = fn;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
|
||||
|
||||
void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
|
||||
{
|
||||
q->lld_busy_fn = fn;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
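A sketch of how a low-level driver might opt into the new timeout and busy-state hooks during queue setup (my illustration; the mydrv_* names are placeholders and the 30-second timeout is only an example).

#include <linux/blkdev.h>

static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
{
	/* start driver-side recovery; ask the block layer for more time */
	return BLK_EH_RESET_TIMER;
}

static int mydrv_lld_busy(struct request_queue *q)
{
	/* non-zero tells upper layers the LLD is temporarily busy */
	return 0;
}

static void mydrv_setup_queue_hooks(struct request_queue *q)
{
	blk_queue_rq_timeout(q, 30 * HZ);		/* example value */
	blk_queue_rq_timed_out(q, mydrv_timed_out);
	blk_queue_lld_busy(q, mydrv_lld_busy);
}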
|
||||
|
||||
/**
|
||||
* blk_queue_make_request - define an alternate make_request function for a device
|
||||
* @q: the request queue for the device to be affected
|
||||
@ -127,7 +162,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
|
||||
* Different hardware can have different requirements as to what pages
|
||||
* it can do I/O directly to. A low level driver can call
|
||||
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
|
||||
* buffers for doing I/O to pages residing above @page.
|
||||
* buffers for doing I/O to pages residing above @dma_addr.
|
||||
**/
|
||||
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
|
||||
{
|
||||
@ -212,7 +247,7 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
|
||||
* Description:
|
||||
* Enables a low level driver to set an upper limit on the number of
|
||||
* hw data segments in a request. This would be the largest number of
|
||||
* address/length pairs the host adapter can actually give as once
|
||||
* address/length pairs the host adapter can actually give at once
|
||||
* to the device.
|
||||
**/
|
||||
void blk_queue_max_hw_segments(struct request_queue *q,
|
||||
@ -393,7 +428,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
|
||||
* @mask: alignment mask
|
||||
*
|
||||
* description:
|
||||
* set required memory and length aligment for direct dma transactions.
|
||||
* set required memory and length alignment for direct dma transactions.
|
||||
* this is used when buiding direct io requests for the queue.
|
||||
*
|
||||
**/
|
||||
@ -409,7 +444,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
|
||||
* @mask: alignment mask
|
||||
*
|
||||
* description:
|
||||
* update required memory and length aligment for direct dma transactions.
|
||||
* update required memory and length alignment for direct dma transactions.
|
||||
* If the requested alignment is larger than the current alignment, then
|
||||
* the current queue alignment is updated to the new value, otherwise it
|
||||
* is left alone. The design of this is to allow multiple objects
|
||||
|
block/blk-softirq.c (new file, 175 lines)
@ -0,0 +1,175 @@
|
||||
/*
|
||||
* Functions related to softirq rq completions
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/cpu.h>
|
||||
|
||||
#include "blk.h"
|
||||
|
||||
static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
|
||||
|
||||
/*
|
||||
* Softirq action handler - move entries to local list and loop over them
|
||||
* while passing them to the queue registered handler.
|
||||
*/
|
||||
static void blk_done_softirq(struct softirq_action *h)
|
||||
{
|
||||
struct list_head *cpu_list, local_list;
|
||||
|
||||
local_irq_disable();
|
||||
cpu_list = &__get_cpu_var(blk_cpu_done);
|
||||
list_replace_init(cpu_list, &local_list);
|
||||
local_irq_enable();
|
||||
|
||||
while (!list_empty(&local_list)) {
|
||||
struct request *rq;
|
||||
|
||||
rq = list_entry(local_list.next, struct request, csd.list);
|
||||
list_del_init(&rq->csd.list);
|
||||
rq->q->softirq_done_fn(rq);
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
|
||||
static void trigger_softirq(void *data)
|
||||
{
|
||||
struct request *rq = data;
|
||||
unsigned long flags;
|
||||
struct list_head *list;
|
||||
|
||||
local_irq_save(flags);
|
||||
list = &__get_cpu_var(blk_cpu_done);
|
||||
list_add_tail(&rq->csd.list, list);
|
||||
|
||||
if (list->next == &rq->csd.list)
|
||||
raise_softirq_irqoff(BLOCK_SOFTIRQ);
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup and invoke a run of 'trigger_softirq' on the given cpu.
|
||||
*/
|
||||
static int raise_blk_irq(int cpu, struct request *rq)
|
||||
{
|
||||
if (cpu_online(cpu)) {
|
||||
struct call_single_data *data = &rq->csd;
|
||||
|
||||
data->func = trigger_softirq;
|
||||
data->info = rq;
|
||||
data->flags = 0;
|
||||
|
||||
__smp_call_function_single(cpu, data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
|
||||
static int raise_blk_irq(int cpu, struct request *rq)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int __cpuinit blk_cpu_notify(struct notifier_block *self,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
/*
|
||||
* If a CPU goes away, splice its entries to the current CPU
|
||||
* and trigger a run of the softirq
|
||||
*/
|
||||
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
|
||||
int cpu = (unsigned long) hcpu;
|
||||
|
||||
local_irq_disable();
|
||||
list_splice_init(&per_cpu(blk_cpu_done, cpu),
|
||||
&__get_cpu_var(blk_cpu_done));
|
||||
raise_softirq_irqoff(BLOCK_SOFTIRQ);
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block __cpuinitdata blk_cpu_notifier = {
|
||||
.notifier_call = blk_cpu_notify,
|
||||
};
|
||||
|
||||
void __blk_complete_request(struct request *req)
|
||||
{
|
||||
struct request_queue *q = req->q;
|
||||
unsigned long flags;
|
||||
int ccpu, cpu, group_cpu;
|
||||
|
||||
BUG_ON(!q->softirq_done_fn);
|
||||
|
||||
local_irq_save(flags);
|
||||
cpu = smp_processor_id();
|
||||
group_cpu = blk_cpu_to_group(cpu);
|
||||
|
||||
/*
|
||||
* Select completion CPU
|
||||
*/
|
||||
if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
|
||||
ccpu = req->cpu;
|
||||
else
|
||||
ccpu = cpu;
|
||||
|
||||
if (ccpu == cpu || ccpu == group_cpu) {
|
||||
struct list_head *list;
|
||||
do_local:
|
||||
list = &__get_cpu_var(blk_cpu_done);
|
||||
list_add_tail(&req->csd.list, list);
|
||||
|
||||
/*
|
||||
* if the list only contains our just added request,
|
||||
* signal a raise of the softirq. If there are already
|
||||
* entries there, someone already raised the irq but it
|
||||
* hasn't run yet.
|
||||
*/
|
||||
if (list->next == &req->csd.list)
|
||||
raise_softirq_irqoff(BLOCK_SOFTIRQ);
|
||||
} else if (raise_blk_irq(ccpu, req))
|
||||
goto do_local;
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_complete_request - end I/O on a request
|
||||
* @req: the request being processed
|
||||
*
|
||||
* Description:
|
||||
* Ends all I/O on a request. It does not handle partial completions,
|
||||
* unless the driver actually implements this in its completion callback
|
||||
* through requeueing. The actual completion happens out-of-order,
|
||||
* through a softirq handler. The user must have registered a completion
|
||||
* callback through blk_queue_softirq_done().
|
||||
**/
|
||||
void blk_complete_request(struct request *req)
|
||||
{
|
||||
if (unlikely(blk_should_fake_timeout(req->q)))
|
||||
return;
|
||||
if (!blk_mark_rq_complete(req))
|
||||
__blk_complete_request(req);
|
||||
}
|
||||
EXPORT_SYMBOL(blk_complete_request);
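A sketch of the completion pattern this enables (my illustration, not from the patch); the mydrv_* functions are placeholders for driver glue around the block softirq path.

#include <linux/blkdev.h>

static void mydrv_softirq_done(struct request *rq)
{
	/* runs in softirq context on the CPU picked by __blk_complete_request() */
	blk_end_request(rq, 0, blk_rq_bytes(rq));
}

static void mydrv_setup_queue(struct request_queue *q)
{
	blk_queue_softirq_done(q, mydrv_softirq_done);
}

static void mydrv_hw_irq_saw_completion(struct request *rq)
{
	/* defer the real completion work to the block softirq */
	blk_complete_request(rq);
}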
|
||||
|
||||
__init int blk_softirq_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for_each_possible_cpu(i)
|
||||
INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
|
||||
|
||||
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
|
||||
register_hotcpu_notifier(&blk_cpu_notifier);
|
||||
return 0;
|
||||
}
|
||||
subsys_initcall(blk_softirq_init);
|
@ -156,6 +156,30 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
|
||||
{
|
||||
unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
|
||||
|
||||
return queue_var_show(set != 0, page);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
|
||||
{
|
||||
ssize_t ret = -EINVAL;
|
||||
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
|
||||
unsigned long val;
|
||||
|
||||
ret = queue_var_store(&val, page, count);
|
||||
spin_lock_irq(q->queue_lock);
|
||||
if (val)
|
||||
queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
|
||||
else
|
||||
queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct queue_sysfs_entry queue_requests_entry = {
|
||||
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
|
||||
@ -197,6 +221,12 @@ static struct queue_sysfs_entry queue_nomerges_entry = {
|
||||
.store = queue_nomerges_store,
|
||||
};
|
||||
|
||||
static struct queue_sysfs_entry queue_rq_affinity_entry = {
|
||||
.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
|
||||
.show = queue_rq_affinity_show,
|
||||
.store = queue_rq_affinity_store,
|
||||
};
|
||||
|
||||
static struct attribute *default_attrs[] = {
|
||||
&queue_requests_entry.attr,
|
||||
&queue_ra_entry.attr,
|
||||
@ -205,6 +235,7 @@ static struct attribute *default_attrs[] = {
|
||||
&queue_iosched_entry.attr,
|
||||
&queue_hw_sector_size_entry.attr,
|
||||
&queue_nomerges_entry.attr,
|
||||
&queue_rq_affinity_entry.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
@ -310,7 +341,7 @@ int blk_register_queue(struct gendisk *disk)
|
||||
if (!q->request_fn)
|
||||
return 0;
|
||||
|
||||
ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
|
||||
ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj),
|
||||
"%s", "queue");
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
@ -339,6 +370,6 @@ void blk_unregister_queue(struct gendisk *disk)
|
||||
|
||||
kobject_uevent(&q->kobj, KOBJ_REMOVE);
|
||||
kobject_del(&q->kobj);
|
||||
kobject_put(&disk->dev.kobj);
|
||||
kobject_put(&disk_to_dev(disk)->kobj);
|
||||
}
|
||||
}
|
||||
|
@ -29,7 +29,7 @@ EXPORT_SYMBOL(blk_queue_find_tag);
|
||||
* __blk_free_tags - release a given set of tag maintenance info
|
||||
* @bqt: the tag map to free
|
||||
*
|
||||
* Tries to free the specified @bqt@. Returns true if it was
|
||||
* Tries to free the specified @bqt. Returns true if it was
|
||||
* actually freed and false if there are still references using it
|
||||
*/
|
||||
static int __blk_free_tags(struct blk_queue_tag *bqt)
|
||||
@ -78,7 +78,7 @@ void __blk_queue_free_tags(struct request_queue *q)
|
||||
* blk_free_tags - release a given set of tag maintenance info
|
||||
* @bqt: the tag map to free
|
||||
*
|
||||
* For externally managed @bqt@ frees the map. Callers of this
|
||||
* For externally managed @bqt frees the map. Callers of this
|
||||
* function must guarantee to have released all the queues that
|
||||
* might have been using this tag map.
|
||||
*/
|
||||
@ -94,7 +94,7 @@ EXPORT_SYMBOL(blk_free_tags);
|
||||
* @q: the request queue for the device
|
||||
*
|
||||
* Notes:
|
||||
* This is used to disabled tagged queuing to a device, yet leave
|
||||
* This is used to disable tagged queuing to a device, yet leave
|
||||
* queue in function.
|
||||
**/
|
||||
void blk_queue_free_tags(struct request_queue *q)
|
||||
@ -271,7 +271,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
|
||||
* @rq: the request that has completed
|
||||
*
|
||||
* Description:
|
||||
* Typically called when end_that_request_first() returns 0, meaning
|
||||
* Typically called when end_that_request_first() returns %0, meaning
|
||||
* all transfers have been done for a request. It's important to call
|
||||
* this function before end_that_request_last(), as that will put the
|
||||
* request back on the free list thus corrupting the internal tag list.
|
||||
@ -337,6 +337,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
|
||||
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
|
||||
{
|
||||
struct blk_queue_tag *bqt = q->queue_tags;
|
||||
unsigned max_depth, offset;
|
||||
int tag;
|
||||
|
||||
if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
|
||||
@ -350,10 +351,19 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
|
||||
/*
|
||||
* Protect against shared tag maps, as we may not have exclusive
|
||||
* access to the tag map.
|
||||
*
|
||||
* We reserve a few tags just for sync IO, since we don't want
|
||||
* to starve sync IO on behalf of flooding async IO.
|
||||
*/
|
||||
max_depth = bqt->max_depth;
|
||||
if (rq_is_sync(rq))
|
||||
offset = 0;
|
||||
else
|
||||
offset = max_depth >> 2;
|
||||
|
||||
do {
|
||||
tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
|
||||
if (tag >= bqt->max_depth)
|
||||
tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
|
||||
if (tag >= max_depth)
|
||||
return 1;
|
||||
|
||||
} while (test_and_set_bit_lock(tag, bqt->tag_map));
|
||||
|
block/blk-timeout.c (new file, 238 lines)
@ -0,0 +1,238 @@
|
||||
/*
|
||||
* Functions related to generic timeout handling of requests.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/fault-inject.h>
|
||||
|
||||
#include "blk.h"
|
||||
|
||||
#ifdef CONFIG_FAIL_IO_TIMEOUT
|
||||
|
||||
static DECLARE_FAULT_ATTR(fail_io_timeout);
|
||||
|
||||
static int __init setup_fail_io_timeout(char *str)
|
||||
{
|
||||
return setup_fault_attr(&fail_io_timeout, str);
|
||||
}
|
||||
__setup("fail_io_timeout=", setup_fail_io_timeout);
|
||||
|
||||
int blk_should_fake_timeout(struct request_queue *q)
|
||||
{
|
||||
if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
|
||||
return 0;
|
||||
|
||||
return should_fail(&fail_io_timeout, 1);
|
||||
}
|
||||
|
||||
static int __init fail_io_timeout_debugfs(void)
|
||||
{
|
||||
return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
|
||||
}
|
||||
|
||||
late_initcall(fail_io_timeout_debugfs);
|
||||
|
||||
ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct gendisk *disk = dev_to_disk(dev);
|
||||
int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
|
||||
|
||||
return sprintf(buf, "%d\n", set != 0);
|
||||
}
|
||||
|
||||
ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct gendisk *disk = dev_to_disk(dev);
|
||||
int val;
|
||||
|
||||
if (count) {
|
||||
struct request_queue *q = disk->queue;
|
||||
char *p = (char *) buf;
|
||||
|
||||
val = simple_strtoul(p, &p, 10);
|
||||
spin_lock_irq(q->queue_lock);
|
||||
if (val)
|
||||
queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
|
||||
else
|
||||
queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_FAIL_IO_TIMEOUT */
|
||||
|
||||
/*
|
||||
* blk_delete_timer - Delete/cancel timer for a given function.
|
||||
* @req: request that we are canceling timer for
|
||||
*
|
||||
*/
|
||||
void blk_delete_timer(struct request *req)
|
||||
{
|
||||
struct request_queue *q = req->q;
|
||||
|
||||
/*
|
||||
* Nothing to detach
|
||||
*/
|
||||
if (!q->rq_timed_out_fn || !req->deadline)
|
||||
return;
|
||||
|
||||
list_del_init(&req->timeout_list);
|
||||
|
||||
if (list_empty(&q->timeout_list))
|
||||
del_timer(&q->timeout);
|
||||
}
|
||||
|
||||
static void blk_rq_timed_out(struct request *req)
|
||||
{
|
||||
struct request_queue *q = req->q;
|
||||
enum blk_eh_timer_return ret;
|
||||
|
||||
ret = q->rq_timed_out_fn(req);
|
||||
switch (ret) {
|
||||
case BLK_EH_HANDLED:
|
||||
__blk_complete_request(req);
|
||||
break;
|
||||
case BLK_EH_RESET_TIMER:
|
||||
blk_clear_rq_complete(req);
|
||||
blk_add_timer(req);
|
||||
break;
|
||||
case BLK_EH_NOT_HANDLED:
|
||||
/*
|
||||
* LLD handles this for now but in the future
|
||||
* we can send a request msg to abort the command
|
||||
* and we can move more of the generic scsi eh code to
|
||||
* the blk layer.
|
||||
*/
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR "block: bad eh return: %d\n", ret);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void blk_rq_timed_out_timer(unsigned long data)
|
||||
{
|
||||
struct request_queue *q = (struct request_queue *) data;
|
||||
unsigned long flags, uninitialized_var(next), next_set = 0;
|
||||
struct request *rq, *tmp;
|
||||
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
|
||||
list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
|
||||
if (time_after_eq(jiffies, rq->deadline)) {
|
||||
list_del_init(&rq->timeout_list);
|
||||
|
||||
/*
|
||||
* Check if we raced with end io completion
|
||||
*/
|
||||
if (blk_mark_rq_complete(rq))
|
||||
continue;
|
||||
blk_rq_timed_out(rq);
|
||||
}
|
||||
if (!next_set) {
|
||||
next = rq->deadline;
|
||||
next_set = 1;
|
||||
} else if (time_after(next, rq->deadline))
|
||||
next = rq->deadline;
|
||||
}
|
||||
|
||||
if (next_set && !list_empty(&q->timeout_list))
|
||||
mod_timer(&q->timeout, round_jiffies(next));
|
||||
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_abort_request -- Request request recovery for the specified command
|
||||
* @req: pointer to the request of interest
|
||||
*
|
||||
* This function requests that the block layer start recovery for the
|
||||
* request by deleting the timer and calling the q's timeout function.
|
||||
* LLDDs who implement their own error recovery MAY ignore the timeout
|
||||
* event if they generated blk_abort_req. Must hold queue lock.
|
||||
*/
|
||||
void blk_abort_request(struct request *req)
|
||||
{
|
||||
if (blk_mark_rq_complete(req))
|
||||
return;
|
||||
blk_delete_timer(req);
|
||||
blk_rq_timed_out(req);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_abort_request);
|
||||
|
||||
/**
|
||||
* blk_add_timer - Start timeout timer for a single request
|
||||
* @req: request that is about to start running.
|
||||
*
|
||||
* Notes:
|
||||
* Each request has its own timer, and as it is added to the queue, we
|
||||
* set up the timer. When the request completes, we cancel the timer.
|
||||
*/
|
||||
void blk_add_timer(struct request *req)
|
||||
{
|
||||
struct request_queue *q = req->q;
|
||||
unsigned long expiry;
|
||||
|
||||
if (!q->rq_timed_out_fn)
|
||||
return;
|
||||
|
||||
BUG_ON(!list_empty(&req->timeout_list));
|
||||
BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
|
||||
|
||||
if (req->timeout)
|
||||
req->deadline = jiffies + req->timeout;
|
||||
else {
|
||||
req->deadline = jiffies + q->rq_timeout;
|
||||
/*
|
||||
* Some LLDs, like scsi, peek at the timeout to prevent
|
||||
* a command from being retried forever.
|
||||
*/
|
||||
req->timeout = q->rq_timeout;
|
||||
}
|
||||
list_add_tail(&req->timeout_list, &q->timeout_list);
|
||||
|
||||
/*
|
||||
* If the timer isn't already pending or this timeout is earlier
|
||||
* than an existing one, modify the timer. Round to next nearest
|
||||
* second.
|
||||
*/
|
||||
expiry = round_jiffies(req->deadline);
|
||||
|
||||
/*
|
||||
* We use ->deadline == 0 to detect whether a timer was added or
|
||||
* not, so just increase to next jiffy for that specific case
|
||||
*/
|
||||
if (unlikely(!req->deadline))
|
||||
req->deadline = 1;
|
||||
|
||||
if (!timer_pending(&q->timeout) ||
|
||||
time_before(expiry, q->timeout.expires))
|
||||
mod_timer(&q->timeout, expiry);
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_abort_queue -- Abort all request on given queue
|
||||
* @queue: pointer to queue
|
||||
*
|
||||
*/
|
||||
void blk_abort_queue(struct request_queue *q)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct request *rq, *tmp;
|
||||
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
|
||||
elv_abort_queue(q);
|
||||
|
||||
list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
|
||||
blk_abort_request(rq);
|
||||
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_abort_queue);
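A short sketch (my addition) of a driver error path forcing timeout handling for a single stuck request; q and rq are placeholders, and the queue lock is taken around blk_abort_request() as its kerneldoc above requires.

#include <linux/blkdev.h>

/* Sketch: from a driver's error handler, kick one inflight request. */
static void mydrv_kick_stuck_request(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(rq);	/* deletes the timer and runs the timeout handler */
	spin_unlock_irqrestore(q->queue_lock, flags);
}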
|
block/blk.h (48 lines changed)
@ -17,6 +17,42 @@ void __blk_queue_free_tags(struct request_queue *q);
|
||||
|
||||
void blk_unplug_work(struct work_struct *work);
|
||||
void blk_unplug_timeout(unsigned long data);
|
||||
void blk_rq_timed_out_timer(unsigned long data);
|
||||
void blk_delete_timer(struct request *);
|
||||
void blk_add_timer(struct request *);
|
||||
|
||||
/*
|
||||
* Internal atomic flags for request handling
|
||||
*/
|
||||
enum rq_atomic_flags {
|
||||
REQ_ATOM_COMPLETE = 0,
|
||||
};
|
||||
|
||||
/*
|
||||
* EH timer and IO completion will both attempt to 'grab' the request, make
|
||||
* sure that only one of them suceeds
|
||||
*/
|
||||
static inline int blk_mark_rq_complete(struct request *rq)
|
||||
{
|
||||
return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
|
||||
}
|
||||
|
||||
static inline void blk_clear_rq_complete(struct request *rq)
|
||||
{
|
||||
clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FAIL_IO_TIMEOUT
|
||||
int blk_should_fake_timeout(struct request_queue *);
|
||||
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
|
||||
ssize_t part_timeout_store(struct device *, struct device_attribute *,
|
||||
const char *, size_t);
|
||||
#else
|
||||
static inline int blk_should_fake_timeout(struct request_queue *q)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct io_context *current_io_context(gfp_t gfp_flags, int node);
|
||||
|
||||
@ -59,4 +95,16 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
|
||||
|
||||
#endif /* BLK_DEV_INTEGRITY */
|
||||
|
||||
static inline int blk_cpu_to_group(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_SCHED_MC
|
||||
cpumask_t mask = cpu_coregroup_map(cpu);
|
||||
return first_cpu(mask);
|
||||
#elif defined(CONFIG_SCHED_SMT)
|
||||
return first_cpu(per_cpu(cpu_sibling_map, cpu));
|
||||
#else
|
||||
return cpu;
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -111,23 +111,9 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
|
||||
*/
|
||||
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };
|
||||
|
||||
/*
|
||||
* Bio action bits of interest
|
||||
*/
|
||||
static u32 bio_act[9] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC), 0, BLK_TC_ACT(BLK_TC_AHEAD), 0, 0, 0, BLK_TC_ACT(BLK_TC_META) };
|
||||
|
||||
/*
|
||||
* More could be added as needed, taking care to increment the decrementer
|
||||
* to get correct indexing
|
||||
*/
|
||||
#define trace_barrier_bit(rw) \
|
||||
(((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
|
||||
#define trace_sync_bit(rw) \
|
||||
(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
|
||||
#define trace_ahead_bit(rw) \
|
||||
(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
|
||||
#define trace_meta_bit(rw) \
|
||||
(((rw) & (1 << BIO_RW_META)) >> (BIO_RW_META - 3))
|
||||
/* The ilog2() calls fall out because they're constant */
|
||||
#define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \
|
||||
(ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) )
|
||||
|
||||
/*
|
||||
* The worker for the various blk_add_trace*() types. Fills out a
|
||||
@ -147,10 +133,11 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
|
||||
return;
|
||||
|
||||
what |= ddir_act[rw & WRITE];
|
||||
what |= bio_act[trace_barrier_bit(rw)];
|
||||
what |= bio_act[trace_sync_bit(rw)];
|
||||
what |= bio_act[trace_ahead_bit(rw)];
|
||||
what |= bio_act[trace_meta_bit(rw)];
|
||||
what |= MASK_TC_BIT(rw, BARRIER);
|
||||
what |= MASK_TC_BIT(rw, SYNC);
|
||||
what |= MASK_TC_BIT(rw, AHEAD);
|
||||
what |= MASK_TC_BIT(rw, META);
|
||||
what |= MASK_TC_BIT(rw, DISCARD);
|
||||
|
||||
pid = tsk->pid;
|
||||
if (unlikely(act_log_check(bt, what, sector, pid)))
|
||||
@ -382,7 +369,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
|
||||
if (!buts->buf_size || !buts->buf_nr)
|
||||
return -EINVAL;
|
||||
|
||||
strcpy(buts->name, name);
|
||||
strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
|
||||
buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
|
||||
|
||||
/*
|
||||
* some device names have larger paths - convert the slashes
|
||||
|
@ -283,7 +283,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
|
||||
next_rq->cmd_type = rq->cmd_type;
|
||||
|
||||
dxferp = (void*)(unsigned long)hdr->din_xferp;
|
||||
ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
|
||||
ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
|
||||
hdr->din_xfer_len, GFP_KERNEL);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
@ -298,7 +299,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
|
||||
dxfer_len = 0;
|
||||
|
||||
if (dxfer_len) {
|
||||
ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
|
||||
ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
|
||||
GFP_KERNEL);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
@@ -39,6 +39,7 @@ static int cfq_slice_idle = HZ / 125;
#define CFQ_MIN_TT (2)

#define CFQ_SLICE_SCALE (5)
#define CFQ_HW_QUEUE_MIN (5)

#define RQ_CIC(rq) \
((struct cfq_io_context *) (rq)->elevator_private)
@@ -86,7 +87,14 @@ struct cfq_data {

int rq_in_driver;
int sync_flight;

/*
* queue-depth detection
*/
int rq_queued;
int hw_tag;
int hw_tag_samples;
int rq_in_driver_peak;

/*
* idle window management
@@ -244,7 +252,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
if (cfqd->busy_queues) {
cfq_log(cfqd, "schedule dispatch");
kblockd_schedule_work(&cfqd->unplug_work);
kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
}
}

@@ -654,15 +662,6 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
cfqd->rq_in_driver);

/*
* If the depth is larger 1, it really could be queueing. But lets
* make the mark a little higher - idling could still be good for
* low queueing, and a low queueing number could also just indicate
* a SCSI mid layer like behaviour where limit+1 is often seen.
*/
if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
cfqd->hw_tag = 1;

cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

@@ -686,6 +685,7 @@ static void cfq_remove_request(struct request *rq)
list_del_init(&rq->queuelist);
cfq_del_rq_rb(rq);

cfqq->cfqd->rq_queued--;
if (rq_is_meta(rq)) {
WARN_ON(!cfqq->meta_pending);
cfqq->meta_pending--;
@@ -878,6 +878,14 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
struct cfq_io_context *cic;
unsigned long sl;

/*
* SSD device without seek penalty, disable idling. But only do so
* for devices that support queuing, otherwise we still have a problem
* with sync vs async workloads.
*/
if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
return;

WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
WARN_ON(cfq_cfqq_slice_new(cfqq));

@@ -1833,6 +1841,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
struct cfq_io_context *cic = RQ_CIC(rq);

cfqd->rq_queued++;
if (rq_is_meta(rq))
cfqq->meta_pending++;

@@ -1880,6 +1889,31 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
* Update hw_tag based on peak queue depth over 50 samples under
* sufficient load.
*/
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
cfqd->rq_in_driver_peak = cfqd->rq_in_driver;

if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
return;

if (cfqd->hw_tag_samples++ < 50)
return;

if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
cfqd->hw_tag = 1;
else
cfqd->hw_tag = 0;

cfqd->hw_tag_samples = 0;
cfqd->rq_in_driver_peak = 0;
}

static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1890,6 +1924,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
now = jiffies;
cfq_log_cfqq(cfqd, cfqq, "complete");

cfq_update_hw_tag(cfqd);

WARN_ON(!cfqd->rq_in_driver);
WARN_ON(!cfqq->dispatched);
cfqd->rq_in_driver--;
@@ -2200,6 +2236,7 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_slice[1] = cfq_slice_sync;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
cfqd->hw_tag = 1;

return cfqd;
}
@@ -211,14 +211,10 @@ int blk_register_filter(struct gendisk *disk)
{
int ret;
struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
struct kobject *parent = kobject_get(disk->holder_dir->parent);

if (!parent)
return -ENODEV;

ret = kobject_init_and_add(&filter->kobj, &rcf_ktype, parent,
ret = kobject_init_and_add(&filter->kobj, &rcf_ktype,
&disk_to_dev(disk)->kobj,
"%s", "cmd_filter");

if (ret < 0)
return ret;

@@ -231,7 +227,6 @@ void blk_unregister_filter(struct gendisk *disk)
struct blk_cmd_filter *filter = &disk->queue->cmd_filter;

kobject_put(&filter->kobj);
kobject_put(disk->holder_dir->parent);
}
EXPORT_SYMBOL(blk_unregister_filter);
#endif
@@ -788,6 +788,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
case BLKFLSBUF:
case BLKROSET:
case BLKDISCARD:
/*
* the ones below are implemented in blkdev_locked_ioctl,
* but we call blkdev_ioctl, which gets the lock for us
@@ -33,7 +33,7 @@ struct deadline_data {
*/
struct rb_root sort_list[2];
struct list_head fifo_list[2];

/*
* next in sort order. read, write or both are NULL
*/
@@ -53,7 +53,11 @@ struct deadline_data {

static void deadline_move_request(struct deadline_data *, struct request *);

#define RQ_RB_ROOT(dd, rq) (&(dd)->sort_list[rq_data_dir((rq))])
static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
return &dd->sort_list[rq_data_dir(rq)];
}

/*
* get the request after `rq' in sector-sorted order
@@ -72,15 +76,11 @@ deadline_latter_request(struct request *rq)
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
struct rb_root *root = RQ_RB_ROOT(dd, rq);
struct rb_root *root = deadline_rb_root(dd, rq);
struct request *__alias;

retry:
__alias = elv_rb_add(root, rq);
if (unlikely(__alias)) {
while (unlikely(__alias = elv_rb_add(root, rq)))
deadline_move_request(dd, __alias);
goto retry;
}
}

static inline void
@@ -91,7 +91,7 @@ deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
if (dd->next_rq[data_dir] == rq)
dd->next_rq[data_dir] = deadline_latter_request(rq);

elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
deadline_add_rq_rb(dd, rq);

/*
* set expire time (only used for reads) and add to fifo list
* set expire time and add to fifo list
*/
rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
@@ -162,7 +162,7 @@ static void deadline_merged_request(struct request_queue *q,
* if the merge was a front merge, we need to reposition request
*/
if (type == ELEVATOR_FRONT_MERGE) {
elv_rb_del(RQ_RB_ROOT(dd, req), req);
elv_rb_del(deadline_rb_root(dd, req), req);
deadline_add_rq_rb(dd, req);
}
}
@@ -212,7 +212,7 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
dd->next_rq[WRITE] = NULL;
dd->next_rq[data_dir] = deadline_latter_request(rq);

dd->last_sector = rq->sector + rq->nr_sectors;
dd->last_sector = rq_end_sector(rq);

/*
* take it off the sort and fifo list, move
@@ -222,7 +222,7 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
}

/*
* deadline_check_fifo returns 0 if there are no expired reads on the fifo,
* deadline_check_fifo returns 0 if there are no expired requests on the fifo,
* 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
*/
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
@@ -258,17 +258,9 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
else
rq = dd->next_rq[READ];

if (rq) {
/* we have a "next request" */

if (dd->last_sector != rq->sector)
/* end the batch on a non sequential request */
dd->batching += dd->fifo_batch;

if (dd->batching < dd->fifo_batch)
/* we are still entitled to batch */
goto dispatch_request;
}
if (rq && dd->batching < dd->fifo_batch)
/* we have a next request are still entitled to batch */
goto dispatch_request;

/*
* at this point we are not running a batch. select the appropriate
@@ -34,8 +34,9 @@
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <asm/uaccess.h>
#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -74,6 +75,12 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
if (!rq_mergeable(rq))
return 0;

/*
* Don't merge file system requests and discard requests
*/
if (bio_discard(bio) != bio_discard(rq->bio))
return 0;

/*
* different data direction or already started, don't merge
*/
@@ -438,6 +445,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry);

if (blk_discard_rq(rq) != blk_discard_rq(pos))
break;
if (rq_data_dir(rq) != rq_data_dir(pos))
break;
if (pos->cmd_flags & stop_flags)
@@ -607,7 +616,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
break;

case ELEVATOR_INSERT_SORT:
BUG_ON(!blk_fs_request(rq));
BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
rq->cmd_flags |= REQ_SORTED;
q->nr_sorted++;
if (rq_mergeable(rq)) {
@@ -692,7 +701,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
* this request is scheduling boundary, update
* end_sector
*/
if (blk_fs_request(rq)) {
if (blk_fs_request(rq) || blk_discard_rq(rq)) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
@@ -745,7 +754,7 @@ struct request *elv_next_request(struct request_queue *q)
* not ever see it.
*/
if (blk_empty_barrier(rq)) {
end_queued_request(rq, 1);
__blk_end_request(rq, 0, blk_rq_bytes(rq));
continue;
}
if (!(rq->cmd_flags & REQ_STARTED)) {
@@ -764,6 +773,12 @@ struct request *elv_next_request(struct request_queue *q)
*/
rq->cmd_flags |= REQ_STARTED;
blk_add_trace_rq(q, rq, BLK_TA_ISSUE);

/*
* We are now handing the request to the hardware,
* add the timeout handler
*/
blk_add_timer(rq);
}

if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -782,7 +797,6 @@ struct request *elv_next_request(struct request_queue *q)
* device can handle
*/
rq->nr_phys_segments++;
rq->nr_hw_segments++;
}

if (!q->prep_rq_fn)
@@ -805,14 +819,13 @@ struct request *elv_next_request(struct request_queue *q)
* so that we don't add it again
*/
--rq->nr_phys_segments;
--rq->nr_hw_segments;
}

rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
rq->cmd_flags |= REQ_QUIET;
end_queued_request(rq, 0);
__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
} else {
printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
@@ -901,6 +914,19 @@ int elv_may_queue(struct request_queue *q, int rw)
return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
struct request *rq;

while (!list_empty(&q->queue_head)) {
rq = list_entry_rq(q->queue_head.next);
rq->cmd_flags |= REQ_QUIET;
blk_add_trace_rq(q, rq, BLK_TA_ABORT);
__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
block/genhd.c: 977 lines changed (diff suppressed because it is too large)

block/ioctl.c: 126 lines changed
@@ -12,11 +12,12 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
{
struct block_device *bdevp;
struct gendisk *disk;
struct hd_struct *part;
struct blkpg_ioctl_arg a;
struct blkpg_partition p;
struct disk_part_iter piter;
long long start, length;
int part;
int i;
int partno;
int err;

if (!capable(CAP_SYS_ADMIN))
@@ -28,8 +29,8 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
disk = bdev->bd_disk;
if (bdev != bdev->bd_contains)
return -EINVAL;
part = p.pno;
if (part <= 0 || part >= disk->minors)
partno = p.pno;
if (partno <= 0)
return -EINVAL;
switch (a.op) {
case BLKPG_ADD_PARTITION:
@@ -43,36 +44,37 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
|| pstart < 0 || plength < 0)
return -EINVAL;
}
/* partition number in use? */
mutex_lock(&bdev->bd_mutex);
if (disk->part[part - 1]) {
mutex_unlock(&bdev->bd_mutex);
return -EBUSY;
}
/* overlap? */
for (i = 0; i < disk->minors - 1; i++) {
struct hd_struct *s = disk->part[i];

if (!s)
continue;
if (!(start+length <= s->start_sect ||
start >= s->start_sect + s->nr_sects)) {
mutex_lock(&bdev->bd_mutex);

/* overlap? */
disk_part_iter_init(&piter, disk,
DISK_PITER_INCL_EMPTY);
while ((part = disk_part_iter_next(&piter))) {
if (!(start + length <= part->start_sect ||
start >= part->start_sect + part->nr_sects)) {
disk_part_iter_exit(&piter);
mutex_unlock(&bdev->bd_mutex);
return -EBUSY;
}
}
disk_part_iter_exit(&piter);

/* all seems OK */
err = add_partition(disk, part, start, length, ADDPART_FLAG_NONE);
err = add_partition(disk, partno, start, length,
ADDPART_FLAG_NONE);
mutex_unlock(&bdev->bd_mutex);
return err;
case BLKPG_DEL_PARTITION:
if (!disk->part[part-1])
part = disk_get_part(disk, partno);
if (!part)
return -ENXIO;
if (disk->part[part - 1]->nr_sects == 0)
return -ENXIO;
bdevp = bdget_disk(disk, part);

bdevp = bdget(part_devt(part));
disk_put_part(part);
if (!bdevp)
return -ENOMEM;

mutex_lock(&bdevp->bd_mutex);
if (bdevp->bd_openers) {
mutex_unlock(&bdevp->bd_mutex);
@@ -84,7 +86,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
invalidate_bdev(bdevp);

mutex_lock_nested(&bdev->bd_mutex, 1);
delete_partition(disk, part);
delete_partition(disk, partno);
mutex_unlock(&bdev->bd_mutex);
mutex_unlock(&bdevp->bd_mutex);
bdput(bdevp);
@@ -100,7 +102,7 @@ static int blkdev_reread_part(struct block_device *bdev)
struct gendisk *disk = bdev->bd_disk;
int res;

if (disk->minors == 1 || bdev != bdev->bd_contains)
if (!disk_partitionable(disk) || bdev != bdev->bd_contains)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -111,6 +113,69 @@ static int blkdev_reread_part(struct block_device *bdev)
return res;
}

static void blk_ioc_discard_endio(struct bio *bio, int err)
{
if (err) {
if (err == -EOPNOTSUPP)
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
clear_bit(BIO_UPTODATE, &bio->bi_flags);
}
complete(bio->bi_private);
}

static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
uint64_t len)
{
struct request_queue *q = bdev_get_queue(bdev);
int ret = 0;

if (start & 511)
return -EINVAL;
if (len & 511)
return -EINVAL;
start >>= 9;
len >>= 9;

if (start + len > (bdev->bd_inode->i_size >> 9))
return -EINVAL;

if (!q->prepare_discard_fn)
return -EOPNOTSUPP;

while (len && !ret) {
DECLARE_COMPLETION_ONSTACK(wait);
struct bio *bio;

bio = bio_alloc(GFP_KERNEL, 0);
if (!bio)
return -ENOMEM;

bio->bi_end_io = blk_ioc_discard_endio;
bio->bi_bdev = bdev;
bio->bi_private = &wait;
bio->bi_sector = start;

if (len > q->max_hw_sectors) {
bio->bi_size = q->max_hw_sectors << 9;
len -= q->max_hw_sectors;
start += q->max_hw_sectors;
} else {
bio->bi_size = len << 9;
len = 0;
}
submit_bio(DISCARD_NOBARRIER, bio);

wait_for_completion(&wait);

if (bio_flagged(bio, BIO_EOPNOTSUPP))
ret = -EOPNOTSUPP;
else if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
bio_put(bio);
}
return ret;
}

static int put_ushort(unsigned long arg, unsigned short val)
{
return put_user(val, (unsigned short __user *)arg);
@@ -258,6 +323,19 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
set_device_ro(bdev, n);
unlock_kernel();
return 0;

case BLKDISCARD: {
uint64_t range[2];

if (!(file->f_mode & FMODE_WRITE))
return -EBADF;

if (copy_from_user(range, (void __user *)arg, sizeof(range)))
return -EFAULT;

return blk_ioctl_discard(bdev, range[0], range[1]);
}

case HDIO_GETGEO: {
struct hd_geometry geo;
@@ -185,6 +185,7 @@ void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
}
EXPORT_SYMBOL_GPL(blk_set_cmd_filter_defaults);

@@ -313,11 +314,12 @@ static int sg_io(struct file *file, struct request_queue *q,
goto out;
}

ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
hdr->dxfer_len);
ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
hdr->dxfer_len, GFP_KERNEL);
kfree(iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
GFP_KERNEL);

if (ret)
goto out;
@@ -33,6 +33,7 @@
*/

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -459,29 +460,29 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
* RETURNS:
* EH_HANDLED or EH_NOT_HANDLED
*/
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct ata_port *ap = ata_shost_to_port(host);
unsigned long flags;
struct ata_queued_cmd *qc;
enum scsi_eh_timer_return ret;
enum blk_eh_timer_return ret;

DPRINTK("ENTER\n");

if (ap->ops->error_handler) {
ret = EH_NOT_HANDLED;
ret = BLK_EH_NOT_HANDLED;
goto out;
}

ret = EH_HANDLED;
ret = BLK_EH_HANDLED;
spin_lock_irqsave(ap->lock, flags);
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc) {
WARN_ON(qc->scsicmd != cmd);
qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
qc->err_mask |= AC_ERR_TIMEOUT;
ret = EH_NOT_HANDLED;
ret = BLK_EH_NOT_HANDLED;
}
spin_unlock_irqrestore(ap->lock, flags);

@@ -833,7 +834,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
* Note that ATA_QCFLAG_FAILED is unconditionally set after
* this function completes.
*/
scsi_req_abort_cmd(qc->scsicmd);
blk_abort_request(qc->scsicmd->request);
}

/**
@@ -1085,6 +1085,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,

blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
} else {
if (ata_id_is_ssd(dev->id))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
sdev->request_queue);

/* ATA devices must be sector aligned */
blk_queue_update_dma_alignment(sdev->request_queue,
ATA_SECT_SIZE - 1);
@@ -155,7 +155,7 @@ extern int ata_bus_probe(struct ata_port *ap);
/* libata-eh.c */
extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern void ata_scsi_error(struct Scsi_Host *host);
extern void ata_port_wait_eh(struct ata_port *ap);
extern void ata_eh_fastdrain_timerfn(unsigned long arg);
@@ -54,7 +54,7 @@ struct driver_private {
*/
struct class_private {
struct kset class_subsys;
struct list_head class_devices;
struct klist class_devices;
struct list_head class_interfaces;
struct kset class_dirs;
struct mutex class_mutex;
@@ -135,6 +135,20 @@ static void remove_class_attrs(struct class *cls)
}
}

static void klist_class_dev_get(struct klist_node *n)
{
struct device *dev = container_of(n, struct device, knode_class);

get_device(dev);
}

static void klist_class_dev_put(struct klist_node *n)
{
struct device *dev = container_of(n, struct device, knode_class);

put_device(dev);
}

int __class_register(struct class *cls, struct lock_class_key *key)
{
struct class_private *cp;
@@ -145,7 +159,7 @@ int __class_register(struct class *cls, struct lock_class_key *key)
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
INIT_LIST_HEAD(&cp->class_devices);
klist_init(&cp->class_devices, klist_class_dev_get, klist_class_dev_put);
INIT_LIST_HEAD(&cp->class_interfaces);
kset_init(&cp->class_dirs);
__mutex_init(&cp->class_mutex, "struct class mutex", key);
@@ -268,6 +282,71 @@ char *make_class_name(const char *name, struct kobject *kobj)
}
#endif

/**
* class_dev_iter_init - initialize class device iterator
* @iter: class iterator to initialize
* @class: the class we wanna iterate over
* @start: the device to start iterating from, if any
* @type: device_type of the devices to iterate over, NULL for all
*
* Initialize class iterator @iter such that it iterates over devices
* of @class. If @start is set, the list iteration will start there,
* otherwise if it is NULL, the iteration starts at the beginning of
* the list.
*/
void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
struct device *start, const struct device_type *type)
{
struct klist_node *start_knode = NULL;

if (start)
start_knode = &start->knode_class;
klist_iter_init_node(&class->p->class_devices, &iter->ki, start_knode);
iter->type = type;
}
EXPORT_SYMBOL_GPL(class_dev_iter_init);

/**
* class_dev_iter_next - iterate to the next device
* @iter: class iterator to proceed
*
* Proceed @iter to the next device and return it. Returns NULL if
* iteration is complete.
*
* The returned device is referenced and won't be released till
* iterator is proceed to the next device or exited. The caller is
* free to do whatever it wants to do with the device including
* calling back into class code.
*/
struct device *class_dev_iter_next(struct class_dev_iter *iter)
{
struct klist_node *knode;
struct device *dev;

while (1) {
knode = klist_next(&iter->ki);
if (!knode)
return NULL;
dev = container_of(knode, struct device, knode_class);
if (!iter->type || iter->type == dev->type)
return dev;
}
}
EXPORT_SYMBOL_GPL(class_dev_iter_next);

/**
* class_dev_iter_exit - finish iteration
* @iter: class iterator to finish
*
* Finish an iteration. Always call this function after iteration is
* complete whether the iteration ran till the end or not.
*/
void class_dev_iter_exit(struct class_dev_iter *iter)
{
klist_iter_exit(&iter->ki);
}
EXPORT_SYMBOL_GPL(class_dev_iter_exit);

/**
* class_for_each_device - device iterator
* @class: the class we're iterating
@@ -283,13 +362,13 @@ char *make_class_name(const char *name, struct kobject *kobj)
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*
* Note, we hold class->class_mutex in this function, so it can not be
* re-acquired in @fn, otherwise it will self-deadlocking. For
* example, calls to add or remove class members would be verboten.
* @fn is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
int class_for_each_device(struct class *class, struct device *start,
void *data, int (*fn)(struct device *, void *))
{
struct class_dev_iter iter;
struct device *dev;
int error = 0;

@@ -301,20 +380,13 @@ int class_for_each_device(struct class *class, struct device *start,
return -EINVAL;
}

mutex_lock(&class->p->class_mutex);
list_for_each_entry(dev, &class->p->class_devices, node) {
if (start) {
if (start == dev)
start = NULL;
continue;
}
dev = get_device(dev);
class_dev_iter_init(&iter, class, start, NULL);
while ((dev = class_dev_iter_next(&iter))) {
error = fn(dev, data);
put_device(dev);
if (error)
break;
}
mutex_unlock(&class->p->class_mutex);
class_dev_iter_exit(&iter);

return error;
}
@@ -337,16 +409,15 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
*
* Note, you will need to drop the reference with put_device() after use.
*
* We hold class->class_mutex in this function, so it can not be
* re-acquired in @match, otherwise it will self-deadlocking. For
* example, calls to add or remove class members would be verboten.
* @fn is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
struct device *class_find_device(struct class *class, struct device *start,
void *data,
int (*match)(struct device *, void *))
{
struct class_dev_iter iter;
struct device *dev;
int found = 0;

if (!class)
return NULL;
@@ -356,29 +427,23 @@ struct device *class_find_device(struct class *class, struct device *start,
return NULL;
}

mutex_lock(&class->p->class_mutex);
list_for_each_entry(dev, &class->p->class_devices, node) {
if (start) {
if (start == dev)
start = NULL;
continue;
}
dev = get_device(dev);
class_dev_iter_init(&iter, class, start, NULL);
while ((dev = class_dev_iter_next(&iter))) {
if (match(dev, data)) {
found = 1;
get_device(dev);
break;
} else
put_device(dev);
}
}
mutex_unlock(&class->p->class_mutex);
class_dev_iter_exit(&iter);

return found ? dev : NULL;
return dev;
}
EXPORT_SYMBOL_GPL(class_find_device);

int class_interface_register(struct class_interface *class_intf)
{
struct class *parent;
struct class_dev_iter iter;
struct device *dev;

if (!class_intf || !class_intf->class)
@@ -391,8 +456,10 @@ int class_interface_register(struct class_interface *class_intf)
mutex_lock(&parent->p->class_mutex);
list_add_tail(&class_intf->node, &parent->p->class_interfaces);
if (class_intf->add_dev) {
list_for_each_entry(dev, &parent->p->class_devices, node)
class_dev_iter_init(&iter, parent, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
class_intf->add_dev(dev, class_intf);
class_dev_iter_exit(&iter);
}
mutex_unlock(&parent->p->class_mutex);

@@ -402,6 +469,7 @@ int class_interface_register(struct class_interface *class_intf)
void class_interface_unregister(struct class_interface *class_intf)
{
struct class *parent = class_intf->class;
struct class_dev_iter iter;
struct device *dev;

if (!parent)
@@ -410,8 +478,10 @@ void class_interface_unregister(struct class_interface *class_intf)
mutex_lock(&parent->p->class_mutex);
list_del_init(&class_intf->node);
if (class_intf->remove_dev) {
list_for_each_entry(dev, &parent->p->class_devices, node)
class_dev_iter_init(&iter, parent, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
class_intf->remove_dev(dev, class_intf);
class_dev_iter_exit(&iter);
}
mutex_unlock(&parent->p->class_mutex);
@@ -536,7 +536,6 @@ void device_initialize(struct device *dev)
klist_init(&dev->klist_children, klist_children_get,
klist_children_put);
INIT_LIST_HEAD(&dev->dma_pools);
INIT_LIST_HEAD(&dev->node);
init_MUTEX(&dev->sem);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
@@ -916,7 +915,8 @@ int device_add(struct device *dev)
if (dev->class) {
mutex_lock(&dev->class->p->class_mutex);
/* tie the class to the device */
list_add_tail(&dev->node, &dev->class->p->class_devices);
klist_add_tail(&dev->knode_class,
&dev->class->p->class_devices);

/* notify any interfaces that the device is here */
list_for_each_entry(class_intf,
@@ -1032,7 +1032,7 @@ void device_del(struct device *dev)
if (class_intf->remove_dev)
class_intf->remove_dev(dev, class_intf);
/* remove the device from the class list */
list_del_init(&dev->node);
klist_del(&dev->knode_class);
mutex_unlock(&dev->class->p->class_mutex);
}
device_remove_file(dev, &uevent_attr);
@@ -109,12 +109,12 @@ static const struct attribute_group attr_group = {
static int
aoedisk_add_sysfs(struct aoedev *d)
{
return sysfs_create_group(&d->gd->dev.kobj, &attr_group);
return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}
void
aoedisk_rm_sysfs(struct aoedev *d)
{
sysfs_remove_group(&d->gd->dev.kobj, &attr_group);
sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}

static int
@@ -276,7 +276,7 @@ aoeblk_gdalloc(void *vp)
gd->first_minor = d->sysminor * AOE_PARTITIONS;
gd->fops = &aoe_bdops;
gd->private_data = d;
gd->capacity = d->ssize;
set_capacity(gd, d->ssize);
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
d->aoemajor, d->aoeminor);
@@ -645,7 +645,7 @@ aoecmd_sleepwork(struct work_struct *work)
unsigned long flags;
u64 ssize;

ssize = d->gd->capacity;
ssize = get_capacity(d->gd);
bd = bdget_disk(d->gd, 0);

if (bd) {
@@ -707,7 +707,7 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
return;
if (d->gd != NULL) {
d->gd->capacity = ssize;
set_capacity(d->gd, ssize);
d->flags |= DEVFL_NEWSIZE;
} else
d->flags |= DEVFL_GDALLOC;
@@ -756,12 +756,17 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
unsigned long n_sect = bio->bi_size >> 9;
const int rw = bio_data_dir(bio);
struct hd_struct *part;
int cpu;

part = get_part(disk, sector);
all_stat_inc(disk, part, ios[rw], sector);
all_stat_add(disk, part, ticks[rw], duration, sector);
all_stat_add(disk, part, sectors[rw], n_sect, sector);
all_stat_add(disk, part, io_ticks, duration, sector);
cpu = part_stat_lock();
part = disk_map_sector_rcu(disk, sector);

part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, ticks[rw], duration);
part_stat_add(cpu, part, sectors[rw], n_sect);
part_stat_add(cpu, part, io_ticks, duration);

part_stat_unlock();
}

void
@@ -91,7 +91,7 @@ aoedev_downdev(struct aoedev *d)
}

if (d->gd)
d->gd->capacity = 0;
set_capacity(d->gd, 0);

d->flags &= ~DEVFL_UP;
}
@@ -3460,8 +3460,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");

hba[i]->cmd_pool_bits =
kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
* sizeof(unsigned long), GFP_KERNEL);
hba[i]->cmd_pool = (CommandList_struct *)
pci_alloc_consistent(hba[i]->pdev,
hba[i]->nr_cmds * sizeof(CommandList_struct),
@@ -3493,8 +3493,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
/* command and error info recs zeroed out before
they are used */
memset(hba[i]->cmd_pool_bits, 0,
((hba[i]->nr_cmds + BITS_PER_LONG -
1) / BITS_PER_LONG) * sizeof(unsigned long));
DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
* sizeof(unsigned long));

hba[i]->num_luns = 0;
hba[i]->highest_lun = -1;
@@ -365,7 +365,7 @@ struct scsi2map {

static int
cciss_scsi_add_entry(int ctlr, int hostno,
unsigned char *scsi3addr, int devtype,
struct cciss_scsi_dev_t *device,
struct scsi2map *added, int *nadded)
{
/* assumes hba[ctlr]->scsi_ctlr->lock is held */
@@ -384,12 +384,12 @@ cciss_scsi_add_entry(int ctlr, int hostno,
lun = 0;
/* Is this device a non-zero lun of a multi-lun device */
/* byte 4 of the 8-byte LUN addr will contain the logical unit no. */
if (scsi3addr[4] != 0) {
if (device->scsi3addr[4] != 0) {
/* Search through our list and find the device which */
/* has the same 8 byte LUN address, excepting byte 4. */
/* Assign the same bus and target for this new LUN. */
/* Use the logical unit number from the firmware. */
memcpy(addr1, scsi3addr, 8);
memcpy(addr1, device->scsi3addr, 8);
addr1[4] = 0;
for (i = 0; i < n; i++) {
sd = &ccissscsi[ctlr].dev[i];
@@ -399,7 +399,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
if (memcmp(addr1, addr2, 8) == 0) {
bus = sd->bus;
target = sd->target;
lun = scsi3addr[4];
lun = device->scsi3addr[4];
break;
}
}
@@ -420,8 +420,12 @@ cciss_scsi_add_entry(int ctlr, int hostno,
added[*nadded].lun = sd->lun;
(*nadded)++;

memcpy(&sd->scsi3addr[0], scsi3addr, 8);
sd->devtype = devtype;
memcpy(sd->scsi3addr, device->scsi3addr, 8);
memcpy(sd->vendor, device->vendor, sizeof(sd->vendor));
memcpy(sd->revision, device->revision, sizeof(sd->revision));
memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
sd->devtype = device->devtype;

ccissscsi[ctlr].ndevices++;

/* initially, (before registering with scsi layer) we don't
@@ -487,6 +491,22 @@ static void fixup_botched_add(int ctlr, char *scsi3addr)
CPQ_TAPE_UNLOCK(ctlr, flags);
}

static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
struct cciss_scsi_dev_t *dev2)
{
return dev1->devtype == dev2->devtype &&
memcmp(dev1->scsi3addr, dev2->scsi3addr,
sizeof(dev1->scsi3addr)) == 0 &&
memcmp(dev1->device_id, dev2->device_id,
sizeof(dev1->device_id)) == 0 &&
memcmp(dev1->vendor, dev2->vendor,
sizeof(dev1->vendor)) == 0 &&
memcmp(dev1->model, dev2->model,
sizeof(dev1->model)) == 0 &&
memcmp(dev1->revision, dev2->revision,
sizeof(dev1->revision)) == 0;
}

static int
adjust_cciss_scsi_table(int ctlr, int hostno,
struct cciss_scsi_dev_t sd[], int nsds)
@@ -532,7 +552,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
for (j=0;j<nsds;j++) {
if (SCSI3ADDR_EQ(sd[j].scsi3addr,
csd->scsi3addr)) {
if (sd[j].devtype == csd->devtype)
if (device_is_the_same(&sd[j], csd))
found=2;
else
found=1;
@@ -548,22 +568,26 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
cciss_scsi_remove_entry(ctlr, hostno, i,
removed, &nremoved);
/* remove ^^^, hence i not incremented */
}
else if (found == 1) { /* device is different kind */
} else if (found == 1) { /* device is different in some way */
changes++;
printk("cciss%d: device c%db%dt%dl%d type changed "
"(device type now %s).\n",
ctlr, hostno, csd->bus, csd->target, csd->lun,
scsi_device_type(csd->devtype));
printk("cciss%d: device c%db%dt%dl%d has changed.\n",
ctlr, hostno, csd->bus, csd->target, csd->lun);
cciss_scsi_remove_entry(ctlr, hostno, i,
removed, &nremoved);
/* remove ^^^, hence i not incremented */
if (cciss_scsi_add_entry(ctlr, hostno,
&sd[j].scsi3addr[0], sd[j].devtype,
if (cciss_scsi_add_entry(ctlr, hostno, &sd[j],
added, &nadded) != 0)
/* we just removed one, so add can't fail. */
BUG();
csd->devtype = sd[j].devtype;
memcpy(csd->device_id, sd[j].device_id,
sizeof(csd->device_id));
memcpy(csd->vendor, sd[j].vendor,
sizeof(csd->vendor));
memcpy(csd->model, sd[j].model,
sizeof(csd->model));
memcpy(csd->revision, sd[j].revision,
sizeof(csd->revision));
} else /* device is same as it ever was, */
i++; /* so just move along. */
}
@@ -577,7 +601,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
csd = &ccissscsi[ctlr].dev[j];
if (SCSI3ADDR_EQ(sd[i].scsi3addr,
csd->scsi3addr)) {
if (sd[i].devtype == csd->devtype)
if (device_is_the_same(&sd[i], csd))
found=2; /* found device */
else
found=1; /* found a bug. */
@@ -586,16 +610,14 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
}
if (!found) {
changes++;
if (cciss_scsi_add_entry(ctlr, hostno,
&sd[i].scsi3addr[0], sd[i].devtype,
if (cciss_scsi_add_entry(ctlr, hostno, &sd[i],
added, &nadded) != 0)
break;
} else if (found == 1) {
/* should never happen... */
changes++;
printk("cciss%d: device unexpectedly changed type\n",
ctlr);
printk(KERN_WARNING "cciss%d: device "
"unexpectedly changed\n", ctlr);
/* but if it does happen, we just ignore that device */
}
}
@@ -1012,7 +1034,8 @@ cciss_scsi_interpret_error(CommandList_struct *cp)

static int
cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
unsigned char *buf, unsigned char bufsize)
unsigned char page, unsigned char *buf,
unsigned char bufsize)
{
int rc;
CommandList_struct *cp;
@@ -1032,8 +1055,8 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
ei = cp->err_info;

cdb[0] = CISS_INQUIRY;
cdb[1] = 0;
cdb[2] = 0;
cdb[1] = (page != 0);
cdb[2] = page;
cdb[3] = 0;
cdb[4] = bufsize;
cdb[5] = 0;
@@ -1053,6 +1076,25 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
return rc;
}

/* Get the device id from inquiry page 0x83 */
static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
unsigned char *device_id, int buflen)
{
int rc;
unsigned char *buf;

if (buflen > 16)
buflen = 16;
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return -1;
rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64);
if (rc == 0)
memcpy(device_id, &buf[8], buflen);
kfree(buf);
return rc != 0;
}

static int
cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
ReportLunData_struct *buf, int bufsize)
@@ -1142,25 +1184,21 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
ctlr_info_t *c;
__u32 num_luns=0;
unsigned char *ch;
/* unsigned char found[CCISS_MAX_SCSI_DEVS_PER_HBA]; */
struct cciss_scsi_dev_t currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
struct cciss_scsi_dev_t *currentsd, *this_device;
int ncurrent=0;
int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
int i;

c = (ctlr_info_t *) hba[cntl_num];
ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
if (ld_buff == NULL) {
printk(KERN_ERR "cciss: out of memory\n");
return;
}
inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
if (inq_buff == NULL) {
printk(KERN_ERR "cciss: out of memory\n");
kfree(ld_buff);
return;
currentsd = kzalloc(sizeof(*currentsd) *
(CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
printk(KERN_ERR "cciss: out of memory\n");
goto out;
}

this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
ch = &ld_buff->LUNListLength[0];
num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
@@ -1179,23 +1217,34 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)

/* adjust our table of devices */
for(i=0; i<num_luns; i++)
{
int devtype;

for (i = 0; i < num_luns; i++) {
/* for each physical lun, do an inquiry */
if (ld_buff->LUN[i][3] & 0xC0) continue;
memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);

if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, inq_buff,
(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff,
(unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
/* Inquiry failed (msg printed already) */
devtype = 0; /* so we will skip this device. */
} else /* what kind of device is this? */
devtype = (inq_buff[0] & 0x1f);
continue; /* so we will skip this device. */

switch (devtype)
this_device->devtype = (inq_buff[0] & 0x1f);
this_device->bus = -1;
this_device->target = -1;
this_device->lun = -1;
memcpy(this_device->scsi3addr, scsi3addr, 8);
memcpy(this_device->vendor, &inq_buff[8],
sizeof(this_device->vendor));
memcpy(this_device->model, &inq_buff[16],
sizeof(this_device->model));
memcpy(this_device->revision, &inq_buff[32],
sizeof(this_device->revision));
memset(this_device->device_id, 0,
sizeof(this_device->device_id));
cciss_scsi_get_device_id(hba[cntl_num], scsi3addr,
this_device->device_id, sizeof(this_device->device_id));

switch (this_device->devtype)
{
case 0x05: /* CD-ROM */ {

@@ -1220,15 +1269,10 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
printk(KERN_INFO "cciss%d: %s ignored, "
"too many devices.\n", cntl_num,
scsi_device_type(devtype));
scsi_device_type(this_device->devtype));
break;
}
memcpy(&currentsd[ncurrent].scsi3addr[0],
&scsi3addr[0], 8);
currentsd[ncurrent].devtype = devtype;
currentsd[ncurrent].bus = -1;
currentsd[ncurrent].target = -1;
currentsd[ncurrent].lun = -1;
currentsd[ncurrent] = *this_device;
ncurrent++;
break;
default:
@@ -1240,6 +1284,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
out:
kfree(inq_buff);
kfree(ld_buff);
kfree(currentsd);
return;
}
@@ -66,6 +66,10 @@ struct cciss_scsi_dev_t {
int devtype;
int bus, target, lun; /* as presented to the OS */
unsigned char scsi3addr[8]; /* as presented to the HW */
unsigned char device_id[16]; /* from inquiry pg. 0x83 */
unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
unsigned char model[16]; /* bytes 16-31 of inquiry data */
unsigned char revision[4]; /* bytes 32-35 of inquiry data */
};

struct cciss_scsi_hba_t {
@@ -424,7 +424,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
&(hba[i]->cmd_pool_dhandle));
hba[i]->cmd_pool_bits = kcalloc(
(NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG, sizeof(unsigned long),
DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
GFP_KERNEL);

if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
@@ -423,8 +423,15 @@ static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
* 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
* side 0 is on physical side 0 (but with the misnamed sector IDs).
* 'stretch' should probably be renamed to something more general, like
* 'options'. Other parameters should be self-explanatory (see also
* setfdprm(8)).
* 'options'.
*
* Bits 2 through 9 of 'stretch' tell the number of the first sector.
* The LSB (bit 2) is flipped. For most disks, the first sector
* is 1 (represented by 0x00<<2). For some CP/M and music sampler
* disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
* For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
*
* Other parameters should be self-explanatory (see also setfdprm(8)).
*/
/*
Size
@@ -1355,20 +1362,20 @@ static void fdc_specify(void)
}

/* Convert step rate from microseconds to milliseconds and 4 bits */
srt = 16 - (DP->srt * scale_dtr / 1000 + NOMINAL_DTR - 1) / NOMINAL_DTR;
srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR);
if (slow_floppy) {
srt = srt / 4;
}
SUPBOUND(srt, 0xf);
INFBOUND(srt, 0);

hlt = (DP->hlt * scale_dtr / 2 + NOMINAL_DTR - 1) / NOMINAL_DTR;
hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR);
if (hlt < 0x01)
hlt = 0x01;
else if (hlt > 0x7f)
hlt = hlt_max_code;

hut = (DP->hut * scale_dtr / 16 + NOMINAL_DTR - 1) / NOMINAL_DTR;
hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR);
if (hut < 0x1)
hut = 0x1;
else if (hut > 0xf)
@@ -2236,9 +2243,9 @@ static void setup_format_params(int track)
}
}
}
if (_floppy->stretch & FD_ZEROBASED) {
if (_floppy->stretch & FD_SECTBASEMASK) {
for (count = 0; count < F_SECT_PER_TRACK; count++)
here[count].sect--;
here[count].sect += FD_SECTBASE(_floppy) - 1;
}
}

@@ -2385,7 +2392,7 @@ static void rw_interrupt(void)

#ifdef FLOPPY_SANITY_CHECK
if (nr_sectors / ssize >
(in_sector_offset + current_count_sectors + ssize - 1) / ssize) {
DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
DPRINT("long rw: %x instead of %lx\n",
nr_sectors, current_count_sectors);
printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
@@ -2649,7 +2656,7 @@ static int make_raw_rw_request(void)
}
HEAD = fsector_t / _floppy->sect;

if (((_floppy->stretch & (FD_SWAPSIDES | FD_ZEROBASED)) ||
if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) ||
TESTF(FD_NEED_TWADDLE)) && fsector_t < _floppy->sect)
max_sector = _floppy->sect;

@@ -2679,7 +2686,7 @@ static int make_raw_rw_request(void)
CODE2SIZE;
SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
((_floppy->stretch & FD_ZEROBASED) ? 0 : 1);
FD_SECTBASE(_floppy);

/* tracksize describes the size which can be filled up with sectors
* of size ssize.
@@ -3311,7 +3318,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
g->head <= 0 ||
g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
/* check if reserved bits are set */
(g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_ZEROBASED)) != 0)
(g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
return -EINVAL;
if (type) {
if (!capable(CAP_SYS_ADMIN))
@@ -3356,7 +3363,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
if (DRS->maxblock > user_params[drive].sect ||
DRS->maxtrack ||
((user_params[drive].sect ^ oldStretch) &
(FD_SWAPSIDES | FD_ZEROBASED)))
(FD_SWAPSIDES | FD_SECTBASEMASK)))
invalidate_drive(bdev);
else
process_fd_request();
@@ -403,7 +403,7 @@ static int nbd_do_it(struct nbd_device *lo)
BUG_ON(lo->magic != LO_MAGIC);

lo->pid = current->pid;
ret = sysfs_create_file(&lo->disk->dev.kobj, &pid_attr.attr);
ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
if (ret) {
printk(KERN_ERR "nbd: sysfs_create_file failed!");
return ret;
@@ -412,7 +412,7 @@ static int nbd_do_it(struct nbd_device *lo)
while ((req = nbd_read_stat(lo)) != NULL)
nbd_end_request(req);

sysfs_remove_file(&lo->disk->dev.kobj, &pid_attr.attr);
sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
return 0;
}
@@ -2544,7 +2544,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
if (last_zone != zone) {
BUG_ON(last_zone != zone + pd->settings.size);
first_sectors = last_zone - bio->bi_sector;
bp = bio_split(bio, bio_split_pool, first_sectors);
bp = bio_split(bio, first_sectors);
BUG_ON(!bp);
pkt_make_request(q, &bp->bio1);
pkt_make_request(q, &bp->bio2);
@@ -2911,7 +2911,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
if (!disk->queue)
goto out_mem2;

pd->pkt_dev = MKDEV(disk->major, disk->first_minor);
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
goto out_new_dev;
@@ -199,7 +199,8 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
if (blk_fs_request(req)) {
if (ps3disk_submit_request_sg(dev, req))
break;
} else if (req->cmd_type == REQ_TYPE_FLUSH) {
} else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_FLUSH) {
if (ps3disk_submit_flush_request(dev, req))
break;
} else {
@@ -257,7 +258,8 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
return IRQ_HANDLED;
}

if (req->cmd_type == REQ_TYPE_FLUSH) {
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_FLUSH) {
read = 0;
num_sectors = req->hard_cur_sectors;
op = "flush";
@@ -405,7 +407,8 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)

dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);

req->cmd_type = REQ_TYPE_FLUSH;
req->cmd_type = REQ_TYPE_LINUX_BLOCK;
req->cmd[0] = REQ_LB_OP_FLUSH;
}

static unsigned long ps3disk_mask;
@@ -538,7 +541,7 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
struct ps3disk_private *priv = dev->sbd.core.driver_data;

mutex_lock(&ps3disk_mask_mutex);
__clear_bit(priv->gendisk->first_minor / PS3DISK_MINORS,
__clear_bit(MINOR(disk_devt(priv->gendisk)) / PS3DISK_MINORS,
&ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
@@ -47,20 +47,20 @@ static void blk_done(struct virtqueue *vq)

spin_lock_irqsave(&vblk->lock, flags);
while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
int uptodate;
int error;
switch (vbr->status) {
case VIRTIO_BLK_S_OK:
uptodate = 1;
error = 0;
break;
case VIRTIO_BLK_S_UNSUPP:
uptodate = -ENOTTY;
error = -ENOTTY;
break;
default:
uptodate = 0;
error = -EIO;
break;
}

end_dequeued_request(vbr->req, uptodate);
__blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
list_del(&vbr->list);
mempool_free(vbr, vblk->pool);
}
@@ -84,11 +84,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
if (blk_fs_request(vbr->req)) {
vbr->out_hdr.type = 0;
vbr->out_hdr.sector = vbr->req->sector;
vbr->out_hdr.ioprio = vbr->req->ioprio;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
} else if (blk_pc_request(vbr->req)) {
vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = vbr->req->ioprio;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
} else {
/* We don't put anything else in the queue. */
BUG();
@ -105,15 +105,17 @@ static DEFINE_SPINLOCK(blkif_io_lock);
|
||||
#define GRANT_INVALID_REF 0
|
||||
|
||||
#define PARTS_PER_DISK 16
|
||||
#define PARTS_PER_EXT_DISK 256
|
||||
|
||||
#define BLKIF_MAJOR(dev) ((dev)>>8)
|
||||
#define BLKIF_MINOR(dev) ((dev) & 0xff)
|
||||
|
||||
#define DEV_NAME "xvd" /* name in /dev */
|
||||
#define EXT_SHIFT 28
|
||||
#define EXTENDED (1<<EXT_SHIFT)
|
||||
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
|
||||
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
|
||||
|
||||
/* Information about our VBDs. */
|
||||
#define MAX_VBDS 64
|
||||
static LIST_HEAD(vbds_list);
|
||||
#define DEV_NAME "xvd" /* name in /dev */
|
||||
|
||||
static int get_id_from_freelist(struct blkfront_info *info)
|
||||
{
|
||||
@ -386,31 +388,60 @@ static int xlvbd_barrier(struct blkfront_info *info)
|
||||
}
|
||||
|
||||
|
||||
static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
|
||||
int vdevice, u16 vdisk_info, u16 sector_size,
|
||||
struct blkfront_info *info)
|
||||
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
|
||||
struct blkfront_info *info,
|
||||
u16 vdisk_info, u16 sector_size)
|
||||
{
|
||||
struct gendisk *gd;
|
||||
int nr_minors = 1;
|
||||
int err = -ENODEV;
|
||||
unsigned int offset;
|
||||
int minor;
|
||||
int nr_parts;
|
||||
|
||||
BUG_ON(info->gd != NULL);
|
||||
BUG_ON(info->rq != NULL);
|
||||
|
||||
if ((minor % PARTS_PER_DISK) == 0)
|
||||
nr_minors = PARTS_PER_DISK;
|
||||
if ((info->vdevice>>EXT_SHIFT) > 1) {
|
||||
/* this is above the extended range; something is wrong */
|
||||
printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!VDEV_IS_EXTENDED(info->vdevice)) {
|
||||
minor = BLKIF_MINOR(info->vdevice);
|
||||
nr_parts = PARTS_PER_DISK;
|
||||
} else {
|
||||
minor = BLKIF_MINOR_EXT(info->vdevice);
|
||||
nr_parts = PARTS_PER_EXT_DISK;
|
||||
}
|
||||
|
||||
if ((minor % nr_parts) == 0)
|
||||
nr_minors = nr_parts;
|
||||
|
||||
gd = alloc_disk(nr_minors);
|
||||
if (gd == NULL)
|
||||
goto out;
|
||||
|
||||
if (nr_minors > 1)
|
||||
sprintf(gd->disk_name, "%s%c", DEV_NAME,
|
||||
'a' + minor / PARTS_PER_DISK);
|
||||
else
|
||||
sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
|
||||
'a' + minor / PARTS_PER_DISK,
|
||||
minor % PARTS_PER_DISK);
|
||||
offset = minor / nr_parts;
|
||||
|
||||
if (nr_minors > 1) {
|
||||
if (offset < 26)
|
||||
sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
|
||||
else
|
||||
sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
|
||||
'a' + ((offset / 26)-1), 'a' + (offset % 26));
|
||||
} else {
|
||||
if (offset < 26)
|
||||
sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
|
||||
'a' + offset,
|
||||
minor & (nr_parts - 1));
|
||||
else
|
||||
sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
|
||||
'a' + ((offset / 26) - 1),
|
||||
'a' + (offset % 26),
|
||||
minor & (nr_parts - 1));
|
||||
}
|
||||
|
||||
gd->major = XENVBD_MAJOR;
|
||||
gd->first_minor = minor;
|
||||
@ -699,8 +730,13 @@ static int blkfront_probe(struct xenbus_device *dev,
err = xenbus_scanf(XBT_NIL, dev->nodename,
"virtual-device", "%i", &vdevice);
if (err != 1) {
xenbus_dev_fatal(dev, err, "reading virtual-device");
return err;
/* go looking in the extended area instead */
err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
"%i", &vdevice);
if (err != 1) {
xenbus_dev_fatal(dev, err, "reading virtual-device");
return err;
}
}

info = kzalloc(sizeof(*info), GFP_KERNEL);
@ -861,9 +897,7 @@ static void blkfront_connect(struct blkfront_info *info)
if (err)
info->feature_barrier = 0;

err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
sectors, info->vdevice,
binfo, sector_size, info);
err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->xbdev->otherend);
@ -2097,7 +2097,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,

len = nr * CD_FRAMESIZE_RAW;

ret = blk_rq_map_user(q, rq, ubuf, len);
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret)
break;
@ -624,14 +624,14 @@ static void gdrom_readdisk_dma(struct work_struct *work)
ctrl_outb(1, GDROM_DMA_STATUS_REG);
wait_event_interruptible_timeout(request_queue,
gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
err = gd.transfer;
err = gd.transfer ? -EIO : 0;
gd.transfer = 0;
gd.pending = 0;
/* now seek to take the request spinlock
* before handling ending the request */
spin_lock(&gdrom_lock);
list_del_init(&req->queuelist);
end_dequeued_request(req, 1 - err);
__blk_end_request(req, err, blk_rq_bytes(req));
}
spin_unlock(&gdrom_lock);
kfree(read_command);
@ -661,10 +661,10 @@ void add_disk_randomness(struct gendisk *disk)
if (!disk || !disk->random)
return;
/* first major is 1, so we get >= 0x200 here */
DEBUG_ENT("disk event %d:%d\n", disk->major, disk->first_minor);
DEBUG_ENT("disk event %d:%d\n",
MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));

add_timer_randomness(disk->random,
0x100 + MKDEV(disk->major, disk->first_minor));
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
#endif
@ -1113,7 +1113,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)

if (write) {
/* disk has become write protected */
if (cd->disk->policy) {
if (get_disk_ro(cd->disk)) {
cdrom_end_request(drive, 0);
return ide_stopped;
}
@ -41,6 +41,12 @@
#include <asm/io.h>
#include <asm/div64.h>

#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
#define IDE_DISK_MINORS (1 << PARTN_BITS)
#else
#define IDE_DISK_MINORS 0
#endif

struct ide_disk_obj {
ide_drive_t *drive;
ide_driver_t *driver;
@ -1151,8 +1157,7 @@ static int ide_disk_probe(ide_drive_t *drive)
|
||||
if (!idkp)
|
||||
goto failed;
|
||||
|
||||
g = alloc_disk_node(1 << PARTN_BITS,
|
||||
hwif_to_node(drive->hwif));
|
||||
g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
|
||||
if (!g)
|
||||
goto out_free_idkp;
|
||||
|
||||
@ -1178,9 +1183,11 @@ static int ide_disk_probe(ide_drive_t *drive)
|
||||
} else
|
||||
drive->attach = 1;
|
||||
|
||||
g->minors = 1 << PARTN_BITS;
|
||||
g->minors = IDE_DISK_MINORS;
|
||||
g->driverfs_dev = &drive->gendev;
|
||||
g->flags = drive->removable ? GENHD_FL_REMOVABLE : 0;
|
||||
g->flags |= GENHD_FL_EXT_DEVT;
|
||||
if (drive->removable)
|
||||
g->flags |= GENHD_FL_REMOVABLE;
|
||||
set_capacity(g, idedisk_capacity(drive));
|
||||
g->fops = &idedisk_ops;
|
||||
add_disk(g);
|
||||
|
@ -1188,7 +1188,7 @@ static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
struct gendisk *p = data;
*part &= (1 << PARTN_BITS) - 1;
return &p->dev.kobj;
return &disk_to_dev(p)->kobj;
}

static int exact_lock(dev_t dev, void *data)
@ -426,7 +426,7 @@ static int list_devices(struct dm_ioctl *param, size_t param_size)
old_nl->next = (uint32_t) ((void *) nl -
(void *) old_nl);
disk = dm_disk(hc->md);
nl->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
nl->dev = huge_encode_dev(disk_devt(disk));
nl->next = 0;
strcpy(nl->name, hc->name);

@ -539,7 +539,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (dm_suspended(md))
param->flags |= DM_SUSPEND_FLAG;

param->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
param->dev = huge_encode_dev(disk_devt(disk));

/*
* Yes, this will be out of date by the time it gets back
@ -548,7 +548,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
*/
param->open_count = dm_open_count(md);

if (disk->policy)
if (get_disk_ro(disk))
param->flags |= DM_READONLY_FLAG;

param->event_nr = dm_get_event_nr(md);
@ -33,6 +33,7 @@ struct pgpath {
|
||||
unsigned fail_count; /* Cumulative failure count */
|
||||
|
||||
struct dm_path path;
|
||||
struct work_struct deactivate_path;
|
||||
};
|
||||
|
||||
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
|
||||
@ -112,6 +113,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
|
||||
static void process_queued_ios(struct work_struct *work);
|
||||
static void trigger_event(struct work_struct *work);
|
||||
static void activate_path(struct work_struct *work);
|
||||
static void deactivate_path(struct work_struct *work);
|
||||
|
||||
|
||||
/*-----------------------------------------------
|
||||
@ -122,8 +124,10 @@ static struct pgpath *alloc_pgpath(void)
|
||||
{
|
||||
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
|
||||
|
||||
if (pgpath)
|
||||
if (pgpath) {
|
||||
pgpath->path.is_active = 1;
|
||||
INIT_WORK(&pgpath->deactivate_path, deactivate_path);
|
||||
}
|
||||
|
||||
return pgpath;
|
||||
}
|
||||
@ -133,6 +137,14 @@ static void free_pgpath(struct pgpath *pgpath)
|
||||
kfree(pgpath);
|
||||
}
|
||||
|
||||
static void deactivate_path(struct work_struct *work)
|
||||
{
|
||||
struct pgpath *pgpath =
|
||||
container_of(work, struct pgpath, deactivate_path);
|
||||
|
||||
blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
|
||||
}
|
||||
|
||||
static struct priority_group *alloc_priority_group(void)
|
||||
{
|
||||
struct priority_group *pg;
|
||||
@ -870,6 +882,7 @@ static int fail_path(struct pgpath *pgpath)
|
||||
pgpath->path.dev->name, m->nr_valid_paths);
|
||||
|
||||
queue_work(kmultipathd, &m->trigger_event);
|
||||
queue_work(kmultipathd, &pgpath->deactivate_path);
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
|
@ -284,8 +284,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
|
||||
|
||||
memset(major_minor, 0, sizeof(major_minor));
|
||||
sprintf(major_minor, "%d:%d",
|
||||
bio->bi_bdev->bd_disk->major,
|
||||
bio->bi_bdev->bd_disk->first_minor);
|
||||
MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
|
||||
MINOR(disk_devt(bio->bi_bdev->bd_disk)));
|
||||
|
||||
/*
|
||||
* Test to see which stripe drive triggered the event
|
||||
|
@ -377,13 +377,14 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
int cpu;

io->start_time = jiffies;

preempt_disable();
disk_round_stats(dm_disk(md));
preempt_enable();
dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
cpu = part_stat_lock();
part_round_stats(cpu, &dm_disk(md)->part0);
part_stat_unlock();
dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static int end_io_acct(struct dm_io *io)
@ -391,15 +392,16 @@ static int end_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->bio;
unsigned long duration = jiffies - io->start_time;
int pending;
int pending, cpu;
int rw = bio_data_dir(bio);

preempt_disable();
disk_round_stats(dm_disk(md));
preempt_enable();
dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
cpu = part_stat_lock();
part_round_stats(cpu, &dm_disk(md)->part0);
part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
part_stat_unlock();

disk_stat_add(dm_disk(md), ticks[rw], duration);
dm_disk(md)->part0.in_flight = pending =
atomic_dec_return(&md->pending);

return !pending;
}
@ -885,6 +887,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
|
||||
int r = -EIO;
|
||||
int rw = bio_data_dir(bio);
|
||||
struct mapped_device *md = q->queuedata;
|
||||
int cpu;
|
||||
|
||||
/*
|
||||
* There is no use in forwarding any barrier request since we can't
|
||||
@ -897,8 +900,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)
|
||||
|
||||
down_read(&md->io_lock);
|
||||
|
||||
disk_stat_inc(dm_disk(md), ios[rw]);
|
||||
disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));
|
||||
cpu = part_stat_lock();
|
||||
part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
|
||||
part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
|
||||
part_stat_unlock();
|
||||
|
||||
/*
|
||||
* If we're suspended we have to queue
|
||||
@ -1146,7 +1151,7 @@ static void unlock_fs(struct mapped_device *md);
|
||||
|
||||
static void free_dev(struct mapped_device *md)
|
||||
{
|
||||
int minor = md->disk->first_minor;
|
||||
int minor = MINOR(disk_devt(md->disk));
|
||||
|
||||
if (md->suspended_bdev) {
|
||||
unlock_fs(md);
|
||||
@ -1182,7 +1187,7 @@ static void event_callback(void *context)
|
||||
list_splice_init(&md->uevent_list, &uevents);
|
||||
spin_unlock_irqrestore(&md->uevent_lock, flags);
|
||||
|
||||
dm_send_uevents(&uevents, &md->disk->dev.kobj);
|
||||
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
|
||||
|
||||
atomic_inc(&md->event_nr);
|
||||
wake_up(&md->eventq);
|
||||
@ -1267,7 +1272,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
|
||||
|
||||
md = idr_find(&_minor_idr, minor);
|
||||
if (md && (md == MINOR_ALLOCED ||
|
||||
(dm_disk(md)->first_minor != minor) ||
|
||||
(MINOR(disk_devt(dm_disk(md))) != minor) ||
|
||||
test_bit(DMF_FREEING, &md->flags))) {
|
||||
md = NULL;
|
||||
goto out;
|
||||
@ -1318,7 +1323,8 @@ void dm_put(struct mapped_device *md)
|
||||
|
||||
if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
|
||||
map = dm_get_table(md);
|
||||
idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
|
||||
idr_replace(&_minor_idr, MINOR_ALLOCED,
|
||||
MINOR(disk_devt(dm_disk(md))));
|
||||
set_bit(DMF_FREEING, &md->flags);
|
||||
spin_unlock(&_minor_lock);
|
||||
if (!dm_suspended(md)) {
|
||||
@ -1638,7 +1644,7 @@ int dm_resume(struct mapped_device *md)
|
||||
*---------------------------------------------------------------*/
|
||||
void dm_kobject_uevent(struct mapped_device *md)
|
||||
{
|
||||
kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
|
||||
kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
|
||||
}
|
||||
|
||||
uint32_t dm_next_uevent_seq(struct mapped_device *md)
|
||||
|
@ -318,14 +318,18 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
|
||||
mddev_t *mddev = q->queuedata;
|
||||
dev_info_t *tmp_dev;
|
||||
sector_t block;
|
||||
int cpu;
|
||||
|
||||
if (unlikely(bio_barrier(bio))) {
|
||||
bio_endio(bio, -EOPNOTSUPP);
|
||||
return 0;
|
||||
}
|
||||
|
||||
disk_stat_inc(mddev->gendisk, ios[rw]);
|
||||
disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
|
||||
cpu = part_stat_lock();
|
||||
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
|
||||
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
|
||||
bio_sectors(bio));
|
||||
part_stat_unlock();
|
||||
|
||||
tmp_dev = which_dev(mddev, bio->bi_sector);
|
||||
block = bio->bi_sector >> 1;
|
||||
@ -349,7 +353,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
|
||||
* split it.
|
||||
*/
|
||||
struct bio_pair *bp;
|
||||
bp = bio_split(bio, bio_split_pool,
|
||||
bp = bio_split(bio,
|
||||
((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector);
|
||||
if (linear_make_request(q, &bp->bio1))
|
||||
generic_make_request(&bp->bio1);
|
||||
|
@ -1464,10 +1464,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
|
||||
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
|
||||
goto fail;
|
||||
|
||||
if (rdev->bdev->bd_part)
|
||||
ko = &rdev->bdev->bd_part->dev.kobj;
|
||||
else
|
||||
ko = &rdev->bdev->bd_disk->dev.kobj;
|
||||
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
|
||||
if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
|
||||
kobject_del(&rdev->kobj);
|
||||
goto fail;
|
||||
@ -3470,8 +3467,8 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
|
||||
disk->queue = mddev->queue;
|
||||
add_disk(disk);
|
||||
mddev->gendisk = disk;
|
||||
error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
|
||||
"%s", "md");
|
||||
error = kobject_init_and_add(&mddev->kobj, &md_ktype,
|
||||
&disk_to_dev(disk)->kobj, "%s", "md");
|
||||
mutex_unlock(&disks_mutex);
|
||||
if (error)
|
||||
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
|
||||
@ -3761,7 +3758,7 @@ static int do_md_run(mddev_t * mddev)
|
||||
sysfs_notify(&mddev->kobj, NULL, "array_state");
|
||||
sysfs_notify(&mddev->kobj, NULL, "sync_action");
|
||||
sysfs_notify(&mddev->kobj, NULL, "degraded");
|
||||
kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
|
||||
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -5549,8 +5546,8 @@ static int is_mddev_idle(mddev_t *mddev)
|
||||
rcu_read_lock();
|
||||
rdev_for_each_rcu(rdev, mddev) {
|
||||
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
|
||||
curr_events = disk_stat_read(disk, sectors[0]) +
|
||||
disk_stat_read(disk, sectors[1]) -
|
||||
curr_events = part_stat_read(&disk->part0, sectors[0]) +
|
||||
part_stat_read(&disk->part0, sectors[1]) -
|
||||
atomic_read(&disk->sync_io);
|
||||
/* sync IO will cause sync_io to increase before the disk_stats
|
||||
* as sync_io is counted when a request starts, and
|
||||
|
@ -147,6 +147,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
|
||||
struct multipath_bh * mp_bh;
|
||||
struct multipath_info *multipath;
|
||||
const int rw = bio_data_dir(bio);
|
||||
int cpu;
|
||||
|
||||
if (unlikely(bio_barrier(bio))) {
|
||||
bio_endio(bio, -EOPNOTSUPP);
|
||||
@ -158,8 +159,11 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
|
||||
mp_bh->master_bio = bio;
|
||||
mp_bh->mddev = mddev;
|
||||
|
||||
disk_stat_inc(mddev->gendisk, ios[rw]);
|
||||
disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
|
||||
cpu = part_stat_lock();
|
||||
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
|
||||
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
|
||||
bio_sectors(bio));
|
||||
part_stat_unlock();
|
||||
|
||||
mp_bh->path = multipath_map(conf);
|
||||
if (mp_bh->path < 0) {
|
||||
|
@ -399,14 +399,18 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
|
||||
sector_t chunk;
|
||||
sector_t block, rsect;
|
||||
const int rw = bio_data_dir(bio);
|
||||
int cpu;
|
||||
|
||||
if (unlikely(bio_barrier(bio))) {
|
||||
bio_endio(bio, -EOPNOTSUPP);
|
||||
return 0;
|
||||
}
|
||||
|
||||
disk_stat_inc(mddev->gendisk, ios[rw]);
|
||||
disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
|
||||
cpu = part_stat_lock();
|
||||
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
|
||||
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
|
||||
bio_sectors(bio));
|
||||
part_stat_unlock();
|
||||
|
||||
chunk_size = mddev->chunk_size >> 10;
|
||||
chunk_sects = mddev->chunk_size >> 9;
|
||||
@ -423,7 +427,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
|
||||
/* This is a one page bio that upper layers
|
||||
* refuse to split for us, so we need to split it.
|
||||
*/
|
||||
bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
|
||||
bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
|
||||
if (raid0_make_request(q, &bp->bio1))
|
||||
generic_make_request(&bp->bio1);
|
||||
if (raid0_make_request(q, &bp->bio2))
|
||||
|
@ -779,7 +779,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
|
||||
struct page **behind_pages = NULL;
|
||||
const int rw = bio_data_dir(bio);
|
||||
const int do_sync = bio_sync(bio);
|
||||
int do_barriers;
|
||||
int cpu, do_barriers;
|
||||
mdk_rdev_t *blocked_rdev;
|
||||
|
||||
/*
|
||||
@ -804,8 +804,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
|
||||
|
||||
bitmap = mddev->bitmap;
|
||||
|
||||
disk_stat_inc(mddev->gendisk, ios[rw]);
|
||||
disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
|
||||
cpu = part_stat_lock();
|
||||
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
|
||||
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
|
||||
bio_sectors(bio));
|
||||
part_stat_unlock();
|
||||
|
||||
/*
|
||||
* make_request() can abort the operation when READA is being
|
||||
@ -1302,9 +1305,6 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
|
||||
sbio->bi_size = r1_bio->sectors << 9;
|
||||
sbio->bi_idx = 0;
|
||||
sbio->bi_phys_segments = 0;
|
||||
sbio->bi_hw_segments = 0;
|
||||
sbio->bi_hw_front_size = 0;
|
||||
sbio->bi_hw_back_size = 0;
|
||||
sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
|
||||
sbio->bi_flags |= 1 << BIO_UPTODATE;
|
||||
sbio->bi_next = NULL;
|
||||
@ -1790,7 +1790,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
|
||||
bio->bi_vcnt = 0;
|
||||
bio->bi_idx = 0;
|
||||
bio->bi_phys_segments = 0;
|
||||
bio->bi_hw_segments = 0;
|
||||
bio->bi_size = 0;
|
||||
bio->bi_end_io = NULL;
|
||||
bio->bi_private = NULL;
|
||||
|
@ -789,6 +789,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
|
||||
mirror_info_t *mirror;
|
||||
r10bio_t *r10_bio;
|
||||
struct bio *read_bio;
|
||||
int cpu;
|
||||
int i;
|
||||
int chunk_sects = conf->chunk_mask + 1;
|
||||
const int rw = bio_data_dir(bio);
|
||||
@ -816,7 +817,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
|
||||
/* This is a one page bio that upper layers
|
||||
* refuse to split for us, so we need to split it.
|
||||
*/
|
||||
bp = bio_split(bio, bio_split_pool,
|
||||
bp = bio_split(bio,
|
||||
chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
|
||||
if (make_request(q, &bp->bio1))
|
||||
generic_make_request(&bp->bio1);
|
||||
@ -843,8 +844,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
|
||||
*/
|
||||
wait_barrier(conf);
|
||||
|
||||
disk_stat_inc(mddev->gendisk, ios[rw]);
|
||||
disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
|
||||
cpu = part_stat_lock();
|
||||
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
|
||||
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
|
||||
bio_sectors(bio));
|
||||
part_stat_unlock();
|
||||
|
||||
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
|
||||
|
||||
@ -1345,9 +1349,6 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
|
||||
tbio->bi_size = r10_bio->sectors << 9;
|
||||
tbio->bi_idx = 0;
|
||||
tbio->bi_phys_segments = 0;
|
||||
tbio->bi_hw_segments = 0;
|
||||
tbio->bi_hw_front_size = 0;
|
||||
tbio->bi_hw_back_size = 0;
|
||||
tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
|
||||
tbio->bi_flags |= 1 << BIO_UPTODATE;
|
||||
tbio->bi_next = NULL;
|
||||
@ -1947,7 +1948,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
|
||||
bio->bi_vcnt = 0;
|
||||
bio->bi_idx = 0;
|
||||
bio->bi_phys_segments = 0;
|
||||
bio->bi_hw_segments = 0;
|
||||
bio->bi_size = 0;
|
||||
}
|
||||
|
||||
|
@ -101,6 +101,40 @@
|
||||
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We maintain a biased count of active stripes in the bottom 16 bits of
|
||||
* bi_phys_segments, and a count of processed stripes in the upper 16 bits
|
||||
*/
|
||||
static inline int raid5_bi_phys_segments(struct bio *bio)
|
||||
{
|
||||
return bio->bi_phys_segments & 0xffff;
|
||||
}
|
||||
|
||||
static inline int raid5_bi_hw_segments(struct bio *bio)
|
||||
{
|
||||
return (bio->bi_phys_segments >> 16) & 0xffff;
|
||||
}
|
||||
|
||||
static inline int raid5_dec_bi_phys_segments(struct bio *bio)
|
||||
{
|
||||
--bio->bi_phys_segments;
|
||||
return raid5_bi_phys_segments(bio);
|
||||
}
|
||||
|
||||
static inline int raid5_dec_bi_hw_segments(struct bio *bio)
|
||||
{
|
||||
unsigned short val = raid5_bi_hw_segments(bio);
|
||||
|
||||
--val;
|
||||
bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
|
||||
{
|
||||
bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16);
|
||||
}
|
||||
|
||||
static inline int raid6_next_disk(int disk, int raid_disks)
|
||||
{
|
||||
disk++;
|
||||
@ -507,7 +541,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
|
||||
while (rbi && rbi->bi_sector <
|
||||
dev->sector + STRIPE_SECTORS) {
|
||||
rbi2 = r5_next_bio(rbi, dev->sector);
|
||||
if (--rbi->bi_phys_segments == 0) {
|
||||
if (!raid5_dec_bi_phys_segments(rbi)) {
|
||||
rbi->bi_next = return_bi;
|
||||
return_bi = rbi;
|
||||
}
|
||||
@ -1725,7 +1759,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
|
||||
if (*bip)
|
||||
bi->bi_next = *bip;
|
||||
*bip = bi;
|
||||
bi->bi_phys_segments ++;
|
||||
bi->bi_phys_segments++;
|
||||
spin_unlock_irq(&conf->device_lock);
|
||||
spin_unlock(&sh->lock);
|
||||
|
||||
@ -1819,7 +1853,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
|
||||
sh->dev[i].sector + STRIPE_SECTORS) {
|
||||
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
|
||||
clear_bit(BIO_UPTODATE, &bi->bi_flags);
|
||||
if (--bi->bi_phys_segments == 0) {
|
||||
if (!raid5_dec_bi_phys_segments(bi)) {
|
||||
md_write_end(conf->mddev);
|
||||
bi->bi_next = *return_bi;
|
||||
*return_bi = bi;
|
||||
@ -1834,7 +1868,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
|
||||
sh->dev[i].sector + STRIPE_SECTORS) {
|
||||
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
|
||||
clear_bit(BIO_UPTODATE, &bi->bi_flags);
|
||||
if (--bi->bi_phys_segments == 0) {
|
||||
if (!raid5_dec_bi_phys_segments(bi)) {
|
||||
md_write_end(conf->mddev);
|
||||
bi->bi_next = *return_bi;
|
||||
*return_bi = bi;
|
||||
@ -1858,7 +1892,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
|
||||
struct bio *nextbi =
|
||||
r5_next_bio(bi, sh->dev[i].sector);
|
||||
clear_bit(BIO_UPTODATE, &bi->bi_flags);
|
||||
if (--bi->bi_phys_segments == 0) {
|
||||
if (!raid5_dec_bi_phys_segments(bi)) {
|
||||
bi->bi_next = *return_bi;
|
||||
*return_bi = bi;
|
||||
}
|
||||
@ -2033,7 +2067,7 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
|
||||
while (wbi && wbi->bi_sector <
|
||||
dev->sector + STRIPE_SECTORS) {
|
||||
wbi2 = r5_next_bio(wbi, dev->sector);
|
||||
if (--wbi->bi_phys_segments == 0) {
|
||||
if (!raid5_dec_bi_phys_segments(wbi)) {
|
||||
md_write_end(conf->mddev);
|
||||
wbi->bi_next = *return_bi;
|
||||
*return_bi = wbi;
|
||||
@ -2814,7 +2848,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
|
||||
copy_data(0, rbi, dev->page, dev->sector);
|
||||
rbi2 = r5_next_bio(rbi, dev->sector);
|
||||
spin_lock_irq(&conf->device_lock);
|
||||
if (--rbi->bi_phys_segments == 0) {
|
||||
if (!raid5_dec_bi_phys_segments(rbi)) {
|
||||
rbi->bi_next = return_bi;
|
||||
return_bi = rbi;
|
||||
}
|
||||
@ -3155,8 +3189,11 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
|
||||
if(bi) {
|
||||
conf->retry_read_aligned_list = bi->bi_next;
|
||||
bi->bi_next = NULL;
|
||||
/*
|
||||
* this sets the active strip count to 1 and the processed
|
||||
* strip count to zero (upper 8 bits)
|
||||
*/
|
||||
bi->bi_phys_segments = 1; /* biased count of active stripes */
|
||||
bi->bi_hw_segments = 0; /* count of processed stripes */
|
||||
}
|
||||
|
||||
return bi;
|
||||
@ -3206,8 +3243,7 @@ static int bio_fits_rdev(struct bio *bi)
|
||||
if ((bi->bi_size>>9) > q->max_sectors)
|
||||
return 0;
|
||||
blk_recount_segments(q, bi);
|
||||
if (bi->bi_phys_segments > q->max_phys_segments ||
|
||||
bi->bi_hw_segments > q->max_hw_segments)
|
||||
if (bi->bi_phys_segments > q->max_phys_segments)
|
||||
return 0;
|
||||
|
||||
if (q->merge_bvec_fn)
|
||||
@ -3351,7 +3387,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
|
||||
sector_t logical_sector, last_sector;
|
||||
struct stripe_head *sh;
|
||||
const int rw = bio_data_dir(bi);
|
||||
int remaining;
|
||||
int cpu, remaining;
|
||||
|
||||
if (unlikely(bio_barrier(bi))) {
|
||||
bio_endio(bi, -EOPNOTSUPP);
|
||||
@ -3360,8 +3396,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
|
||||
|
||||
md_write_start(mddev, bi);
|
||||
|
||||
disk_stat_inc(mddev->gendisk, ios[rw]);
|
||||
disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
|
||||
cpu = part_stat_lock();
|
||||
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
|
||||
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
|
||||
bio_sectors(bi));
|
||||
part_stat_unlock();
|
||||
|
||||
if (rw == READ &&
|
||||
mddev->reshape_position == MaxSector &&
|
||||
@ -3468,7 +3507,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
|
||||
|
||||
}
|
||||
spin_lock_irq(&conf->device_lock);
|
||||
remaining = --bi->bi_phys_segments;
|
||||
remaining = raid5_dec_bi_phys_segments(bi);
|
||||
spin_unlock_irq(&conf->device_lock);
|
||||
if (remaining == 0) {
|
||||
|
||||
@ -3752,7 +3791,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
|
||||
sector += STRIPE_SECTORS,
|
||||
scnt++) {
|
||||
|
||||
if (scnt < raid_bio->bi_hw_segments)
|
||||
if (scnt < raid5_bi_hw_segments(raid_bio))
|
||||
/* already done this stripe */
|
||||
continue;
|
||||
|
||||
@ -3760,7 +3799,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
|
||||
|
||||
if (!sh) {
|
||||
/* failed to get a stripe - must wait */
|
||||
raid_bio->bi_hw_segments = scnt;
|
||||
raid5_set_bi_hw_segments(raid_bio, scnt);
|
||||
conf->retry_read_aligned = raid_bio;
|
||||
return handled;
|
||||
}
|
||||
@ -3768,7 +3807,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
|
||||
set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
|
||||
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
|
||||
release_stripe(sh);
|
||||
raid_bio->bi_hw_segments = scnt;
|
||||
raid5_set_bi_hw_segments(raid_bio, scnt);
|
||||
conf->retry_read_aligned = raid_bio;
|
||||
return handled;
|
||||
}
|
||||
@ -3778,7 +3817,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
|
||||
handled++;
|
||||
}
|
||||
spin_lock_irq(&conf->device_lock);
|
||||
remaining = --raid_bio->bi_phys_segments;
|
||||
remaining = raid5_dec_bi_phys_segments(raid_bio);
|
||||
spin_unlock_irq(&conf->device_lock);
|
||||
if (remaining == 0)
|
||||
bio_endio(raid_bio, 0);
|
||||
|
@ -197,7 +197,7 @@ static int mspro_block_bd_open(struct inode *inode, struct file *filp)
|
||||
static int mspro_block_disk_release(struct gendisk *disk)
|
||||
{
|
||||
struct mspro_block_data *msb = disk->private_data;
|
||||
int disk_id = disk->first_minor >> MSPRO_BLOCK_PART_SHIFT;
|
||||
int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT;
|
||||
|
||||
mutex_lock(&mspro_block_disk_lock);
|
||||
|
||||
@ -828,7 +828,7 @@ static void mspro_block_submit_req(struct request_queue *q)
|
||||
|
||||
if (msb->eject) {
|
||||
while ((req = elv_next_request(q)) != NULL)
|
||||
end_queued_request(req, -ENODEV);
|
||||
__blk_end_request(req, -ENODEV, blk_rq_bytes(req));
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
|
||||
mutex_lock(&open_lock);
|
||||
md->usage--;
|
||||
if (md->usage == 0) {
|
||||
int devidx = md->disk->first_minor >> MMC_SHIFT;
|
||||
int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
|
||||
__clear_bit(devidx, dev_use);
|
||||
|
||||
put_disk(md->disk);
|
||||
|
@ -1005,6 +1005,29 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
|
||||
return ftl_write((void *)dev, buf, block, 1);
|
||||
}
|
||||
|
||||
static int ftl_discardsect(struct mtd_blktrans_dev *dev,
|
||||
unsigned long sector, unsigned nr_sects)
|
||||
{
|
||||
partition_t *part = (void *)dev;
|
||||
uint32_t bsize = 1 << part->header.EraseUnitSize;
|
||||
|
||||
DEBUG(1, "FTL erase sector %ld for %d sectors\n",
|
||||
sector, nr_sects);
|
||||
|
||||
while (nr_sects) {
|
||||
uint32_t old_addr = part->VirtualBlockMap[sector];
|
||||
if (old_addr != 0xffffffff) {
|
||||
part->VirtualBlockMap[sector] = 0xffffffff;
|
||||
part->EUNInfo[old_addr/bsize].Deleted++;
|
||||
if (set_bam_entry(part, old_addr, 0))
|
||||
return -EIO;
|
||||
}
|
||||
nr_sects--;
|
||||
sector++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
/*====================================================================*/
|
||||
|
||||
static void ftl_freepart(partition_t *part)
|
||||
@ -1069,6 +1092,7 @@ static struct mtd_blktrans_ops ftl_tr = {
|
||||
.blksize = SECTOR_SIZE,
|
||||
.readsect = ftl_readsect,
|
||||
.writesect = ftl_writesect,
|
||||
.discard = ftl_discardsect,
|
||||
.getgeo = ftl_getgeo,
|
||||
.add_mtd = ftl_add_mtd,
|
||||
.remove_dev = ftl_remove_dev,
|
||||
|
@ -32,6 +32,14 @@ struct mtd_blkcore_priv {
|
||||
spinlock_t queue_lock;
|
||||
};
|
||||
|
||||
static int blktrans_discard_request(struct request_queue *q,
|
||||
struct request *req)
|
||||
{
|
||||
req->cmd_type = REQ_TYPE_LINUX_BLOCK;
|
||||
req->cmd[0] = REQ_LB_OP_DISCARD;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
|
||||
struct mtd_blktrans_dev *dev,
|
||||
struct request *req)
|
||||
@ -44,6 +52,10 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
|
||||
|
||||
buf = req->buffer;
|
||||
|
||||
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
|
||||
req->cmd[0] == REQ_LB_OP_DISCARD)
|
||||
return !tr->discard(dev, block, nsect);
|
||||
|
||||
if (!blk_fs_request(req))
|
||||
return 0;
|
||||
|
||||
@ -367,6 +379,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
|
||||
|
||||
tr->blkcore_priv->rq->queuedata = tr;
|
||||
blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
|
||||
if (tr->discard)
|
||||
blk_queue_set_discard(tr->blkcore_priv->rq,
|
||||
blktrans_discard_request);
|
||||
|
||||
tr->blkshift = ffs(tr->blksize) - 1;
|
||||
|
||||
tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
|
||||
|
@ -76,7 +76,8 @@ dasd_devices_show(struct seq_file *m, void *v)
|
||||
/* Print kdev. */
|
||||
if (block->gdp)
|
||||
seq_printf(m, " at (%3d:%6d)",
|
||||
block->gdp->major, block->gdp->first_minor);
|
||||
MAJOR(disk_devt(block->gdp)),
|
||||
MINOR(disk_devt(block->gdp)));
|
||||
else
|
||||
seq_printf(m, " at (???:??????)");
|
||||
/* Print device name. */
|
||||
|
@ -114,7 +114,7 @@ dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
|
||||
found = 0;
|
||||
// test if minor available
|
||||
list_for_each_entry(entry, &dcssblk_devices, lh)
|
||||
if (minor == entry->gd->first_minor)
|
||||
if (minor == MINOR(disk_devt(entry->gd)))
|
||||
found++;
|
||||
if (!found) break; // got unused minor
|
||||
}
|
||||
@ -397,7 +397,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
|
||||
goto unload_seg;
|
||||
}
|
||||
sprintf(dev_info->gd->disk_name, "dcssblk%d",
|
||||
dev_info->gd->first_minor);
|
||||
MINOR(disk_devt(dev_info->gd)));
|
||||
list_add_tail(&dev_info->lh, &dcssblk_devices);
|
||||
|
||||
if (!try_module_get(THIS_MODULE)) {
|
||||
|
@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
|
||||
srbcmd->id = cpu_to_le32(scmd_id(cmd));
|
||||
srbcmd->lun = cpu_to_le32(cmd->device->lun);
|
||||
srbcmd->flags = cpu_to_le32(flag);
|
||||
timeout = cmd->timeout_per_command/HZ;
|
||||
timeout = cmd->request->timeout/HZ;
|
||||
if (timeout == 0)
|
||||
timeout = 1;
|
||||
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
|
||||
|
@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
|
||||
|
||||
/* use request field to save the ptr. to completion struct. */
|
||||
scp->request = (struct request *)&wait;
|
||||
scp->timeout_per_command = timeout*HZ;
|
||||
scp->cmd_len = 12;
|
||||
scp->cmnd = cmnd;
|
||||
cmndinfo.priority = IOCTL_PRI;
|
||||
@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
|
||||
register Scsi_Cmnd *pscp;
|
||||
register Scsi_Cmnd *nscp;
|
||||
ulong flags;
|
||||
unchar b, t;
|
||||
|
||||
TRACE(("gdth_putq() priority %d\n",priority));
|
||||
spin_lock_irqsave(&ha->smp_lock, flags);
|
||||
|
||||
if (!cmndinfo->internal_command) {
|
||||
if (!cmndinfo->internal_command)
|
||||
cmndinfo->priority = priority;
|
||||
b = scp->device->channel;
|
||||
t = scp->device->id;
|
||||
if (priority >= DEFAULT_PRI) {
|
||||
if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
|
||||
(b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
|
||||
TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
|
||||
cmndinfo->timeout = gdth_update_timeout(scp, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ha->req_first==NULL) {
|
||||
ha->req_first = scp; /* queue was empty */
|
||||
@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp)
|
||||
return ((const char *)ha->binfo.type_string);
|
||||
}
|
||||
|
||||
static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
|
||||
{
|
||||
gdth_ha_str *ha = shost_priv(scp->device->host);
|
||||
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
|
||||
unchar b, t;
|
||||
ulong flags;
|
||||
enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
|
||||
|
||||
TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
|
||||
b = scp->device->channel;
|
||||
t = scp->device->id;
|
||||
|
||||
/*
|
||||
* We don't really honor the command timeout, but we try to
|
||||
* honor 6 times of the actual command timeout! So reset the
|
||||
* timer if this is less than 6th timeout on this command!
|
||||
*/
|
||||
if (++cmndinfo->timeout_count < 6)
|
||||
retval = BLK_EH_RESET_TIMER;
|
||||
|
||||
/* Reset the timeout if it is locked IO */
|
||||
spin_lock_irqsave(&ha->smp_lock, flags);
|
||||
if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
|
||||
(b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
|
||||
TRACE2(("%s(): locked IO, reset timeout\n", __func__));
|
||||
retval = BLK_EH_RESET_TIMER;
|
||||
}
|
||||
spin_unlock_irqrestore(&ha->smp_lock, flags);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
||||
static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
|
||||
{
|
||||
gdth_ha_str *ha = shost_priv(scp->device->host);
|
||||
@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
|
||||
BUG_ON(!cmndinfo);
|
||||
|
||||
scp->scsi_done = done;
|
||||
gdth_update_timeout(scp, scp->timeout_per_command * 6);
|
||||
cmndinfo->timeout_count = 0;
|
||||
cmndinfo->priority = DEFAULT_PRI;
|
||||
|
||||
return __gdth_queuecommand(ha, scp, cmndinfo);
|
||||
@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
|
||||
ha->hdr[j].lock = 1;
|
||||
spin_unlock_irqrestore(&ha->smp_lock, flags);
|
||||
gdth_wait_completion(ha, ha->bus_cnt, j);
|
||||
gdth_stop_timeout(ha, ha->bus_cnt, j);
|
||||
} else {
|
||||
spin_lock_irqsave(&ha->smp_lock, flags);
|
||||
ha->hdr[j].lock = 0;
|
||||
spin_unlock_irqrestore(&ha->smp_lock, flags);
|
||||
gdth_start_timeout(ha, ha->bus_cnt, j);
|
||||
gdth_next(ha);
|
||||
}
|
||||
}
|
||||
@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
|
||||
spin_lock_irqsave(&ha->smp_lock, flags);
|
||||
ha->raw[i].lock = 1;
|
||||
spin_unlock_irqrestore(&ha->smp_lock, flags);
|
||||
for (j = 0; j < ha->tid_cnt; ++j) {
|
||||
for (j = 0; j < ha->tid_cnt; ++j)
|
||||
gdth_wait_completion(ha, i, j);
|
||||
gdth_stop_timeout(ha, i, j);
|
||||
}
|
||||
} else {
|
||||
spin_lock_irqsave(&ha->smp_lock, flags);
|
||||
ha->raw[i].lock = 0;
|
||||
spin_unlock_irqrestore(&ha->smp_lock, flags);
|
||||
for (j = 0; j < ha->tid_cnt; ++j) {
|
||||
gdth_start_timeout(ha, i, j);
|
||||
for (j = 0; j < ha->tid_cnt; ++j)
|
||||
gdth_next(ha);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = {
|
||||
.slave_configure = gdth_slave_configure,
|
||||
.bios_param = gdth_bios_param,
|
||||
.proc_info = gdth_proc_info,
|
||||
.eh_timed_out = gdth_timed_out,
|
||||
.proc_name = "gdth",
|
||||
.can_queue = GDTH_MAXCMDS,
|
||||
.this_id = -1,
|
||||
|
@ -916,7 +916,7 @@ typedef struct {
|
||||
gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
|
||||
dma_addr_t sense_paddr; /* sense dma-addr */
|
||||
unchar priority;
|
||||
int timeout;
|
||||
int timeout_count; /* # of timeout calls */
|
||||
volatile int wait_for_completion;
|
||||
ushort status;
|
||||
ulong32 info;
|
||||
|
@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
|
||||
}
|
||||
spin_unlock_irqrestore(&ha->smp_lock, flags);
|
||||
}
|
||||
|
||||
static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
|
||||
{
|
||||
ulong flags;
|
||||
Scsi_Cmnd *scp;
|
||||
unchar b, t;
|
||||
|
||||
spin_lock_irqsave(&ha->smp_lock, flags);
|
||||
|
||||
for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
|
||||
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
|
||||
if (!cmndinfo->internal_command) {
|
||||
b = scp->device->channel;
|
||||
t = scp->device->id;
|
||||
if (t == (unchar)id && b == (unchar)busnum) {
|
||||
TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
|
||||
cmndinfo->timeout = gdth_update_timeout(scp, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&ha->smp_lock, flags);
|
||||
}
|
||||
|
||||
static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
|
||||
{
|
||||
ulong flags;
|
||||
Scsi_Cmnd *scp;
|
||||
unchar b, t;
|
||||
|
||||
spin_lock_irqsave(&ha->smp_lock, flags);
|
||||
|
||||
for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
|
||||
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
|
||||
if (!cmndinfo->internal_command) {
|
||||
b = scp->device->channel;
|
||||
t = scp->device->id;
|
||||
if (t == (unchar)id && b == (unchar)busnum) {
|
||||
TRACE2(("gdth_start_timeout(): update_timeout()\n"));
|
||||
gdth_update_timeout(scp, cmndinfo->timeout);
|
||||
}
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&ha->smp_lock, flags);
|
||||
}
|
||||
|
||||
static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
|
||||
{
|
||||
int oldto;
|
||||
|
||||
oldto = scp->timeout_per_command;
|
||||
scp->timeout_per_command = timeout;
|
||||
|
||||
if (timeout == 0) {
|
||||
del_timer(&scp->eh_timeout);
|
||||
scp->eh_timeout.data = (unsigned long) NULL;
|
||||
scp->eh_timeout.expires = 0;
|
||||
} else {
|
||||
if (scp->eh_timeout.data != (unsigned long) NULL)
|
||||
del_timer(&scp->eh_timeout);
|
||||
scp->eh_timeout.data = (unsigned long) scp;
|
||||
scp->eh_timeout.expires = jiffies + timeout;
|
||||
add_timer(&scp->eh_timeout);
|
||||
}
|
||||
|
||||
return oldto;
|
||||
}
|
||||
|
@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
|
||||
ulong64 *paddr);
|
||||
static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
|
||||
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
|
||||
static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
|
||||
static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
|
||||
static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
cmnd->timeout_per_command/HZ);
cmnd->request->timeout/HZ);

evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
pc->scsi_cmd = cmd;
pc->done = done;
pc->timeout = jiffies + cmd->timeout_per_command;
pc->timeout = jiffies + cmd->request->timeout;

if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
|
||||
sdev->no_uld_attach = 1;
|
||||
}
|
||||
if (ipr_is_vset_device(res)) {
|
||||
sdev->timeout = IPR_VSET_RW_TIMEOUT;
|
||||
blk_queue_rq_timeout(sdev->request_queue,
|
||||
IPR_VSET_RW_TIMEOUT);
|
||||
blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
|
||||
}
|
||||
if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
|
||||
|
@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
|
||||
scb->cmd.dcdb.segment_4G = 0;
|
||||
scb->cmd.dcdb.enhanced_sg = 0;
|
||||
|
||||
TimeOut = scb->scsi_cmd->timeout_per_command;
|
||||
TimeOut = scb->scsi_cmd->request->timeout;
|
||||
|
||||
if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
|
||||
if (!scb->sg_len) {
|
||||
|
@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
|
||||
scsi_queue_work(conn->session->host, &conn->xmitwork);
|
||||
}
|
||||
|
||||
static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
|
||||
static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
|
||||
{
|
||||
struct iscsi_cls_session *cls_session;
|
||||
struct iscsi_session *session;
|
||||
struct iscsi_conn *conn;
|
||||
enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
|
||||
enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
|
||||
|
||||
cls_session = starget_to_session(scsi_target(scmd->device));
|
||||
session = cls_session->dd_data;
|
||||
@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
|
||||
* We are probably in the middle of iscsi recovery so let
|
||||
* that complete and handle the error.
|
||||
*/
|
||||
rc = EH_RESET_TIMER;
|
||||
rc = BLK_EH_RESET_TIMER;
|
||||
goto done;
|
||||
}
|
||||
|
||||
conn = session->leadconn;
|
||||
if (!conn) {
|
||||
/* In the middle of shuting down */
|
||||
rc = EH_RESET_TIMER;
|
||||
rc = BLK_EH_RESET_TIMER;
|
||||
goto done;
|
||||
}
|
||||
|
||||
@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
|
||||
*/
|
||||
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
|
||||
(conn->ping_timeout * HZ), jiffies))
|
||||
rc = EH_RESET_TIMER;
|
||||
rc = BLK_EH_RESET_TIMER;
|
||||
/*
|
||||
* if we are about to check the transport then give the command
|
||||
* more time
|
||||
*/
|
||||
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
|
||||
jiffies))
|
||||
rc = EH_RESET_TIMER;
|
||||
rc = BLK_EH_RESET_TIMER;
|
||||
/* if in the middle of checking the transport then give us more time */
|
||||
if (conn->ping_task)
|
||||
rc = EH_RESET_TIMER;
|
||||
rc = BLK_EH_RESET_TIMER;
|
||||
done:
|
||||
spin_unlock(&session->lock);
|
||||
debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
|
||||
debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
|
||||
"timer reset" : "nh");
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)
|
||||
|
||||
/* Bounce SCSI-initiated commands to the SCSI EH */
|
||||
if (qc->scsicmd) {
|
||||
scsi_req_abort_cmd(qc->scsicmd);
|
||||
blk_abort_request(qc->scsicmd->request);
|
||||
scsi_schedule_eh(qc->scsicmd->device->host);
|
||||
return;
|
||||
}
|
||||
|
@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);

enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);

int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
@ -673,43 +673,43 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
|
||||
return;
|
||||
}
|
||||
|
||||
enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
|
||||
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct sas_task *task = TO_SAS_TASK(cmd);
|
||||
unsigned long flags;
|
||||
|
||||
if (!task) {
|
||||
cmd->timeout_per_command /= 2;
|
||||
cmd->request->timeout /= 2;
|
||||
SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
|
||||
cmd, task, (cmd->timeout_per_command ?
|
||||
"EH_RESET_TIMER" : "EH_NOT_HANDLED"));
|
||||
if (!cmd->timeout_per_command)
|
||||
return EH_NOT_HANDLED;
|
||||
return EH_RESET_TIMER;
|
||||
cmd, task, (cmd->request->timeout ?
|
||||
"BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
|
||||
if (!cmd->request->timeout)
|
||||
return BLK_EH_NOT_HANDLED;
|
||||
return BLK_EH_RESET_TIMER;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&task->task_state_lock, flags);
|
||||
BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
|
||||
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
|
||||
spin_unlock_irqrestore(&task->task_state_lock, flags);
|
||||
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
|
||||
cmd, task);
|
||||
return EH_HANDLED;
|
||||
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
|
||||
"BLK_EH_HANDLED\n", cmd, task);
|
||||
return BLK_EH_HANDLED;
|
||||
}
|
||||
if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
|
||||
spin_unlock_irqrestore(&task->task_state_lock, flags);
|
||||
SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
|
||||
"EH_RESET_TIMER\n",
|
||||
"BLK_EH_RESET_TIMER\n",
|
||||
cmd, task);
|
||||
return EH_RESET_TIMER;
|
||||
return BLK_EH_RESET_TIMER;
|
||||
}
|
||||
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
|
||||
spin_unlock_irqrestore(&task->task_state_lock, flags);
|
||||
|
||||
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
|
||||
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
|
||||
cmd, task);
|
||||
|
||||
return EH_NOT_HANDLED;
|
||||
return BLK_EH_NOT_HANDLED;
|
||||
}
|
||||
|
||||
int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
|
||||
@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task)
|
||||
return;
|
||||
}
|
||||
|
||||
scsi_req_abort_cmd(sc);
|
||||
blk_abort_request(sc->request);
|
||||
scsi_schedule_eh(sc->device->host);
|
||||
}
|
||||
|
||||
|
@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
|
||||
* cmd has not been completed within the timeout period.
|
||||
*/
|
||||
static enum
|
||||
scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
|
||||
blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
|
||||
{
|
||||
struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
|
||||
struct megasas_instance *instance;
|
||||
@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
|
||||
|
||||
if (time_after(jiffies, scmd->jiffies_at_alloc +
|
||||
(MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
|
||||
return EH_NOT_HANDLED;
|
||||
return BLK_EH_NOT_HANDLED;
|
||||
}
|
||||
|
||||
instance = cmd->instance;
|
||||
@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
|
||||
|
||||
spin_unlock_irqrestore(instance->host->host_lock, flags);
|
||||
}
|
||||
return EH_RESET_TIMER;
|
||||
return BLK_EH_RESET_TIMER;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
|
||||
**
|
||||
**----------------------------------------------------
|
||||
*/
|
||||
if (np->settle_time && cmd->timeout_per_command >= HZ) {
|
||||
u_long tlimit = jiffies + cmd->timeout_per_command - HZ;
|
||||
if (np->settle_time && cmd->request->timeout >= HZ) {
|
||||
u_long tlimit = jiffies + cmd->request->timeout - HZ;
|
||||
if (time_after(np->settle_time, tlimit))
|
||||
np->settle_time = tlimit;
|
||||
}
|
||||
|
@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
|
||||
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
|
||||
|
||||
/* Set ISP command timeout. */
|
||||
pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
|
||||
pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
|
||||
|
||||
/* Set device target ID and LUN */
|
||||
pkt->lun = SCSI_LUN_32(cmd);
|
||||
@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
|
||||
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
|
||||
|
||||
/* Set ISP command timeout. */
|
||||
pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
|
||||
pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
|
||||
|
||||
/* Set device target ID and LUN */
|
||||
pkt->lun = SCSI_LUN_32(cmd);
|
||||
|
@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
|
||||
"dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
|
||||
cmd, jiffies, cmd->timeout_per_command / HZ,
|
||||
cmd, jiffies, cmd->request->timeout / HZ,
|
||||
ha->dpc_flags, cmd->result, cmd->allowed));
|
||||
|
||||
/* FIXME: wait for hba to go online */
|
||||
@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
|
||||
"to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
|
||||
ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ,
|
||||
ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
|
||||
ha->dpc_flags, cmd->result, cmd->allowed));
|
||||
|
||||
stat = qla4xxx_reset_target(ha, ddb_entry);
|
||||
|
@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
unsigned long flags;

cmd->device = dev;
init_timer(&cmd->eh_timeout);
INIT_LIST_HEAD(&cmd->list);
spin_lock_irqsave(&dev->list_lock, flags);
list_add_tail(&cmd->list, &dev->cmd_list);
@ -652,14 +651,19 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
unsigned long timeout;
int rtn = 0;

/*
* We will use a queued command if possible, otherwise we will
* emulate the queuing and calling of completion function ourselves.
*/
atomic_inc(&cmd->device->iorequest_cnt);

/* check if the device is still usable */
if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
/* in SDEV_DEL we error all commands. DID_NO_CONNECT
* returns an immediate error upwards, and signals
* that the device is no longer present */
cmd->result = DID_NO_CONNECT << 16;
atomic_inc(&cmd->device->iorequest_cnt);
__scsi_done(cmd);
scsi_done(cmd);
/* return 0 (because the command has been processed) */
goto out;
}
@ -672,6 +676,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
* future requests should not occur until the device
* transitions out of the suspend state.
*/

scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
@ -714,20 +719,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
host->resetting = 0;
}

/*
* AK: unlikely race here: for some reason the timer could
* expire before the serial number is set up below.
*/
scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

scsi_log_send(cmd);

/*
* We will use a queued command if possible, otherwise we will
* emulate the queuing and calling of completion function ourselves.
*/
atomic_inc(&cmd->device->iorequest_cnt);

/*
* Before we queue this command, check if the command
* length exceeds what the host adapter can handle.
@ -744,6 +737,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}

spin_lock_irqsave(host->host_lock, flags);
/*
* AK: unlikely race here: for some reason the timer could
* expire before the serial number is set up below.
*
* TODO: kill serial or move to blk layer
*/
scsi_cmd_get_serial(host, cmd);

if (unlikely(host->shost_state == SHOST_DEL)) {
@ -754,12 +753,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
spin_unlock_irqrestore(host->host_lock, flags);
if (rtn) {
if (scsi_delete_timer(cmd)) {
atomic_inc(&cmd->device->iodone_cnt);
scsi_queue_insert(cmd,
(rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
rtn : SCSI_MLQUEUE_HOST_BUSY);
}
scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
rtn : SCSI_MLQUEUE_HOST_BUSY);
SCSI_LOG_MLQUEUE(3,
printk("queuecommand : request rejected\n"));
}
@ -769,24 +764,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
return rtn;
}

/**
* scsi_req_abort_cmd -- Request command recovery for the specified command
* @cmd: pointer to the SCSI command of interest
*
* This function requests that SCSI Core start recovery for the
* command by deleting the timer and adding the command to the eh
* queue.  It can be called by either LLDDs or SCSI Core.  LLDDs who
* implement their own error recovery MAY ignore the timeout event if
* they generated scsi_req_abort_cmd.
*/
void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
{
if (!scsi_delete_timer(cmd))
return;
scsi_times_out(cmd);
}
EXPORT_SYMBOL(scsi_req_abort_cmd);

/**
* scsi_done - Enqueue the finished SCSI command into the done queue.
* @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
@ -802,42 +779,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
*/
static void scsi_done(struct scsi_cmnd *cmd)
{
/*
* We don't have to worry about this one timing out anymore.
* If we are unable to remove the timer, then the command
* has already timed out. In which case, we have no choice but to
* let the timeout function run, as we have no idea where in fact
* that function could really be. It might be on another processor,
* etc, etc.
*/
if (!scsi_delete_timer(cmd))
return;
__scsi_done(cmd);
}

/* Private entry to scsi_done() to complete a command when the timer
* isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;

/*
* Set the serial numbers back to zero
*/
cmd->serial_number = 0;

atomic_inc(&cmd->device->iodone_cnt);
if (cmd->result)
atomic_inc(&cmd->device->ioerr_cnt);

BUG_ON(!rq);

/*
* The uptodate/nbytes values don't matter, as we allow partial
* completes and thus will check this in the softirq callback
*/
rq->completion_data = cmd;
blk_complete_request(rq);
blk_complete_request(cmd->request);
}

/* Move this to a header if it becomes more generally useful */

@ -111,70 +111,9 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
return ret;
}

/**
* scsi_add_timer - Start timeout timer for a single scsi command.
* @scmd: scsi command that is about to start running.
* @timeout: amount of time to allow this command to run.
* @complete: timeout function to call if timer isn't canceled.
*
* Notes:
* This should be turned into an inline function. Each scsi command
* has its own timer, and as it is added to the queue, we set up the
* timer. When the command completes, we cancel the timer.
*/
void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
void (*complete)(struct scsi_cmnd *))
{

/*
* If the clock was already running for this command, then
* first delete the timer. The timer handling code gets rather
* confused if we don't do this.
*/
if (scmd->eh_timeout.function)
del_timer(&scmd->eh_timeout);

scmd->eh_timeout.data = (unsigned long)scmd;
scmd->eh_timeout.expires = jiffies + timeout;
scmd->eh_timeout.function = (void (*)(unsigned long)) complete;

SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
" %d, (%p)\n", __func__,
scmd, timeout, complete));

add_timer(&scmd->eh_timeout);
}

/**
* scsi_delete_timer - Delete/cancel timer for a given function.
* @scmd: Cmd that we are canceling timer for
*
* Notes:
* This should be turned into an inline function.
*
* Return value:
* 1 if we were able to detach the timer. 0 if we blew it, and the
* timer function has already started to run.
*/
int scsi_delete_timer(struct scsi_cmnd *scmd)
{
int rtn;

rtn = del_timer(&scmd->eh_timeout);

SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
" rtn: %d\n", __func__,
scmd, rtn));

scmd->eh_timeout.data = (unsigned long)NULL;
scmd->eh_timeout.function = NULL;

return rtn;
}

/**
* scsi_times_out - Timeout function for normal scsi commands.
* @scmd: Cmd that is timing out.
* @req: request that is timing out.
*
* Notes:
* We do not need to lock this. There is the potential for a race
@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
* normal completion function determines that the timer has already
* fired, then it mustn't do anything.
*/
void scsi_times_out(struct scsi_cmnd *scmd)
enum blk_eh_timer_return scsi_times_out(struct request *req)
{
enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
struct scsi_cmnd *scmd = req->special;
enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;

scsi_log_completion(scmd, TIMEOUT_ERROR);

@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *scmd)
eh_timed_out = NULL;

if (eh_timed_out)
switch (eh_timed_out(scmd)) {
case EH_HANDLED:
__scsi_done(scmd);
return;
case EH_RESET_TIMER:
scsi_add_timer(scmd, scmd->timeout_per_command,
scsi_times_out);
return;
case EH_NOT_HANDLED:
rtn = eh_timed_out(scmd);
switch (rtn) {
case BLK_EH_NOT_HANDLED:
break;
default:
return rtn;
}

if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
scmd->result |= DID_TIME_OUT << 16;
__scsi_done(scmd);
return BLK_EH_HANDLED;
}

return BLK_EH_NOT_HANDLED;
}

/**
@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)

blk_rq_init(NULL, &req);
scmd->request = &req;
memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));

scmd->cmnd = req.cmd;

@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)

scmd->sc_data_direction = DMA_BIDIRECTIONAL;

init_timer(&scmd->eh_timeout);

spin_lock_irqsave(shost->host_lock, flags);
shost->tmf_in_progress = 1;
spin_unlock_irqrestore(shost->host_lock, flags);

@ -1181,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)

cmd->transfersize = req->data_len;
cmd->allowed = req->retries;
cmd->timeout_per_command = req->timeout;
return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@ -1416,17 +1415,26 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);

__scsi_done(cmd);
blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
struct scsi_cmnd *cmd = rq->completion_data;
unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
struct scsi_cmnd *cmd = rq->special;
unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
int disposition;

INIT_LIST_HEAD(&cmd->eh_entry);

/*
* Set the serial numbers back to zero
*/
cmd->serial_number = 0;

atomic_inc(&cmd->device->iodone_cnt);
if (cmd->result)
atomic_inc(&cmd->device->ioerr_cnt);

disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@ -1675,6 +1683,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)

blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
blk_queue_rq_timed_out(q, scsi_times_out);
return q;
}

@ -4,6 +4,7 @@
#include <linux/device.h>

struct request_queue;
struct request;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_template;
@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void);
extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
extern void __scsi_done(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd);
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);

/* scsi_error.c */
extern void scsi_add_timer(struct scsi_cmnd *, int,
void (*)(struct scsi_cmnd *));
extern int scsi_delete_timer(struct scsi_cmnd *);
extern void scsi_times_out(struct scsi_cmnd *cmd);
extern enum blk_eh_timer_return scsi_times_out(struct request *req);
extern int scsi_error_handler(void *host);
extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
extern void scsi_eh_wakeup(struct Scsi_Host *shost);

@ -560,12 +560,15 @@ sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");

/*
* TODO: can we make these symlinks to the block layer ones?
*/
static ssize_t
sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev;
sdev = to_scsi_device(dev);
return snprintf (buf, 20, "%d\n", sdev->timeout / HZ);
return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
}

static ssize_t
@ -576,7 +579,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr,
int timeout;
sdev = to_scsi_device(dev);
sscanf (buf, "%d\n", &timeout);
sdev->timeout = timeout * HZ;
blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
return count;
}
static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);

@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
int err;

dprintk("%lx %u\n", uaddr, len);
err = blk_rq_map_user(q, rq, (void *)uaddr, len);
err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
if (err) {
/*
* TODO: need to fixup sg_tablesize, max_segment_size,

@ -1950,15 +1950,15 @@ static int fc_vport_match(struct attribute_container *cont,
* Notes:
* This routine assumes no locks are held on entry.
*/
static enum scsi_eh_timer_return
static enum blk_eh_timer_return
fc_timed_out(struct scsi_cmnd *scmd)
{
struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));

if (rport->port_state == FC_PORTSTATE_BLOCKED)
return EH_RESET_TIMER;
return BLK_EH_RESET_TIMER;

return EH_NOT_HANDLED;
return BLK_EH_NOT_HANDLED;
}

/*

@ -86,6 +86,12 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);

#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
#define SD_MINORS 16
#else
#define SD_MINORS 0
#endif

static int sd_revalidate_disk(struct gendisk *);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
@ -159,7 +165,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
sd_print_sense_hdr(sdkp, &sshdr);
return -EINVAL;
}
sd_revalidate_disk(sdkp->disk);
revalidate_disk(sdkp->disk);
return count;
}

@ -377,7 +383,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
sector_t block = rq->sector;
sector_t threshold;
unsigned int this_count = rq->nr_sectors;
unsigned int timeout = sdp->timeout;
int ret;

if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@ -578,7 +583,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->transfersize = sdp->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = SD_MAX_RETRIES;
SCpnt->timeout_per_command = timeout;

/*
* This indicates that the command is ready from our end to be
@ -910,7 +914,7 @@ static void sd_rescan(struct device *dev)
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);

if (sdkp) {
sd_revalidate_disk(sdkp->disk);
revalidate_disk(sdkp->disk);
scsi_disk_put(sdkp);
}
}
@ -1763,6 +1767,52 @@ static int sd_revalidate_disk(struct gendisk *disk)
return 0;
}

/**
* sd_format_disk_name - format disk name
* @prefix: name prefix - ie. "sd" for SCSI disks
* @index: index of the disk to format name for
* @buf: output buffer
* @buflen: length of the output buffer
*
* SCSI disk names starts at sda. The 26th device is sdz and the
* 27th is sdaa. The last one for two lettered suffix is sdzz
* which is followed by sdaaa.
*
* This is basically 26 base counting with one extra 'nil' entry
* at the beggining from the second digit on and can be
* determined using similar method as 26 base conversion with the
* index shifted -1 after each digit is computed.
*
* CONTEXT:
* Don't care.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
{
const int base = 'z' - 'a' + 1;
char *begin = buf + strlen(prefix);
char *end = buf + buflen;
char *p;
int unit;

p = end - 1;
*p = '\0';
unit = base;
do {
if (p == begin)
return -EINVAL;
*--p = 'a' + (index % unit);
index = (index / unit) - 1;
} while (index >= 0);

memmove(begin, p, end - p);
memcpy(buf, prefix, strlen(prefix));

return 0;
}

/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
@ -1801,7 +1851,7 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;

gd = alloc_disk(16);
gd = alloc_disk(SD_MINORS);
if (!gd)
goto out_free;

@ -1815,8 +1865,8 @@ static int sd_probe(struct device *dev)
if (error)
goto out_put;

error = -EBUSY;
if (index >= SD_MAX_DISKS)
error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
if (error)
goto out_free_index;

sdkp->device = sdp;
@ -1826,11 +1876,12 @@ static int sd_probe(struct device *dev)
sdkp->openers = 0;
sdkp->previous_state = 1;

if (!sdp->timeout) {
if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
sdp->timeout = SD_TIMEOUT;
blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
else
sdp->timeout = SD_MOD_TIMEOUT;
blk_queue_rq_timeout(sdp->request_queue,
SD_MOD_TIMEOUT);
}

device_initialize(&sdkp->dev);
@ -1843,24 +1894,12 @@ static int sd_probe(struct device *dev)

get_device(&sdp->sdev_gendev);

gd->major = sd_major((index & 0xf0) >> 4);
gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
gd->minors = 16;
gd->fops = &sd_fops;

if (index < 26) {
sprintf(gd->disk_name, "sd%c", 'a' + index % 26);
} else if (index < (26 + 1) * 26) {
sprintf(gd->disk_name, "sd%c%c",
'a' + index / 26 - 1,'a' + index % 26);
} else {
const unsigned int m1 = (index / 26 - 1) / 26 - 1;
const unsigned int m2 = (index / 26 - 1) % 26;
const unsigned int m3 = index % 26;
sprintf(gd->disk_name, "sd%c%c%c",
'a' + m1, 'a' + m2, 'a' + m3);
if (index < SD_MAX_DISKS) {
gd->major = sd_major((index & 0xf0) >> 4);
gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
gd->minors = SD_MINORS;
}

gd->fops = &sd_fops;
gd->private_data = &sdkp->driver;
gd->queue = sdkp->device->request_queue;

@ -1869,7 +1908,7 @@ static int sd_probe(struct device *dev)
blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);

gd->driverfs_dev = &sdp->sdev_gendev;
gd->flags = GENHD_FL_DRIVERFS;
gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
if (sdp->removable)
gd->flags |= GENHD_FL_REMOVABLE;

@ -47,7 +47,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/blktrace_api.h>
|
||||
#include <linux/smp_lock.h>
|
||||
|
||||
@ -69,7 +68,6 @@ static void sg_proc_cleanup(void);
|
||||
#endif
|
||||
|
||||
#define SG_ALLOW_DIO_DEF 0
|
||||
#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
|
||||
|
||||
#define SG_MAX_DEVS 32768
|
||||
|
||||
@ -118,8 +116,8 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
|
||||
unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
|
||||
unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
|
||||
unsigned bufflen; /* Size of (aggregate) data buffer */
|
||||
unsigned b_malloc_len; /* actual len malloc'ed in buffer */
|
||||
struct scatterlist *buffer;/* scatter list */
|
||||
struct page **pages;
|
||||
int page_order;
|
||||
char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
|
||||
unsigned char cmd_opcode; /* first byte of command */
|
||||
} Sg_scatter_hold;
|
||||
@ -137,6 +135,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
|
||||
char orphan; /* 1 -> drop on sight, 0 -> normal */
|
||||
char sg_io_owned; /* 1 -> packet belongs to SG_IO */
|
||||
volatile char done; /* 0->before bh, 1->before read, 2->read */
|
||||
struct request *rq;
|
||||
struct bio *bio;
|
||||
} Sg_request;
|
||||
|
||||
typedef struct sg_fd { /* holds the state of a file descriptor */
|
||||
@ -175,8 +175,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
|
||||
|
||||
static int sg_fasync(int fd, struct file *filp, int mode);
|
||||
/* tasklet or soft irq callback */
|
||||
static void sg_cmd_done(void *data, char *sense, int result, int resid);
|
||||
static int sg_start_req(Sg_request * srp);
|
||||
static void sg_rq_end_io(struct request *rq, int uptodate);
|
||||
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
|
||||
static void sg_finish_rem_req(Sg_request * srp);
|
||||
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
|
||||
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
|
||||
@ -188,17 +188,11 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
|
||||
int read_only, Sg_request **o_srp);
|
||||
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
|
||||
unsigned char *cmnd, int timeout, int blocking);
|
||||
static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
|
||||
int wr_xf, int *countp, unsigned char __user **up);
|
||||
static int sg_write_xfer(Sg_request * srp);
|
||||
static int sg_read_xfer(Sg_request * srp);
|
||||
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
|
||||
static void sg_remove_scat(Sg_scatter_hold * schp);
|
||||
static void sg_build_reserve(Sg_fd * sfp, int req_size);
|
||||
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
|
||||
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
|
||||
static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
|
||||
static void sg_page_free(struct page *page, int size);
|
||||
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
|
||||
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
|
||||
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
|
||||
@ -206,7 +200,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
|
||||
static Sg_request *sg_add_request(Sg_fd * sfp);
|
||||
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
|
||||
static int sg_res_in_use(Sg_fd * sfp);
|
||||
static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
|
||||
static Sg_device *sg_get_dev(int dev);
|
||||
#ifdef CONFIG_SCSI_PROC_FS
|
||||
static int sg_last_dev(void);
|
||||
@ -529,8 +522,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
|
||||
err = -EFAULT;
|
||||
goto err_out;
|
||||
}
|
||||
err = sg_read_xfer(srp);
|
||||
err_out:
|
||||
err_out:
|
||||
sg_finish_rem_req(srp);
|
||||
return (0 == err) ? count : err;
|
||||
}
|
||||
@ -612,7 +604,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
|
||||
else
|
||||
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
|
||||
hp->dxfer_len = mxsize;
|
||||
hp->dxferp = (char __user *)buf + cmd_size;
|
||||
if (hp->dxfer_direction == SG_DXFER_TO_DEV)
|
||||
hp->dxferp = (char __user *)buf + cmd_size;
|
||||
else
|
||||
hp->dxferp = NULL;
|
||||
hp->sbp = NULL;
|
||||
hp->timeout = old_hdr.reply_len; /* structure abuse ... */
|
||||
hp->flags = input_size; /* structure abuse ... */
|
||||
@ -732,16 +727,12 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
|
||||
SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
|
||||
(int) cmnd[0], (int) hp->cmd_len));
|
||||
|
||||
if ((k = sg_start_req(srp))) {
|
||||
k = sg_start_req(srp, cmnd);
|
||||
if (k) {
|
||||
SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
|
||||
sg_finish_rem_req(srp);
|
||||
return k; /* probably out of space --> ENOMEM */
|
||||
}
|
||||
if ((k = sg_write_xfer(srp))) {
|
||||
SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
|
||||
sg_finish_rem_req(srp);
|
||||
return k;
|
||||
}
|
||||
if (sdp->detached) {
|
||||
sg_finish_rem_req(srp);
|
||||
return -ENODEV;
|
||||
@ -763,20 +754,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
|
||||
break;
|
||||
}
|
||||
hp->duration = jiffies_to_msecs(jiffies);
|
||||
/* Now send everything of to mid-level. The next time we hear about this
|
||||
packet is when sg_cmd_done() is called (i.e. a callback). */
|
||||
if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
|
||||
hp->dxfer_len, srp->data.k_use_sg, timeout,
|
||||
SG_DEFAULT_RETRIES, srp, sg_cmd_done,
|
||||
GFP_ATOMIC)) {
|
||||
SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
|
||||
/*
|
||||
* most likely out of mem, but could also be a bad map
|
||||
*/
|
||||
sg_finish_rem_req(srp);
|
||||
return -ENOMEM;
|
||||
} else
|
||||
return 0;
|
||||
|
||||
srp->rq->timeout = timeout;
|
||||
blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
|
||||
srp->rq, 1, sg_rq_end_io);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1192,8 +1174,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
Sg_fd *sfp;
|
||||
unsigned long offset, len, sa;
|
||||
Sg_scatter_hold *rsv_schp;
|
||||
struct scatterlist *sg;
|
||||
int k;
|
||||
int k, length;
|
||||
|
||||
if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
|
||||
return VM_FAULT_SIGBUS;
|
||||
@ -1203,15 +1184,14 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
return VM_FAULT_SIGBUS;
|
||||
SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
|
||||
offset, rsv_schp->k_use_sg));
|
||||
sg = rsv_schp->buffer;
|
||||
sa = vma->vm_start;
|
||||
for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
|
||||
++k, sg = sg_next(sg)) {
|
||||
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
|
||||
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
|
||||
len = vma->vm_end - sa;
|
||||
len = (len < sg->length) ? len : sg->length;
|
||||
len = (len < length) ? len : length;
|
||||
if (offset < len) {
|
||||
struct page *page;
|
||||
page = virt_to_page(page_address(sg_page(sg)) + offset);
|
||||
struct page *page = nth_page(rsv_schp->pages[k],
|
||||
offset >> PAGE_SHIFT);
|
||||
get_page(page); /* increment page count */
|
||||
vmf->page = page;
|
||||
return 0; /* success */
|
||||
@ -1233,8 +1213,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
Sg_fd *sfp;
|
||||
unsigned long req_sz, len, sa;
|
||||
Sg_scatter_hold *rsv_schp;
|
||||
int k;
|
||||
struct scatterlist *sg;
|
||||
int k, length;
|
||||
|
||||
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
|
||||
return -ENXIO;
|
||||
@ -1248,11 +1227,10 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
return -ENOMEM; /* cannot map more than reserved buffer */
|
||||
|
||||
sa = vma->vm_start;
|
||||
sg = rsv_schp->buffer;
|
||||
for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
|
||||
++k, sg = sg_next(sg)) {
|
||||
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
|
||||
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
|
||||
len = vma->vm_end - sa;
|
||||
len = (len < sg->length) ? len : sg->length;
|
||||
len = (len < length) ? len : length;
|
||||
sa += len;
|
||||
}
|
||||
|
||||
@ -1263,16 +1241,19 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This function is a "bottom half" handler that is called by the
|
||||
* mid level when a command is completed (or has failed). */
|
||||
static void
|
||||
sg_cmd_done(void *data, char *sense, int result, int resid)
|
||||
/*
|
||||
* This function is a "bottom half" handler that is called by the mid
|
||||
* level when a command is completed (or has failed).
|
||||
*/
|
||||
static void sg_rq_end_io(struct request *rq, int uptodate)
|
||||
{
|
||||
Sg_request *srp = data;
|
||||
struct sg_request *srp = rq->end_io_data;
|
||||
Sg_device *sdp = NULL;
|
||||
Sg_fd *sfp;
|
||||
unsigned long iflags;
|
||||
unsigned int ms;
|
||||
char *sense;
|
||||
int result, resid;
|
||||
|
||||
if (NULL == srp) {
|
||||
printk(KERN_ERR "sg_cmd_done: NULL request\n");
|
||||
@ -1286,6 +1267,9 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
|
||||
return;
|
||||
}
|
||||
|
||||
sense = rq->sense;
|
||||
result = rq->errors;
|
||||
resid = rq->data_len;
|
||||
|
||||
SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
|
||||
sdp->disk->disk_name, srp->header.pack_id, result));
|
||||
@ -1296,7 +1280,6 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
|
||||
if (0 != result) {
|
||||
struct scsi_sense_hdr sshdr;
|
||||
|
||||
memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
|
||||
srp->header.status = 0xff & result;
|
||||
srp->header.masked_status = status_byte(result);
|
||||
srp->header.msg_status = msg_byte(result);
|
||||
@ -1634,37 +1617,79 @@ exit_sg(void)
|
||||
idr_destroy(&sg_index_idr);
|
||||
}
|
||||
|
||||
static int
|
||||
sg_start_req(Sg_request * srp)
|
||||
static int sg_start_req(Sg_request *srp, unsigned char *cmd)
|
||||
{
|
||||
int res;
|
||||
struct request *rq;
|
||||
Sg_fd *sfp = srp->parentfp;
|
||||
sg_io_hdr_t *hp = &srp->header;
|
||||
int dxfer_len = (int) hp->dxfer_len;
|
||||
int dxfer_dir = hp->dxfer_direction;
|
||||
unsigned int iov_count = hp->iovec_count;
|
||||
Sg_scatter_hold *req_schp = &srp->data;
|
||||
Sg_scatter_hold *rsv_schp = &sfp->reserve;
|
||||
struct request_queue *q = sfp->parentdp->device->request_queue;
|
||||
struct rq_map_data *md, map_data;
|
||||
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
|
||||
|
||||
SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
|
||||
dxfer_len));
|
||||
|
||||
rq = blk_get_request(q, rw, GFP_ATOMIC);
|
||||
if (!rq)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(rq->cmd, cmd, hp->cmd_len);
|
||||
|
||||
rq->cmd_len = hp->cmd_len;
|
||||
rq->cmd_type = REQ_TYPE_BLOCK_PC;
|
||||
|
||||
srp->rq = rq;
|
||||
rq->end_io_data = srp;
|
||||
rq->sense = srp->sense_b;
|
||||
rq->retries = SG_DEFAULT_RETRIES;
|
||||
|
||||
SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
|
||||
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
|
||||
return 0;
|
||||
if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
|
||||
(dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
|
||||
(!sfp->parentdp->device->host->unchecked_isa_dma)) {
|
||||
res = sg_build_direct(srp, sfp, dxfer_len);
|
||||
if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */
|
||||
return res;
|
||||
|
||||
if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
|
||||
dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
|
||||
!sfp->parentdp->device->host->unchecked_isa_dma &&
|
||||
blk_rq_aligned(q, hp->dxferp, dxfer_len))
|
||||
md = NULL;
|
||||
else
|
||||
md = &map_data;
|
||||
|
||||
if (md) {
|
||||
if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
|
||||
sg_link_reserve(sfp, srp, dxfer_len);
|
||||
else {
|
||||
res = sg_build_indirect(req_schp, sfp, dxfer_len);
|
||||
if (res)
|
||||
return res;
|
||||
}
|
||||
|
||||
md->pages = req_schp->pages;
|
||||
md->page_order = req_schp->page_order;
|
||||
md->nr_entries = req_schp->k_use_sg;
|
||||
}
|
||||
if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
|
||||
sg_link_reserve(sfp, srp, dxfer_len);
|
||||
else {
|
||||
res = sg_build_indirect(req_schp, sfp, dxfer_len);
|
||||
if (res) {
|
||||
sg_remove_scat(req_schp);
|
||||
return res;
|
||||
|
||||
if (iov_count)
|
||||
res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
|
||||
hp->dxfer_len, GFP_ATOMIC);
|
||||
else
|
||||
res = blk_rq_map_user(q, rq, md, hp->dxferp,
|
||||
hp->dxfer_len, GFP_ATOMIC);
|
||||
|
||||
if (!res) {
|
||||
srp->bio = rq->bio;
|
||||
|
||||
if (!md) {
|
||||
req_schp->dio_in_use = 1;
|
||||
hp->info |= SG_INFO_DIRECT_IO;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
return res;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1678,186 +1703,37 @@ sg_finish_rem_req(Sg_request * srp)
|
||||
sg_unlink_reserve(sfp, srp);
|
||||
else
|
||||
sg_remove_scat(req_schp);
|
||||
|
||||
if (srp->rq) {
|
||||
if (srp->bio)
|
||||
blk_rq_unmap_user(srp->bio);
|
||||
|
||||
blk_put_request(srp->rq);
|
||||
}
|
||||
|
||||
sg_remove_request(sfp, srp);
|
||||
}
|
||||
|
||||
static int
|
||||
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
|
||||
{
|
||||
int sg_bufflen = tablesize * sizeof(struct scatterlist);
|
||||
int sg_bufflen = tablesize * sizeof(struct page *);
|
||||
gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
|
||||
|
||||
/*
|
||||
* TODO: test without low_dma, we should not need it since
|
||||
* the block layer will bounce the buffer for us
|
||||
*
|
||||
* XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
|
||||
*/
|
||||
if (sfp->low_dma)
|
||||
gfp_flags |= GFP_DMA;
|
||||
schp->buffer = kzalloc(sg_bufflen, gfp_flags);
|
||||
if (!schp->buffer)
|
||||
schp->pages = kzalloc(sg_bufflen, gfp_flags);
|
||||
if (!schp->pages)
|
||||
return -ENOMEM;
|
||||
sg_init_table(schp->buffer, tablesize);
|
||||
schp->sglist_len = sg_bufflen;
|
||||
return tablesize; /* number of scat_gath elements allocated */
|
||||
}
|
||||
|
||||
#ifdef SG_ALLOW_DIO_CODE
|
||||
/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
|
||||
/* TODO: hopefully we can use the generic block layer code */
|
||||
|
||||
/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
|
||||
- mapping of all pages not successful
|
||||
(i.e., either completely successful or fails)
|
||||
*/
|
||||
static int
|
||||
st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
|
||||
unsigned long uaddr, size_t count, int rw)
|
||||
{
|
||||
unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
unsigned long start = uaddr >> PAGE_SHIFT;
|
||||
const int nr_pages = end - start;
|
||||
int res, i, j;
|
||||
struct page **pages;
|
||||
|
||||
/* User attempted Overflow! */
|
||||
if ((uaddr + count) < uaddr)
|
||||
return -EINVAL;
|
||||
|
||||
/* Too big */
|
||||
if (nr_pages > max_pages)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Hmm? */
|
||||
if (count == 0)
|
||||
return 0;
|
||||
|
||||
if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Try to fault in all of the necessary pages */
|
||||
down_read(&current->mm->mmap_sem);
|
||||
/* rw==READ means read from drive, write into memory area */
|
||||
res = get_user_pages(
|
||||
current,
|
||||
current->mm,
|
||||
uaddr,
|
||||
nr_pages,
|
||||
rw == READ,
|
||||
0, /* don't force */
|
||||
pages,
|
||||
NULL);
|
||||
up_read(&current->mm->mmap_sem);
|
||||
|
||||
/* Errors and no page mapped should return here */
|
||||
if (res < nr_pages)
|
||||
goto out_unmap;
|
||||
|
||||
for (i=0; i < nr_pages; i++) {
|
||||
/* FIXME: flush superflous for rw==READ,
|
||||
* probably wrong function for rw==WRITE
|
||||
*/
|
||||
flush_dcache_page(pages[i]);
|
||||
/* ?? Is locking needed? I don't think so */
|
||||
/* if (!trylock_page(pages[i]))
|
||||
goto out_unlock; */
|
||||
}
|
||||
|
||||
sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
|
||||
if (nr_pages > 1) {
|
||||
sgl[0].length = PAGE_SIZE - sgl[0].offset;
|
||||
count -= sgl[0].length;
|
||||
for (i=1; i < nr_pages ; i++)
|
||||
sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
|
||||
}
|
||||
else {
|
||||
sgl[0].length = count;
|
||||
}
|
||||
|
||||
kfree(pages);
|
||||
return nr_pages;
|
||||
|
||||
out_unmap:
|
||||
if (res > 0) {
|
||||
for (j=0; j < res; j++)
|
||||
page_cache_release(pages[j]);
|
||||
res = 0;
|
||||
}
|
||||
kfree(pages);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
/* And unmap them... */
|
||||
static int
|
||||
st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
|
||||
int dirtied)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i=0; i < nr_pages; i++) {
|
||||
struct page *page = sg_page(&sgl[i]);
|
||||
|
||||
if (dirtied)
|
||||
SetPageDirty(page);
|
||||
/* unlock_page(page); */
|
||||
/* FIXME: cache flush missing for rw==READ
|
||||
* FIXME: call the correct reference counting function
|
||||
*/
|
||||
page_cache_release(page);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
|
||||
#endif
|
||||
|
||||
|
||||
/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
|
||||
static int
|
||||
sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
|
||||
{
|
||||
#ifdef SG_ALLOW_DIO_CODE
|
||||
sg_io_hdr_t *hp = &srp->header;
|
||||
Sg_scatter_hold *schp = &srp->data;
|
||||
int sg_tablesize = sfp->parentdp->sg_tablesize;
|
||||
int mx_sc_elems, res;
|
||||
struct scsi_device *sdev = sfp->parentdp->device;
|
||||
|
||||
if (((unsigned long)hp->dxferp &
|
||||
queue_dma_alignment(sdev->request_queue)) != 0)
|
||||
return 1;
|
||||
|
||||
mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
|
||||
if (mx_sc_elems <= 0) {
|
||||
return 1;
|
||||
}
|
||||
res = st_map_user_pages(schp->buffer, mx_sc_elems,
|
||||
(unsigned long)hp->dxferp, dxfer_len,
|
||||
(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
|
||||
if (res <= 0) {
|
||||
sg_remove_scat(schp);
|
||||
return 1;
|
||||
}
|
||||
schp->k_use_sg = res;
|
||||
schp->dio_in_use = 1;
|
||||
hp->info |= SG_INFO_DIRECT_IO;
|
||||
return 0;
|
||||
#else
|
||||
return 1;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int
|
||||
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
|
||||
int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
|
||||
int sg_tablesize = sfp->parentdp->sg_tablesize;
|
||||
int blk_size = buff_size;
|
||||
struct page *p = NULL;
|
||||
int blk_size = buff_size, order;
|
||||
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
|
||||
|
||||
if (blk_size < 0)
|
||||
return -EFAULT;
|
||||
@ -1881,15 +1757,26 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
|
||||
} else
|
||||
scatter_elem_sz_prev = num;
|
||||
}
|
||||
for (k = 0, sg = schp->buffer, rem_sz = blk_size;
|
||||
(rem_sz > 0) && (k < mx_sc_elems);
|
||||
++k, rem_sz -= ret_sz, sg = sg_next(sg)) {
|
||||
|
||||
|
||||
if (sfp->low_dma)
|
||||
gfp_mask |= GFP_DMA;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
|
||||
gfp_mask |= __GFP_ZERO;
|
||||
|
||||
order = get_order(num);
|
||||
retry:
|
||||
ret_sz = 1 << (PAGE_SHIFT + order);
|
||||
|
||||
for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
|
||||
k++, rem_sz -= ret_sz) {
|
||||
|
||||
num = (rem_sz > scatter_elem_sz_prev) ?
|
||||
scatter_elem_sz_prev : rem_sz;
|
||||
p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
|
||||
if (!p)
|
||||
return -ENOMEM;
|
||||
scatter_elem_sz_prev : rem_sz;
|
||||
|
||||
schp->pages[k] = alloc_pages(gfp_mask, order);
|
||||
if (!schp->pages[k])
|
||||
goto out;
|
||||
|
||||
if (num == scatter_elem_sz_prev) {
|
||||
if (unlikely(ret_sz > scatter_elem_sz_prev)) {
|
||||
@ -1897,12 +1784,12 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
|
||||
scatter_elem_sz_prev = ret_sz;
|
||||
}
|
||||
}
|
||||
sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
|
||||
|
||||
SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
|
||||
"ret_sz=%d\n", k, num, ret_sz));
|
||||
} /* end of for loop */
|
||||
|
||||
schp->page_order = order;
|
||||
schp->k_use_sg = k;
|
||||
SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
|
||||
"rem_sz=%d\n", k, rem_sz));
|
||||
@ -1910,223 +1797,42 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
|
||||
schp->bufflen = blk_size;
|
||||
if (rem_sz > 0) /* must have failed */
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
out:
|
||||
for (i = 0; i < k; i++)
|
||||
__free_pages(schp->pages[k], order);
|
||||
|
||||
static int
|
||||
sg_write_xfer(Sg_request * srp)
|
||||
{
|
||||
sg_io_hdr_t *hp = &srp->header;
|
||||
Sg_scatter_hold *schp = &srp->data;
|
||||
struct scatterlist *sg = schp->buffer;
|
||||
int num_xfer = 0;
|
||||
int j, k, onum, usglen, ksglen, res;
|
||||
int iovec_count = (int) hp->iovec_count;
|
||||
int dxfer_dir = hp->dxfer_direction;
|
||||
unsigned char *p;
|
||||
unsigned char __user *up;
|
||||
int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
|
||||
if (--order >= 0)
|
||||
goto retry;
|
||||
|
||||
if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
|
||||
(SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
|
||||
num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
|
||||
if (schp->bufflen < num_xfer)
|
||||
num_xfer = schp->bufflen;
|
||||
}
|
||||
if ((num_xfer <= 0) || (schp->dio_in_use) ||
|
||||
(new_interface
|
||||
&& ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
|
||||
return 0;
|
||||
|
||||
SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
|
||||
num_xfer, iovec_count, schp->k_use_sg));
|
||||
if (iovec_count) {
|
||||
onum = iovec_count;
|
||||
if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
|
||||
return -EFAULT;
|
||||
} else
|
||||
onum = 1;
|
||||
|
||||
ksglen = sg->length;
|
||||
p = page_address(sg_page(sg));
|
||||
for (j = 0, k = 0; j < onum; ++j) {
|
||||
res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
|
||||
if (res)
|
||||
return res;
|
||||
|
||||
for (; p; sg = sg_next(sg), ksglen = sg->length,
|
||||
p = page_address(sg_page(sg))) {
|
||||
if (usglen <= 0)
|
||||
break;
|
||||
if (ksglen > usglen) {
|
||||
if (usglen >= num_xfer) {
|
||||
if (__copy_from_user(p, up, num_xfer))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
if (__copy_from_user(p, up, usglen))
|
||||
return -EFAULT;
|
||||
p += usglen;
|
||||
ksglen -= usglen;
|
||||
break;
|
||||
} else {
|
||||
if (ksglen >= num_xfer) {
|
||||
if (__copy_from_user(p, up, num_xfer))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
if (__copy_from_user(p, up, ksglen))
|
||||
return -EFAULT;
|
||||
up += ksglen;
|
||||
usglen -= ksglen;
|
||||
}
|
||||
++k;
|
||||
if (k >= schp->k_use_sg)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
|
||||
int wr_xf, int *countp, unsigned char __user **up)
|
||||
{
|
||||
int num_xfer = (int) hp->dxfer_len;
|
||||
unsigned char __user *p = hp->dxferp;
|
||||
int count;
|
||||
|
||||
if (0 == sg_num) {
|
||||
if (wr_xf && ('\0' == hp->interface_id))
|
||||
count = (int) hp->flags; /* holds "old" input_size */
|
||||
else
|
||||
count = num_xfer;
|
||||
} else {
|
||||
sg_iovec_t iovec;
|
||||
if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
|
||||
return -EFAULT;
|
||||
p = iovec.iov_base;
|
||||
count = (int) iovec.iov_len;
|
||||
}
|
||||
if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
|
||||
return -EFAULT;
|
||||
if (up)
|
||||
*up = p;
|
||||
if (countp)
|
||||
*countp = count;
|
||||
return 0;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void
|
||||
sg_remove_scat(Sg_scatter_hold * schp)
|
||||
{
|
||||
SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
|
||||
if (schp->buffer && (schp->sglist_len > 0)) {
|
||||
struct scatterlist *sg = schp->buffer;
|
||||
|
||||
if (schp->dio_in_use) {
|
||||
#ifdef SG_ALLOW_DIO_CODE
|
||||
st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
|
||||
#endif
|
||||
} else {
|
||||
if (schp->pages && schp->sglist_len > 0) {
|
||||
if (!schp->dio_in_use) {
|
||||
int k;
|
||||
|
||||
for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
|
||||
++k, sg = sg_next(sg)) {
|
||||
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
|
||||
SCSI_LOG_TIMEOUT(5, printk(
|
||||
"sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
|
||||
k, sg_page(sg), sg->length));
|
||||
sg_page_free(sg_page(sg), sg->length);
|
||||
"sg_remove_scat: k=%d, pg=0x%p\n",
|
||||
k, schp->pages[k]));
|
||||
__free_pages(schp->pages[k], schp->page_order);
|
||||
}
|
||||
|
||||
kfree(schp->pages);
|
||||
}
|
||||
kfree(schp->buffer);
|
||||
}
|
||||
memset(schp, 0, sizeof (*schp));
|
||||
}
|
||||
|
||||
static int
|
||||
sg_read_xfer(Sg_request * srp)
|
||||
{
|
||||
sg_io_hdr_t *hp = &srp->header;
|
||||
Sg_scatter_hold *schp = &srp->data;
|
||||
struct scatterlist *sg = schp->buffer;
|
||||
int num_xfer = 0;
|
||||
int j, k, onum, usglen, ksglen, res;
|
||||
int iovec_count = (int) hp->iovec_count;
|
||||
int dxfer_dir = hp->dxfer_direction;
|
||||
unsigned char *p;
|
||||
unsigned char __user *up;
|
||||
int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
|
||||
|
||||
if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
|
||||
|| (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
|
||||
num_xfer = hp->dxfer_len;
|
||||
if (schp->bufflen < num_xfer)
|
||||
num_xfer = schp->bufflen;
|
||||
}
|
||||
if ((num_xfer <= 0) || (schp->dio_in_use) ||
|
||||
(new_interface
|
||||
&& ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
|
||||
return 0;
|
||||
|
||||
SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
|
||||
num_xfer, iovec_count, schp->k_use_sg));
|
||||
if (iovec_count) {
|
||||
onum = iovec_count;
|
||||
if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
|
||||
return -EFAULT;
|
||||
} else
|
||||
onum = 1;
|
||||
|
||||
p = page_address(sg_page(sg));
|
||||
ksglen = sg->length;
|
||||
for (j = 0, k = 0; j < onum; ++j) {
|
||||
res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
|
||||
if (res)
|
||||
return res;
|
||||
|
||||
for (; p; sg = sg_next(sg), ksglen = sg->length,
|
||||
p = page_address(sg_page(sg))) {
|
||||
if (usglen <= 0)
|
||||
break;
|
||||
if (ksglen > usglen) {
|
||||
if (usglen >= num_xfer) {
|
||||
if (__copy_to_user(up, p, num_xfer))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
if (__copy_to_user(up, p, usglen))
|
||||
return -EFAULT;
|
||||
p += usglen;
|
||||
ksglen -= usglen;
|
||||
break;
|
||||
} else {
|
||||
if (ksglen >= num_xfer) {
|
||||
if (__copy_to_user(up, p, num_xfer))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
if (__copy_to_user(up, p, ksglen))
|
||||
return -EFAULT;
|
||||
up += ksglen;
|
||||
usglen -= ksglen;
|
||||
}
|
||||
++k;
|
||||
if (k >= schp->k_use_sg)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
|
||||
{
|
||||
Sg_scatter_hold *schp = &srp->data;
|
||||
struct scatterlist *sg = schp->buffer;
|
||||
int k, num;
|
||||
|
||||
SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
|
||||
@ -2134,15 +1840,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
|
||||
if ((!outp) || (num_read_xfer <= 0))
|
||||
return 0;
|
||||
|
||||
for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) {
|
||||
num = sg->length;
|
||||
num = 1 << (PAGE_SHIFT + schp->page_order);
|
||||
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
|
||||
if (num > num_read_xfer) {
|
||||
if (__copy_to_user(outp, page_address(sg_page(sg)),
|
||||
if (__copy_to_user(outp, page_address(schp->pages[k]),
|
||||
num_read_xfer))
|
||||
return -EFAULT;
|
||||
break;
|
||||
} else {
|
||||
if (__copy_to_user(outp, page_address(sg_page(sg)),
|
||||
if (__copy_to_user(outp, page_address(schp->pages[k]),
|
||||
num))
|
||||
return -EFAULT;
|
||||
num_read_xfer -= num;
|
||||
@ -2177,24 +1883,21 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
|
||||
{
|
||||
Sg_scatter_hold *req_schp = &srp->data;
|
||||
Sg_scatter_hold *rsv_schp = &sfp->reserve;
|
||||
struct scatterlist *sg = rsv_schp->buffer;
|
||||
int k, num, rem;
|
||||
|
||||
srp->res_used = 1;
|
||||
SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
|
||||
rem = size;
|
||||
|
||||
for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
|
||||
num = sg->length;
|
||||
num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
|
||||
for (k = 0; k < rsv_schp->k_use_sg; k++) {
|
||||
if (rem <= num) {
|
||||
sfp->save_scat_len = num;
|
||||
sg->length = rem;
|
||||
req_schp->k_use_sg = k + 1;
|
||||
req_schp->sglist_len = rsv_schp->sglist_len;
|
||||
req_schp->buffer = rsv_schp->buffer;
|
||||
req_schp->pages = rsv_schp->pages;
|
||||
|
||||
req_schp->bufflen = size;
|
||||
req_schp->b_malloc_len = rsv_schp->b_malloc_len;
|
||||
req_schp->page_order = rsv_schp->page_order;
|
||||
break;
|
||||
} else
|
||||
rem -= num;
|
||||
@ -2208,22 +1911,13 @@ static void
|
||||
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
|
||||
{
|
||||
Sg_scatter_hold *req_schp = &srp->data;
|
||||
Sg_scatter_hold *rsv_schp = &sfp->reserve;
|
||||
|
||||
SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
|
||||
(int) req_schp->k_use_sg));
|
||||
if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
|
||||
struct scatterlist *sg = rsv_schp->buffer;
|
||||
|
||||
if (sfp->save_scat_len > 0)
|
||||
(sg + (req_schp->k_use_sg - 1))->length =
|
||||
(unsigned) sfp->save_scat_len;
|
||||
else
|
||||
SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
|
||||
}
|
||||
req_schp->k_use_sg = 0;
|
||||
req_schp->bufflen = 0;
|
||||
req_schp->buffer = NULL;
|
||||
req_schp->pages = NULL;
|
||||
req_schp->page_order = 0;
|
||||
req_schp->sglist_len = 0;
|
||||
sfp->save_scat_len = 0;
|
||||
srp->res_used = 0;
|
||||
@ -2481,53 +2175,6 @@ sg_res_in_use(Sg_fd * sfp)
|
||||
return srp ? 1 : 0;
|
||||
}
|
||||
|
||||
/* The size fetched (value output via retSzp) set when non-NULL return */
|
||||
static struct page *
|
||||
sg_page_malloc(int rqSz, int lowDma, int *retSzp)
|
||||
{
|
||||
struct page *resp = NULL;
|
||||
gfp_t page_mask;
|
||||
int order, a_size;
|
||||
int resSz;
|
||||
|
||||
if ((rqSz <= 0) || (NULL == retSzp))
|
||||
return resp;
|
||||
|
||||
if (lowDma)
|
||||
page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
|
||||
else
|
||||
page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
|
||||
|
||||
for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
|
||||
order++, a_size <<= 1) ;
|
||||
resSz = a_size; /* rounded up if necessary */
|
||||
resp = alloc_pages(page_mask, order);
|
||||
while ((!resp) && order) {
|
||||
--order;
|
||||
a_size >>= 1; /* divide by 2, until PAGE_SIZE */
|
||||
resp = alloc_pages(page_mask, order); /* try half */
|
||||
resSz = a_size;
|
||||
}
|
||||
if (resp) {
|
||||
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
|
||||
memset(page_address(resp), 0, resSz);
|
||||
*retSzp = resSz;
|
||||
}
|
||||
return resp;
|
||||
}
|
||||
|
||||
static void
|
||||
sg_page_free(struct page *page, int size)
|
||||
{
|
||||
int order, a_size;
|
||||
|
||||
if (!page)
|
||||
return;
|
||||
for (order = 0, a_size = PAGE_SIZE; a_size < size;
|
||||
order++, a_size <<= 1) ;
|
||||
__free_pages(page, order);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SCSI_PROC_FS
|
||||
static int
|
||||
sg_idr_max_id(int id, void *p, void *data)
|
||||
|
@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)

static int sr_prep_fn(struct request_queue *q, struct request *rq)
{
int block=0, this_count, s_size, timeout = SR_TIMEOUT;
int block = 0, this_count, s_size;
struct scsi_cd *cd;
struct scsi_cmnd *SCpnt;
struct scsi_device *sdp = q->queuedata;
@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->transfersize = cd->device->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = MAX_RETRIES;
SCpnt->timeout_per_command = timeout;

/*
* This indicates that the command is ready from our end to be
@ -620,6 +619,8 @@ static int sr_probe(struct device *dev)
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD;

blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);

cd->device = sdev;
cd->disk = disk;
cd->driver = &sr_template;
@ -878,7 +879,7 @@ static void sr_kref_release(struct kref *kref)
struct gendisk *disk = cd->disk;

spin_lock(&sr_index_lock);
clear_bit(disk->first_minor, sr_index_bits);
clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
spin_unlock(&sr_index_lock);

unregister_cdrom(&cd->cdi);

@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
* Shorten our settle_time if needed for
* this command not to time out.
*/
if (np->s.settle_time_valid && cmd->timeout_per_command) {
unsigned long tlimit = jiffies + cmd->timeout_per_command;
if (np->s.settle_time_valid && cmd->request->timeout) {
unsigned long tlimit = jiffies + cmd->request->timeout;
tlimit -= SYM_CONF_TIMER_INTERVAL*2;
if (time_after(np->s.settle_time, tlimit)) {
np->s.settle_time = tlimit;

@ -107,7 +107,8 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs)
BUG_ON(bip == NULL);

/* A cloned bio doesn't own the integrity metadata */
if (!bio_flagged(bio, BIO_CLONED) && bip->bip_buf != NULL)
if (!bio_flagged(bio, BIO_CLONED) && !bio_flagged(bio, BIO_FS_INTEGRITY)
&& bip->bip_buf != NULL)
kfree(bip->bip_buf);

mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]);
@ -150,6 +151,24 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
}
EXPORT_SYMBOL(bio_integrity_add_page);

static int bdev_integrity_enabled(struct block_device *bdev, int rw)
{
struct blk_integrity *bi = bdev_get_integrity(bdev);

if (bi == NULL)
return 0;

if (rw == READ && bi->verify_fn != NULL &&
(bi->flags & INTEGRITY_FLAG_READ))
return 1;

if (rw == WRITE && bi->generate_fn != NULL &&
(bi->flags & INTEGRITY_FLAG_WRITE))
return 1;

return 0;
}

/**
* bio_integrity_enabled - Check whether integrity can be passed
* @bio: bio to check
@ -313,6 +332,14 @@ static void bio_integrity_generate(struct bio *bio)
}
}

static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
if (bi)
return bi->tuple_size;

return 0;
}

/**
* bio_integrity_prep - Prepare bio for integrity I/O
* @bio: bio to prepare

297 fs/bio.c
@ -30,7 +30,7 @@
|
||||
|
||||
static struct kmem_cache *bio_slab __read_mostly;
|
||||
|
||||
mempool_t *bio_split_pool __read_mostly;
|
||||
static mempool_t *bio_split_pool __read_mostly;
|
||||
|
||||
/*
|
||||
* if you change this list, also change bvec_alloc or things will
|
||||
@ -60,25 +60,46 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct
|
||||
struct bio_vec *bvl;
|
||||
|
||||
/*
|
||||
* see comment near bvec_array define!
|
||||
* If 'bs' is given, lookup the pool and do the mempool alloc.
|
||||
* If not, this is a bio_kmalloc() allocation and just do a
|
||||
* kzalloc() for the exact number of vecs right away.
|
||||
*/
|
||||
switch (nr) {
|
||||
case 1 : *idx = 0; break;
|
||||
case 2 ... 4: *idx = 1; break;
|
||||
case 5 ... 16: *idx = 2; break;
|
||||
case 17 ... 64: *idx = 3; break;
|
||||
case 65 ... 128: *idx = 4; break;
|
||||
case 129 ... BIO_MAX_PAGES: *idx = 5; break;
|
||||
if (bs) {
|
||||
/*
|
||||
* see comment near bvec_array define!
|
||||
*/
|
||||
switch (nr) {
|
||||
case 1:
|
||||
*idx = 0;
|
||||
break;
|
||||
case 2 ... 4:
|
||||
*idx = 1;
|
||||
break;
|
||||
case 5 ... 16:
|
||||
*idx = 2;
|
||||
break;
|
||||
case 17 ... 64:
|
||||
*idx = 3;
|
||||
break;
|
||||
case 65 ... 128:
|
||||
*idx = 4;
|
||||
break;
|
||||
case 129 ... BIO_MAX_PAGES:
|
||||
*idx = 5;
|
||||
break;
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* idx now points to the pool we want to allocate from
|
||||
*/
|
||||
}
|
||||
|
||||
bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
|
||||
if (bvl)
|
||||
memset(bvl, 0, bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
|
||||
/*
|
||||
* idx now points to the pool we want to allocate from
|
||||
*/
|
||||
bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
|
||||
if (bvl)
|
||||
memset(bvl, 0,
|
||||
bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
|
||||
} else
|
||||
bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);
|
||||
|
||||
return bvl;
|
||||
}
|
||||
@ -107,10 +128,17 @@ static void bio_fs_destructor(struct bio *bio)
bio_free(bio, fs_bio_set);
}

static void bio_kmalloc_destructor(struct bio *bio)
{
kfree(bio->bi_io_vec);
kfree(bio);
}

void bio_init(struct bio *bio)
{
memset(bio, 0, sizeof(*bio));
bio->bi_flags = 1 << BIO_UPTODATE;
bio->bi_comp_cpu = -1;
atomic_set(&bio->bi_cnt, 1);
}

@ -118,19 +146,25 @@ void bio_init(struct bio *bio)
* bio_alloc_bioset - allocate a bio for I/O
* @gfp_mask: the GFP_ mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
* @bs: the bio_set to allocate from
* @bs: the bio_set to allocate from. If %NULL, just use kmalloc
*
* Description:
* bio_alloc_bioset will first try it's on mempool to satisfy the allocation.
* bio_alloc_bioset will first try its own mempool to satisfy the allocation.
* If %__GFP_WAIT is set then we will block on the internal pool waiting
* for a &struct bio to become free.
* for a &struct bio to become free. If a %NULL @bs is passed in, we will
* fall back to just using @kmalloc to allocate the required memory.
*
* allocate bio and iovecs from the memory pools specified by the
* bio_set structure.
* bio_set structure, or @kmalloc if none given.
**/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
struct bio *bio;

if (bs)
bio = mempool_alloc(bs->bio_pool, gfp_mask);
else
bio = kmalloc(sizeof(*bio), gfp_mask);

if (likely(bio)) {
struct bio_vec *bvl = NULL;
@ -141,7 +175,10 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)

bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
if (unlikely(!bvl)) {
mempool_free(bio, bs->bio_pool);
if (bs)
mempool_free(bio, bs->bio_pool);
else
kfree(bio);
bio = NULL;
goto out;
}
@ -164,6 +201,23 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
return bio;
}

/*
* Like bio_alloc(), but doesn't use a mempool backing. This means that
* it CAN fail, but while bio_alloc() can only be used for allocations
* that have a short (finite) life span, bio_kmalloc() should be used
* for more permanent bio allocations (like allocating some bio's for
* initalization or setup purposes).
*/
struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
{
struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);

if (bio)
bio->bi_destructor = bio_kmalloc_destructor;

return bio;
}

void zero_fill_bio(struct bio *bio)
{
unsigned long flags;
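
The comment block above spells out the intended split: bio_alloc() draws from a mempool and suits short-lived I/O allocations, while the new bio_kmalloc() can fail and is meant for longer-lived, setup-time bios. A hedged sketch of such a setup-time caller, assuming only the prototype shown above (the completion handler name is made up):

/* Sketch: allocating a long-lived bio at setup time via bio_kmalloc(). */
struct bio *bio;

bio = bio_kmalloc(GFP_KERNEL, 4);	/* may return NULL, unlike a waiting bio_alloc() */
if (!bio)
	return -ENOMEM;
bio->bi_end_io = my_setup_endio;	/* hypothetical completion callback */
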
@ -208,14 +262,6 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
return bio->bi_phys_segments;
}

inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);

return bio->bi_hw_segments;
}

/**
* __bio_clone - clone a bio
* @bio: destination bio
@ -350,8 +396,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
*/

while (bio->bi_phys_segments >= q->max_phys_segments
|| bio->bi_hw_segments >= q->max_hw_segments
|| BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
|| bio->bi_phys_segments >= q->max_hw_segments) {

if (retried_segments)
return 0;
@ -395,13 +440,11 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
}

/* If we may be able to merge these biovecs, force a recount */
if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
bio->bi_flags &= ~(1 << BIO_SEG_VALID);

bio->bi_vcnt++;
bio->bi_phys_segments++;
bio->bi_hw_segments++;
done:
bio->bi_size += len;
return len;
@ -449,16 +492,19 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,

struct bio_map_data {
struct bio_vec *iovecs;
int nr_sgvecs;
struct sg_iovec *sgvecs;
int nr_sgvecs;
int is_our_pages;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
struct sg_iovec *iov, int iov_count)
struct sg_iovec *iov, int iov_count,
int is_our_pages)
{
memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
bmd->nr_sgvecs = iov_count;
bmd->is_our_pages = is_our_pages;
bio->bi_private = bmd;
}

@ -493,7 +539,8 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
}

static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
struct sg_iovec *iov, int iov_count, int uncopy)
struct sg_iovec *iov, int iov_count, int uncopy,
int do_free_page)
{
int ret = 0, i;
struct bio_vec *bvec;
@ -536,7 +583,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
}
}

if (uncopy)
if (do_free_page)
__free_page(bvec->bv_page);
}

@ -553,10 +600,11 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
int bio_uncopy_user(struct bio *bio)
{
struct bio_map_data *bmd = bio->bi_private;
int ret;

ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);
int ret = 0;

if (!bio_flagged(bio, BIO_NULL_MAPPED))
ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
bmd->nr_sgvecs, 1, bmd->is_our_pages);
bio_free_map_data(bmd);
bio_put(bio);
return ret;
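
The hunks above thread an ownership flag through the copy path: bio_set_map_data() records whether the bounce pages were allocated internally (is_our_pages), and bio_uncopy_user() passes that on as do_free_page, so pages supplied by a caller through rq_map_data are not freed behind its back. A minimal sketch of that rule, assuming only the fields shown in this diff:

/* Sketch of the ownership rule carried by is_our_pages/do_free_page. */
static void maybe_free_bounce_page(struct bio_map_data *bmd, struct bio_vec *bvec)
{
	if (bmd->is_our_pages)		/* pages came from alloc_page() in bio_copy_user_iov() */
		__free_page(bvec->bv_page);
	/* otherwise they belong to the rq_map_data owner and are left untouched */
}
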
@ -565,16 +613,20 @@ int bio_uncopy_user(struct bio *bio)
/**
* bio_copy_user_iov - copy user data to bio
* @q: destination block queue
* @map_data: pointer to the rq_map_data holding pages (if necessary)
* @iov: the iovec.
* @iov_count: number of elements in the iovec
* @write_to_vm: bool indicating writing to pages or not
* @gfp_mask: memory allocation flags
*
* Prepares and returns a bio for indirect user io, bouncing data
* to/from kernel pages as necessary. Must be paired with
* call bio_uncopy_user() on io completion.
*/
struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
int iov_count, int write_to_vm)
struct bio *bio_copy_user_iov(struct request_queue *q,
struct rq_map_data *map_data,
struct sg_iovec *iov, int iov_count,
int write_to_vm, gfp_t gfp_mask)
{
struct bio_map_data *bmd;
struct bio_vec *bvec;
@ -597,25 +649,38 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
len += iov[i].iov_len;
}

bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
if (!bmd)
return ERR_PTR(-ENOMEM);

ret = -ENOMEM;
bio = bio_alloc(GFP_KERNEL, nr_pages);
bio = bio_alloc(gfp_mask, nr_pages);
if (!bio)
goto out_bmd;

bio->bi_rw |= (!write_to_vm << BIO_RW);

ret = 0;
i = 0;
while (len) {
unsigned int bytes = PAGE_SIZE;
unsigned int bytes;

if (map_data)
bytes = 1U << (PAGE_SHIFT + map_data->page_order);
else
bytes = PAGE_SIZE;

if (bytes > len)
bytes = len;

page = alloc_page(q->bounce_gfp | GFP_KERNEL);
if (map_data) {
if (i == map_data->nr_entries) {
ret = -ENOMEM;
break;
}
page = map_data->pages[i++];
} else
page = alloc_page(q->bounce_gfp | gfp_mask);
if (!page) {
ret = -ENOMEM;
break;
@ -634,16 +699,17 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
* success
*/
if (!write_to_vm) {
ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);
ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
if (ret)
goto cleanup;
}

bio_set_map_data(bmd, bio, iov, iov_count);
bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
return bio;
cleanup:
bio_for_each_segment(bvec, bio, i)
__free_page(bvec->bv_page);
if (!map_data)
bio_for_each_segment(bvec, bio, i)
__free_page(bvec->bv_page);

bio_put(bio);
out_bmd:
@ -654,29 +720,32 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
/**
* bio_copy_user - copy user data to bio
* @q: destination block queue
* @map_data: pointer to the rq_map_data holding pages (if necessary)
* @uaddr: start of user address
* @len: length in bytes
* @write_to_vm: bool indicating writing to pages or not
* @gfp_mask: memory allocation flags
*
* Prepares and returns a bio for indirect user io, bouncing data
* to/from kernel pages as necessary. Must be paired with
* call bio_uncopy_user() on io completion.
*/
struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
unsigned int len, int write_to_vm)
struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
unsigned long uaddr, unsigned int len,
int write_to_vm, gfp_t gfp_mask)
{
struct sg_iovec iov;

iov.iov_base = (void __user *)uaddr;
iov.iov_len = len;

return bio_copy_user_iov(q, &iov, 1, write_to_vm);
return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
}

static struct bio *__bio_map_user_iov(struct request_queue *q,
struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
int write_to_vm)
int write_to_vm, gfp_t gfp_mask)
{
int i, j;
int nr_pages = 0;
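
With the widened prototype above, callers of bio_copy_user() now pass an optional rq_map_data describing pre-allocated pages plus an explicit gfp_mask. A hypothetical caller that lets the block layer allocate its own bounce pages might look like this (the surrounding variable names are assumptions):

/* Sketch: bouncing user data into a bio with the post-change prototype. */
struct bio *bio;

bio = bio_copy_user(q, NULL /* no rq_map_data */, uaddr, len,
		    1 /* write_to_vm */, GFP_KERNEL);
if (IS_ERR(bio))
	return PTR_ERR(bio);
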
@ -702,12 +771,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
if (!nr_pages)
return ERR_PTR(-EINVAL);

bio = bio_alloc(GFP_KERNEL, nr_pages);
bio = bio_alloc(gfp_mask, nr_pages);
if (!bio)
return ERR_PTR(-ENOMEM);

ret = -ENOMEM;
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
if (!pages)
goto out;

@ -786,19 +855,21 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
* @uaddr: start of user address
* @len: length in bytes
* @write_to_vm: bool indicating writing to pages or not
* @gfp_mask: memory allocation flags
*
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
unsigned long uaddr, unsigned int len, int write_to_vm,
gfp_t gfp_mask)
{
struct sg_iovec iov;

iov.iov_base = (void __user *)uaddr;
iov.iov_len = len;

return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}

/**
@ -808,18 +879,19 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
* @iov: the iovec.
* @iov_count: number of elements in the iovec
* @write_to_vm: bool indicating writing to pages or not
* @gfp_mask: memory allocation flags
*
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
int write_to_vm)
int write_to_vm, gfp_t gfp_mask)
{
struct bio *bio;

bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);

bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
gfp_mask);
if (IS_ERR(bio))
return bio;

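
bio_map_user() and bio_map_user_iov() likewise gain a gfp_mask argument, so the allocation context is chosen by the caller instead of being hard-coded to GFP_KERNEL. A hypothetical direct-mapping caller under the new prototype (surrounding variables are assumed, not taken from this diff):

/* Sketch: mapping a user buffer straight into a bio. */
struct bio *bio;

bio = bio_map_user(q, bdev, uaddr, len, 1 /* write_to_vm */, GFP_KERNEL);
if (IS_ERR(bio))
	return PTR_ERR(bio);
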
@ -976,48 +1048,13 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
gfp_t gfp_mask, int reading)
{
unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = kaddr >> PAGE_SHIFT;
const int nr_pages = end - start;
struct bio *bio;
struct bio_vec *bvec;
struct bio_map_data *bmd;
int i, ret;
struct sg_iovec iov;
int i;

iov.iov_base = data;
iov.iov_len = len;

bmd = bio_alloc_map_data(nr_pages, 1, gfp_mask);
if (!bmd)
return ERR_PTR(-ENOMEM);

ret = -ENOMEM;
bio = bio_alloc(gfp_mask, nr_pages);
if (!bio)
goto out_bmd;

while (len) {
struct page *page;
unsigned int bytes = PAGE_SIZE;

if (bytes > len)
bytes = len;

page = alloc_page(q->bounce_gfp | gfp_mask);
if (!page) {
ret = -ENOMEM;
goto cleanup;
}

if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
ret = -EINVAL;
goto cleanup;
}

len -= bytes;
}
bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
if (IS_ERR(bio))
return bio;

if (!reading) {
void *p = data;
@ -1030,20 +1067,9 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
}
}

bio->bi_private = bmd;
bio->bi_end_io = bio_copy_kern_endio;

bio_set_map_data(bmd, bio, &iov, 1);
return bio;
cleanup:
bio_for_each_segment(bvec, bio, i)
__free_page(bvec->bv_page);

bio_put(bio);
out_bmd:
bio_free_map_data(bmd);

return ERR_PTR(ret);
}

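
After this rewrite bio_copy_kern() no longer builds its bounce buffer by hand; it wraps the kernel buffer and delegates to bio_copy_user(), keeping only the copy-in loop for the write case. A hedged sketch of a driver using it to send a kernel buffer out (the buffer variables are assumptions):

/* Sketch: copying a kernel buffer into a bio for a write request. */
struct bio *bio;

bio = bio_copy_kern(q, buf, buf_len, GFP_KERNEL, 0 /* reading == 0: data flows out */);
if (IS_ERR(bio))
	return PTR_ERR(bio);
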
/*
@ -1230,9 +1256,9 @@ static void bio_pair_end_2(struct bio *bi, int err)
* split a bio - only worry about a bio with a single page
* in it's iovec
*/
struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
struct bio_pair *bio_split(struct bio *bi, int first_sectors)
{
struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);
struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);

if (!bp)
return bp;
@ -1266,7 +1292,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
bp->bio2.bi_end_io = bio_pair_end_2;

bp->bio1.bi_private = bi;
bp->bio2.bi_private = pool;
bp->bio2.bi_private = bio_split_pool;

if (bio_integrity(bi))
bio_integrity_split(bi, bp, first_sectors);
@ -1274,6 +1300,42 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
return bp;
}

/**
* bio_sector_offset - Find hardware sector offset in bio
* @bio: bio to inspect
* @index: bio_vec index
* @offset: offset in bv_page
*
* Return the number of hardware sectors between beginning of bio
* and an end point indicated by a bio_vec index and an offset
* within that vector's page.
*/
sector_t bio_sector_offset(struct bio *bio, unsigned short index,
unsigned int offset)
{
unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue);
struct bio_vec *bv;
sector_t sectors;
int i;

sectors = 0;

if (index >= bio->bi_idx)
index = bio->bi_vcnt - 1;

__bio_for_each_segment(bv, bio, i, 0) {
if (i == index) {
if (offset > bv->bv_offset)
sectors += (offset - bv->bv_offset) / sector_sz;
break;
}

sectors += bv->bv_len / sector_sz;
}

return sectors;
}
EXPORT_SYMBOL(bio_sector_offset);

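
With bio_split_pool now static to fs/bio.c, bio_split() loses its mempool argument and always takes the bio_pair from the internal pool. A hypothetical remapping-driver caller after this change (the submission and release details are a sketch, not part of this diff):

/* Sketch: splitting a bio at first_sectors with the post-change prototype. */
struct bio_pair *bp = bio_split(bio, first_sectors);	/* mempool argument dropped */

if (bp) {
	generic_make_request(&bp->bio1);
	generic_make_request(&bp->bio2);
	bio_pair_release(bp);
}
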
/*
* create memory pools for biovec's in a bio_set.
@ -1376,6 +1438,7 @@ static int __init init_bio(void)
subsys_initcall(init_bio);

EXPORT_SYMBOL(bio_alloc);
EXPORT_SYMBOL(bio_kmalloc);
EXPORT_SYMBOL(bio_put);
EXPORT_SYMBOL(bio_free);
EXPORT_SYMBOL(bio_endio);
@ -1383,7 +1446,6 @@ EXPORT_SYMBOL(bio_init);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_hw_segments);
EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_add_pc_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
@ -1393,7 +1455,6 @@ EXPORT_SYMBOL(bio_map_kern);
EXPORT_SYMBOL(bio_copy_kern);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);
EXPORT_SYMBOL(bio_copy_user);
EXPORT_SYMBOL(bio_uncopy_user);
EXPORT_SYMBOL(bioset_create);
Some files were not shown because too many files have changed in this diff.