Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-25 00:40:56 +07:00)
ide: ide_hwgroup_t.rq doesn't need an ide_lock held
While at it:
- no need to check for hwgroup presence in ide_dump_opcode()

Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
parent 44e3123108
commit 1d0bf587df
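Every hunk below applies the same pattern: cache the hwgroup pointer once, read and clear hwgroup->rq without taking ide_lock, and keep the lock only around the block-layer completion calls that still need it. The stand-alone C sketch below illustrates that before/after pattern; the simplified structs, the pthread mutex standing in for ide_lock, and the end_cmd_old()/end_cmd_new() helpers are illustrative assumptions, not the kernel's real definitions.

/* sketch.c -- before/after locking pattern for hwgroup->rq (illustrative only) */
#include <pthread.h>
#include <stdio.h>

struct request { int errors; };

struct ide_hwgroup { struct request *rq; };          /* stand-in for ide_hwgroup_t */
struct ide_hwif   { struct ide_hwgroup *hwgroup; };  /* stand-in for ide_hwif_t    */
struct ide_drive  { struct ide_hwif *hwif; };        /* stand-in for ide_drive_t   */

static pthread_mutex_t ide_lock = PTHREAD_MUTEX_INITIALIZER;  /* models ide_lock */

/* models __blk_end_request(): the call that still wants the lock held */
static void blk_end_request_locked(struct request *rq)
{
	rq->errors = 0;
}

/* Old pattern: ide_lock taken just to read and clear hwgroup->rq. */
static void end_cmd_old(struct ide_drive *drive)
{
	struct request *rq;

	pthread_mutex_lock(&ide_lock);
	rq = drive->hwif->hwgroup->rq;
	drive->hwif->hwgroup->rq = NULL;
	blk_end_request_locked(rq);
	pthread_mutex_unlock(&ide_lock);
}

/* New pattern: cache the hwgroup pointer, touch ->rq without the lock,
 * and take ide_lock only around the block-layer completion call. */
static void end_cmd_new(struct ide_drive *drive)
{
	struct ide_hwgroup *hwgroup = drive->hwif->hwgroup;
	struct request *rq = hwgroup->rq;

	hwgroup->rq = NULL;

	pthread_mutex_lock(&ide_lock);
	blk_end_request_locked(rq);
	pthread_mutex_unlock(&ide_lock);
}

int main(void)
{
	struct request req = { .errors = -1 };
	struct ide_hwgroup grp = { .rq = &req };
	struct ide_hwif hwif = { .hwgroup = &grp };
	struct ide_drive drive = { .hwif = &hwif };

	end_cmd_new(&drive);
	printf("completed: errors=%d, hwgroup->rq=%p\n", req.errors, (void *)grp.rq);

	grp.rq = &req;
	end_cmd_old(&drive);
	return 0;
}

Compile with something like cc -pthread sketch.c; only the narrowed lock scope matters, mirroring what the hunks do in ide_end_drive_cmd(), ide_complete_pm_request() and the cdrom paths.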
@@ -317,7 +317,8 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
 static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	struct request *rq = hwif->hwgroup->rq;
+	ide_hwgroup_t *hwgroup = hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 	int stat, err, sense_key;
 
 	/* check for errors */
@@ -508,9 +509,10 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 
 		spin_lock_irqsave(&ide_lock, flags);
 		blkdev_dequeue_request(rq);
-		HWGROUP(drive)->rq = NULL;
 		spin_unlock_irqrestore(&ide_lock, flags);
 
+		hwgroup->rq = NULL;
+
 		cdrom_queue_request_sense(drive, rq->sense, rq);
 	} else
 		cdrom_end_request(drive, 0);
@@ -950,7 +952,8 @@ static int cdrom_newpc_intr_dummy_cb(struct request *rq)
 static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	struct request *rq = HWGROUP(drive)->rq;
+	ide_hwgroup_t *hwgroup = hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 	xfer_func_t *xferfunc;
 	ide_expiry_t *expiry = NULL;
 	int dma_error = 0, dma, stat, thislen, uptodate = 0;
@@ -1157,8 +1160,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 		spin_lock_irqsave(&ide_lock, flags);
 		if (__blk_end_request(rq, 0, dlen))
 			BUG();
-		HWGROUP(drive)->rq = NULL;
 		spin_unlock_irqrestore(&ide_lock, flags);
+
+		hwgroup->rq = NULL;
 	} else {
 		if (!uptodate)
 			rq->cmd_flags |= REQ_FAILED;
@@ -107,17 +107,10 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 {
 	unsigned int nr_bytes = nr_sectors << 9;
-	struct request *rq;
+	struct request *rq = drive->hwif->hwgroup->rq;
 	unsigned long flags;
 	int ret = 1;
 
-	/*
-	 * room for locking improvements here, the calls below don't
-	 * need the queue lock held at all
-	 */
-	spin_lock_irqsave(&ide_lock, flags);
-	rq = HWGROUP(drive)->rq;
-
 	if (!nr_bytes) {
 		if (blk_pc_request(rq))
 			nr_bytes = rq->data_len;
@@ -125,9 +118,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 			nr_bytes = rq->hard_cur_sectors << 9;
 	}
 
+	spin_lock_irqsave(&ide_lock, flags);
 	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
-
 	spin_unlock_irqrestore(&ide_lock, flags);
+
 	return ret;
 }
 EXPORT_SYMBOL(ide_end_request);
@@ -245,8 +239,9 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&ide_lock, flags);
 	BUG_ON(!blk_rq_started(rq));
+
+	spin_lock_irqsave(&ide_lock, flags);
 	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
 	spin_unlock_irqrestore(&ide_lock, flags);
 
@@ -278,7 +273,11 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
 		blk_start_queue(drive->queue);
 	}
-	HWGROUP(drive)->rq = NULL;
+	spin_unlock_irqrestore(&ide_lock, flags);
+
+	drive->hwif->hwgroup->rq = NULL;
+
+	spin_lock_irqsave(&ide_lock, flags);
 	if (__blk_end_request(rq, 0, 0))
 		BUG();
 	spin_unlock_irqrestore(&ide_lock, flags);
@@ -300,12 +299,9 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 
 void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 {
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 	unsigned long flags;
-	struct request *rq;
-
-	spin_lock_irqsave(&ide_lock, flags);
-	rq = HWGROUP(drive)->rq;
-	spin_unlock_irqrestore(&ide_lock, flags);
 
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		ide_task_t *task = (ide_task_t *)rq->special;
@@ -333,15 +329,16 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 		return;
 	}
 
-	spin_lock_irqsave(&ide_lock, flags);
-	HWGROUP(drive)->rq = NULL;
+	hwgroup->rq = NULL;
+
 	rq->errors = err;
+
+	spin_lock_irqsave(&ide_lock, flags);
 	if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
 				       blk_rq_bytes(rq))))
 		BUG();
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
 
 EXPORT_SYMBOL(ide_end_drive_cmd);
 
 static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
@@ -1489,11 +1486,12 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 
 void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 {
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
 	unsigned long flags;
-	ide_hwgroup_t *hwgroup = HWGROUP(drive);
 
+	hwgroup->rq = NULL;
+
 	spin_lock_irqsave(&ide_lock, flags);
-	hwgroup->rq = NULL;
 	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
 	blk_start_queueing(drive->queue);
 	spin_unlock_irqrestore(&ide_lock, flags);
@@ -277,14 +277,9 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
 
 static void ide_dump_opcode(ide_drive_t *drive)
 {
-	struct request *rq;
+	struct request *rq = drive->hwif->hwgroup->rq;
 	ide_task_t *task = NULL;
 
-	spin_lock(&ide_lock);
-	rq = NULL;
-	if (HWGROUP(drive))
-		rq = HWGROUP(drive)->rq;
-	spin_unlock(&ide_lock);
 	if (!rq)
 		return;
 