commit dac37e15b7
When SCSI EH invokes zFCP's callbacks for eh_device_reset_handler() and eh_target_reset_handler(), it expects us to relent the ownership over the given scsi_cmnd and all other scsi_cmnds within the same scope - LUN or target - when returning with SUCCESS from the callback ('release' them). SCSI EH can then reuse those commands.

We did not follow this rule and did not release commands upon SUCCESS; if a reply later arrived for one of those supposedly released commands, we would still make use of the scsi_cmnd in our ingress tasklet. This results at least in undefined behavior or a kernel panic because of a wrong kernel pointer dereference.

To fix this, we NULLify all pointers to scsi_cmnds ((struct zfcp_fsf_req *)->data) in the matching scope if a TMF was successful. This is done under the locks (struct zfcp_adapter *)->abort_lock and (struct zfcp_reqlist *)->lock to prevent the requests from being removed from the request hashtable and to prevent the ingress tasklet from making use of the scsi_cmnd pointer in zfcp_fsf_fcp_cmnd_handler().

For cases where a reply arrives during SCSI EH, before we get a chance to NULLify the pointer and before we return from the callback, we assume the code is protected from races by the CAS operation in blk_complete_request(), which is called from scsi_done().

The following stacktrace shows an example of a crash resulting from the previous behavior:

Unable to handle kernel pointer dereference at virtual kernel address fffffee17a672000
Oops: 0038 [#1] SMP
CPU: 2 PID: 0 Comm: swapper/2 Not tainted
task: 00000003f7ff5be0 ti: 00000003f3d38000 task.ti: 00000003f3d38000
Krnl PSW : 0404d00180000000 00000000001156b0 (smp_vcpu_scheduled+0x18/0x40)
           R:0 T:1 IO:0 EX:0 Key:0 M:1 W:0 P:0 AS:3 CC:1 PM:0 EA:3
Krnl GPRS: 000000200000007e 0000000000000000 fffffee17a671fd8 0000000300000015
           ffffffff80000000 00000000005dfde8 07000003f7f80e00 000000004fa4e800
           000000036ce8d8f8 000000036ce8d9c0 00000003ece8fe00 ffffffff969c9e93
           00000003fffffffd 000000036ce8da10 00000000003bf134 00000003f3b07918
Krnl Code: 00000000001156a2: a7190000          lghi    %r1,0
           00000000001156a6: a7380015          lhi     %r3,21
          #00000000001156aa: e32050000008      ag      %r2,0(%r5)
          >00000000001156b0: 482022b0          lh      %r2,688(%r2)
           00000000001156b4: ae123000          sigp    %r1,%r2,0(%r3)
           00000000001156b8: b2220020          ipm     %r2
           00000000001156bc: 8820001c          srl     %r2,28
           00000000001156c0: c02700000001      xilf    %r2,1
Call Trace:
([<0000000000000000>] 0x0)
 [<000003ff807bdb8e>] zfcp_fsf_fcp_cmnd_handler+0x3de/0x490 [zfcp]
 [<000003ff807be30a>] zfcp_fsf_req_complete+0x252/0x800 [zfcp]
 [<000003ff807c0a48>] zfcp_fsf_reqid_check+0xe8/0x190 [zfcp]
 [<000003ff807c194e>] zfcp_qdio_int_resp+0x66/0x188 [zfcp]
 [<000003ff80440c64>] qdio_kick_handler+0xdc/0x310 [qdio]
 [<000003ff804463d0>] __tiqdio_inbound_processing+0xf8/0xcd8 [qdio]
 [<0000000000141fd4>] tasklet_action+0x9c/0x170
 [<0000000000141550>] __do_softirq+0xe8/0x258
 [<000000000010ce0a>] do_softirq+0xba/0xc0
 [<000000000014187c>] irq_exit+0xc4/0xe8
 [<000000000046b526>] do_IRQ+0x146/0x1d8
 [<00000000005d6a3c>] io_return+0x0/0x8
 [<00000000005d6422>] vtime_stop_cpu+0x4a/0xa0
([<0000000000000000>] 0x0)
 [<0000000000103d8a>] arch_cpu_idle+0xa2/0xb0
 [<0000000000197f94>] cpu_startup_entry+0x13c/0x1f8
 [<0000000000114782>] smp_start_secondary+0xda/0xe8
 [<00000000005d6efe>] restart_int_handler+0x56/0x6c
[<0000000000000000>] 0x0
Last Breaking-Event-Address:
 [<00000000003bf12e>] arch_spin_lock_wait+0x56/0xb0

Suggested-by: Steffen Maier <maier@linux.vnet.ibm.com>
Signed-off-by: Benjamin Block <bblock@linux.vnet.ibm.com>
Fixes: ea127f9754 ("[PATCH] s390 (7/7): zfcp host adapter.") (tglx/history.git)
Cc: <stable@vger.kernel.org> #2.6.32+
Signed-off-by: Steffen Maier <maier@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
zfcp_reqlist.h (212 lines, 5.4 KiB, C)
/*
 * zfcp device driver
 *
 * Data structure and helper functions for tracking pending FSF
 * requests.
 *
 * Copyright IBM Corp. 2009, 2016
 */

#ifndef ZFCP_REQLIST_H
#define ZFCP_REQLIST_H

/* number of hash buckets */
#define ZFCP_REQ_LIST_BUCKETS 128

/**
 * struct zfcp_reqlist - Container for request list (reqlist)
 * @lock: Spinlock for protecting the hash list
 * @buckets: Array of hash buckets, each is a list of requests in this bucket
 */
struct zfcp_reqlist {
        spinlock_t lock;
        struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
};

static inline int zfcp_reqlist_hash(unsigned long req_id)
{
        return req_id % ZFCP_REQ_LIST_BUCKETS;
}

/**
 * zfcp_reqlist_alloc - Allocate and initialize reqlist
 *
 * Returns pointer to allocated reqlist on success, or NULL on
 * allocation failure.
 */
static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
{
        unsigned int i;
        struct zfcp_reqlist *rl;

        rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
        if (!rl)
                return NULL;

        spin_lock_init(&rl->lock);

        for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
                INIT_LIST_HEAD(&rl->buckets[i]);

        return rl;
}
/**
 * zfcp_reqlist_isempty - Check whether the request list is empty
 * @rl: pointer to reqlist
 *
 * Returns: 1 if list is empty, 0 if not
 */
static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
{
        unsigned int i;

        for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
                if (!list_empty(&rl->buckets[i]))
                        return 0;
        return 1;
}
/**
 * zfcp_reqlist_free - Free allocated memory for reqlist
 * @rl: The reqlist where to free memory
 */
static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
{
        /* sanity check */
        BUG_ON(!zfcp_reqlist_isempty(rl));

        kfree(rl);
}

static inline struct zfcp_fsf_req *
_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
{
        struct zfcp_fsf_req *req;
        unsigned int i;

        i = zfcp_reqlist_hash(req_id);
        list_for_each_entry(req, &rl->buckets[i], list)
                if (req->req_id == req_id)
                        return req;
        return NULL;
}

/**
 * zfcp_reqlist_find - Lookup FSF request by its request id
 * @rl: The reqlist where to lookup the FSF request
 * @req_id: The request id to look for
 *
 * Returns a pointer to the FSF request with the specified request id
 * or NULL if there is no known FSF request with this id.
 */
static inline struct zfcp_fsf_req *
zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
{
        unsigned long flags;
        struct zfcp_fsf_req *req;

        spin_lock_irqsave(&rl->lock, flags);
        req = _zfcp_reqlist_find(rl, req_id);
        spin_unlock_irqrestore(&rl->lock, flags);

        return req;
}
/**
 * zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
 * @rl: reqlist where to search and remove entry
 * @req_id: The request id of the request to look for
 *
 * This function tries to find the FSF request with the specified
 * id and then removes it from the reqlist. The reqlist lock is held
 * during both steps of the operation.
 *
 * Returns: Pointer to the FSF request if the request has been found,
 * NULL if it has not been found.
 */
static inline struct zfcp_fsf_req *
zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
{
        unsigned long flags;
        struct zfcp_fsf_req *req;

        spin_lock_irqsave(&rl->lock, flags);
        req = _zfcp_reqlist_find(rl, req_id);
        if (req)
                list_del(&req->list);
        spin_unlock_irqrestore(&rl->lock, flags);

        return req;
}
/**
 * zfcp_reqlist_add - Add entry to reqlist
 * @rl: reqlist where to add the entry
 * @req: The entry to add
 *
 * The request id always increases. As an optimization new requests
 * are added here with list_add_tail at the end of the bucket lists
 * while old requests are looked up starting at the beginning of the
 * lists.
 */
static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
                                    struct zfcp_fsf_req *req)
{
        unsigned int i;
        unsigned long flags;

        i = zfcp_reqlist_hash(req->req_id);

        spin_lock_irqsave(&rl->lock, flags);
        list_add_tail(&req->list, &rl->buckets[i]);
        spin_unlock_irqrestore(&rl->lock, flags);
}

/**
 * zfcp_reqlist_move - Move all entries from reqlist to simple list
 * @rl: The zfcp_reqlist where to remove all entries
 * @list: The list where to move all entries
 */
static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
                                     struct list_head *list)
{
        unsigned int i;
        unsigned long flags;

        spin_lock_irqsave(&rl->lock, flags);
        for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
                list_splice_init(&rl->buckets[i], list);
        spin_unlock_irqrestore(&rl->lock, flags);
}

/**
 * zfcp_reqlist_apply_for_all() - apply a function to every request.
 * @rl: the requestlist that contains the target requests.
 * @f: the function to apply to each request; the first parameter of the
 *     function will be the target-request; the second parameter is the same
 *     pointer as given with the argument @data.
 * @data: freely chosen argument; passed through to @f as second parameter.
 *
 * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash-
 * table (not a 'safe' variant, so don't modify the list).
 *
 * Holds @rl->lock over the entire request-iteration.
 */
static inline void
zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
                           void (*f)(struct zfcp_fsf_req *, void *), void *data)
{
        struct zfcp_fsf_req *req;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&rl->lock, flags);
        for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
                list_for_each_entry(req, &rl->buckets[i], list)
                        f(req, data);
        spin_unlock_irqrestore(&rl->lock, flags);
}
#endif /* ZFCP_REQLIST_H */
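
For orientation, a minimal usage sketch of the reqlist API follows. This is an illustration only: the example_* functions are hypothetical and the real call sites are spread across the zfcp driver. The list is allocated once per adapter, every outgoing FSF request is added under its request id, an incoming reply is matched and unhashed via zfcp_reqlist_find_rm(), and the list is freed only once it is empty.

/* Hypothetical call sites demonstrating the reqlist lifecycle. */
static int example_setup(struct zfcp_adapter *adapter)
{
        adapter->req_list = zfcp_reqlist_alloc();
        if (!adapter->req_list)
                return -ENOMEM;
        return 0;
}

static void example_issue(struct zfcp_adapter *adapter,
                          struct zfcp_fsf_req *req)
{
        /* make the pending request findable by its id before sending it */
        zfcp_reqlist_add(adapter->req_list, req);
}

static void example_complete(struct zfcp_adapter *adapter,
                             unsigned long req_id)
{
        /* look up and unhash the request this reply belongs to, under the lock */
        struct zfcp_fsf_req *req =
                zfcp_reqlist_find_rm(adapter->req_list, req_id);

        if (!req)
                return;         /* unknown id: stale or bogus reply */
        /* ... complete the request ... */
}

static void example_teardown(struct zfcp_adapter *adapter)
{
        /* all requests must have completed by now */
        zfcp_reqlist_free(adapter->req_list);
}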