/*
* zfcp device driver
|
2005-04-17 05:20:36 +07:00
|
|
|
*
|
2008-06-10 23:20:58 +07:00
|
|
|
* External function declarations.
|
2007-09-07 14:15:31 +07:00
|
|
|
*
* Copyright IBM Corp. 2002, 2016
|
2005-04-17 05:20:36 +07:00
|
|
|
*/

#ifndef ZFCP_EXT_H
#define ZFCP_EXT_H

#include <linux/types.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_def.h"
#include "zfcp_fc.h"

/* zfcp_aux.c */
extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
					   u32);
extern void zfcp_sg_free_table(struct scatterlist *, int);
extern int zfcp_sg_setup_table(struct scatterlist *, int);
extern void zfcp_adapter_release(struct kref *);
extern void zfcp_adapter_unregister(struct zfcp_adapter *);

/* zfcp_ccw.c */
extern struct ccw_driver zfcp_ccw_driver;
extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);

/* zfcp_dbf.c */
extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
			      struct zfcp_port *, struct scsi_device *, u8, u8);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
				 struct zfcp_erp_action *erp);
extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
|
2010-12-02 21:16:13 +07:00
|
|
|
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
|
|
|
|
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
|
|
|
|
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
|
2016-08-10 23:30:47 +07:00
|
|
|
extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
|
|
|
|
struct zfcp_fsf_req *);

/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *);
extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_timeout_handler(unsigned long);

/* zfcp_fc.c */
extern struct kmem_cache *zfcp_fc_req_cache;
extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
				  enum fc_host_event_code event_code, u32);
extern void zfcp_fc_post_event(struct work_struct *);
extern void zfcp_fc_scan_ports(struct work_struct *);
extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
extern void zfcp_fc_port_did_lookup(struct work_struct *);
extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *);
extern void zfcp_fc_test_link(struct zfcp_port *);
extern void zfcp_fc_link_test_work(struct work_struct *);
extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_exec_bsg_job(struct bsg_job *);
extern int zfcp_fc_timeout_bsg_job(struct bsg_job *);
extern void zfcp_fc_sym_name_update(struct work_struct *);
extern unsigned int zfcp_fc_port_scan_backoff(void);
|
2012-09-04 20:23:35 +07:00
|
|
|
extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
|
|
|
|
extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);

/* zfcp_fsf.c */
extern struct kmem_cache *zfcp_fsf_qtcb_cache;
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_lun(struct zfcp_erp_action *);
extern int zfcp_fsf_close_lun(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
					      struct fsf_qtcb_bottom_config *);
extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
					    struct fsf_qtcb_bottom_port *);
extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_qdio *);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
			    mempool_t *, unsigned int);
extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
			     struct zfcp_fsf_ct_els *, unsigned int);
extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);

/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
extern void zfcp_qdio_destroy(struct zfcp_qdio *);
extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
				   struct scatterlist *);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
extern void zfcp_qdio_siosl(struct zfcp_adapter *);

/* zfcp_scsi.c */
extern struct scsi_transport_template *zfcp_scsi_transport_template;
extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
extern struct fc_function_template zfcp_transport_functions;
extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);

/* zfcp_sysfs.c */
extern const struct attribute_group *zfcp_unit_attr_groups[];
extern struct attribute_group zfcp_sysfs_adapter_attrs;
extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];

/* zfcp_unit.c */
extern int zfcp_unit_add(struct zfcp_port *, u64);
extern int zfcp_unit_remove(struct zfcp_port *, u64);
extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64);
extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit);
extern void zfcp_unit_scsi_scan(struct zfcp_unit *);
extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *);
extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *);

#endif /* ZFCP_EXT_H */