/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 *  bfad.c Linux driver PCI interface module.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <asm/fcntl.h>

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_defs.h"
#include "bfa.h"
BFA_TRC_FILE(LDRV, BFAD);
DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);
static int	bfad_inst;
static int	num_sgpgs_parm;
int		supported_fc4s;
char		*host_name, *os_name, *os_patch;
int		num_rports, num_ios, num_tms;
int		num_fcxps, num_ufbufs;
int		reqq_size, rspq_size, num_sgpgs;
int		rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
int		bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
int		bfa_io_max_sge = BFAD_IO_MAX_SGE;
int		bfa_log_level = 3; /* WARNING log level */
int		ioc_auto_recover = BFA_TRUE;
int		bfa_linkup_delay = -1;
int		fdmi_enable = BFA_TRUE;
int		pcie_max_read_reqsz;
int		bfa_debugfs_enable = 1;
int		msix_disable_cb = 0, msix_disable_ct = 0;
int		max_xfer_size = BFAD_MAX_SECTORS >> 1;
int		max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;

/* Firmware related */
u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
u32	*bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
#define BFAD_FW_FILE_CB		"cbfw-3.2.5.1.bin"
#define BFAD_FW_FILE_CT		"ctfw-3.2.5.1.bin"
#define BFAD_FW_FILE_CT2	"ct2fw-3.2.5.1.bin"

static u32	*bfad_load_fwimg(struct pci_dev *pdev);
static void	bfad_free_fwimg(void);
static void	bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
		u32 *bfi_image_size, char *fw_name);

static const char *msix_name_ct[] = {
	"ctrl",
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3" };

static const char *msix_name_cb[] = {
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3",
	"eemc", "elpu0", "elpu1", "epss", "mlpu" };

MODULE_FIRMWARE(BFAD_FW_FILE_CB);
MODULE_FIRMWARE(BFAD_FW_FILE_CT);
MODULE_FIRMWARE(BFAD_FW_FILE_CT2);
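
/*
 * Note (assumption, not part of the original source): the images named
 * above are expected to be fetched through the kernel firmware loader
 * (request_firmware()), which searches the standard firmware paths,
 * typically /lib/firmware. bfad_read_firmware(), declared above, is the
 * driver-side hook for that load.
 */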
module_param(os_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
module_param(host_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
module_param(num_rports, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
				"(physical/logical), default=1024");
module_param(num_ios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
module_param(num_tms, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
				"buffers, default=64");
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
				"default=256");
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
				"default=64");
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
				"Range[>0]");
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
				"Range[Critical:1|Error:2|Warning:3|Info:4]");
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
				"Range[off:0|on:1]");
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
			"boot port. Otherwise 10 secs in RHEL4 & 0 for "
			"[RHEL5, SLES10, ESX40] Range[>0]");
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts for QLogic-415/425/815/825 cards, default=0 Range[false:0|true:1]");
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts if possible for QLogic-1010/1020/804/1007/902/1741 cards, default=0, Range[false:0|true:1]");
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
				"Range[false:0|true:1]");
module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
		"(use system setting), Range[128|256|512|1024|2048|4096]");
module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
		" Range[false:0|true:1]");
module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
		" Range[64k|128k|256k|512k|1024k|2048k]");
module_param(max_rport_logins, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024");
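
/*
 * Example usage of the parameters above (illustrative only, the values
 * shown are arbitrary and not recommendations):
 *
 *   modprobe bfa bfa_lun_queue_depth=64 bfa_log_level=4 msix_disable_ct=1
 *
 * When the driver is built into the kernel, the same parameters can be
 * passed on the kernel command line as bfa.<parameter>=<value>.
 */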

static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
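
/*
 * Summary of the driver instance state machine implemented by the
 * handlers below (derived from the transitions in this file):
 *
 *   uninit --CREATE--> created --INIT--> initializing
 *   initializing --INIT_SUCCESS--> operational
 *   initializing --HAL_INIT_FAILED--> failed
 *   operational --STOP--> fcs_exit --FCS_EXIT_COMP--> stopping
 *   stopping --EXIT_COMP--> uninit
 */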

/*
 * Beginning state for the driver instance, awaiting the pci_probe event
 */
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_CREATE:
		bfa_sm_set_state(bfad, bfad_sm_created);
		bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
						"%s", "bfad_worker");
		if (IS_ERR(bfad->bfad_tsk)) {
			printk(KERN_INFO "bfad[%d]: Kernel thread "
				"creation failed!\n", bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
		}
		bfa_sm_send_event(bfad, BFAD_E_INIT);
		break;

	case BFAD_E_STOP:
		/* Ignore stop; already in uninit */
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

/*
 * Driver Instance is created, awaiting event INIT to initialize the bfad
 */
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
	unsigned long flags;
	bfa_status_t ret;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT:
		bfa_sm_set_state(bfad, bfad_sm_initializing);

		init_completion(&bfad->comp);

		/* Enable Interrupt and wait bfa_init completion */
		if (bfad_setup_intr(bfad)) {
			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
					bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
			break;
		}

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_iocfc_init(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		/* Set up interrupt handler for each vectors */
		if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
			bfad_install_msix_handler(bfad)) {
			printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
				__func__, bfad->inst_no);
		}

		bfad_init_timer(bfad);

		wait_for_completion(&bfad->comp);

		if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
			bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
		} else {
			printk(KERN_WARNING
				"bfa %s: bfa init failed\n",
				bfad->pci_name);
			spin_lock_irqsave(&bfad->bfad_lock, flags);
			bfa_fcs_init(&bfad->bfa_fcs);
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);

			ret = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
			if (ret != BFA_STATUS_OK) {
				init_completion(&bfad->comp);

				spin_lock_irqsave(&bfad->bfad_lock, flags);
				bfad->pport.flags |= BFAD_PORT_DELETE;
				bfa_fcs_exit(&bfad->bfa_fcs);
				spin_unlock_irqrestore(&bfad->bfad_lock, flags);

				wait_for_completion(&bfad->comp);

				bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
				break;
			}
			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
			bfa_sm_send_event(bfad, BFAD_E_HAL_INIT_FAILED);
		}

		break;

	case BFAD_E_KTHREAD_CREATE_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;
	unsigned long	flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK) {
			bfa_sm_set_state(bfad, bfad_sm_failed);
			break;
		}
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		break;

	case BFAD_E_HAL_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_failed);
		break;
	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_STOP:
		bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
		bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
		break;

	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_STOP:
		bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
		bfad_fcs_stop(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_FCS_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_stopping);
		bfad_stop(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		bfad_im_probe_undo(bfad);
		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		bfad_uncfg_pport(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
		break;
	}
}

/*
 *  BFA callbacks
 */
void
bfad_hcb_comp(void *arg, bfa_status_t status)
{
	struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;

	fcomp->status = status;
	complete(&fcomp->comp);
}

/*
 * bfa_init callback
 */
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
	struct bfad_s *bfad = drv;

	if (init_status == BFA_STATUS_OK) {
		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

		/*
		 * If BFAD_HAL_INIT_FAIL flag is set:
		 * Wake up the kernel thread to start
		 * the bfad operations after HAL init done
		 */
		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
			wake_up_process(bfad->bfad_tsk);
		}
	}

	complete(&bfad->comp);
}

/*
 *  BFA_FCS callbacks
 */
struct bfad_port_s *
bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
		 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
		 struct bfad_vport_s *vp_drv)
{
	bfa_status_t	rc;
	struct bfad_port_s	*port_drv;

	if (!vp_drv && !vf_drv) {
		port_drv = &bfad->pport;
		port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
	} else if (!vp_drv && vf_drv) {
		port_drv = &vf_drv->base_port;
		port_drv->pvb_type = BFAD_PORT_VF_BASE;
	} else if (vp_drv && !vf_drv) {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
	} else {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_VF_VPORT;
	}

	port_drv->fcs_port = port;
	port_drv->roles = roles;

	if (roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_port_new(bfad, port_drv);
		if (rc != BFA_STATUS_OK) {
			bfad_im_port_delete(bfad, port_drv);
			port_drv = NULL;
		}
	}

	return port_drv;
}

/*
 * FCS RPORT alloc callback, after successful PLOGI by FCS
 */
bfa_status_t
bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
		    struct bfad_rport_s **rport_drv)
{
	bfa_status_t	rc = BFA_STATUS_OK;

	*rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
	if (*rport_drv == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	*rport = &(*rport_drv)->fcs_rport;

ext:
	return rc;
}

/*
 * FCS PBC VPORT Create
 */
void
bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
{
	struct bfa_lport_cfg_s port_cfg = {0};
	struct bfad_vport_s	*vport;
	int rc;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);
	if (!vport) {
		bfa_trc(bfad, 0);
		return;
	}

	vport->drv_port.bfad = bfad;
	port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
	port_cfg.pwwn = pbc_vport.vp_pwwn;
	port_cfg.nwwn = pbc_vport.vp_nwwn;
	port_cfg.preboot_vp  = BFA_TRUE;

	rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
				      &port_cfg, vport);

	if (rc != BFA_STATUS_OK) {
		bfa_trc(bfad, 0);
		return;
	}

	list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
}

void
bfad_hal_mem_release(struct bfad_s *bfad)
{
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	dma_info = &hal_meminfo->dma_info;
	kva_info = &hal_meminfo->kva_info;

	/* Iterate through the KVA meminfo queue */
	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		vfree(kva_elem->kva);
	}

	/* Iterate through the DMA meminfo queue */
	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_free_coherent(&bfad->pcidev->dev,
				dma_elem->mem_len, dma_elem->kva,
				(dma_addr_t) dma_elem->dma);
	}

	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}

void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
	if (num_rports > 0)
		bfa_cfg->fwcfg.num_rports = num_rports;
	if (num_ios > 0)
		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
	if (num_tms > 0)
		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
	if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
	if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
	if (reqq_size > 0)
		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
	if (rspq_size > 0)
		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
	if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

	/*
	 * populate the hal values back to the driver for sysfs use.
	 * otherwise, the default values will be shown as 0 in sysfs
	 */
	num_rports = bfa_cfg->fwcfg.num_rports;
	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}

bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;
	bfa_status_t	rc = BFA_STATUS_OK;
	dma_addr_t	phys_addr;

	bfa_cfg_get_default(&bfad->ioc_cfg);
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);

	dma_info = &hal_meminfo->dma_info;
	kva_info = &hal_meminfo->kva_info;

	/* Iterate through the KVA meminfo queue */
	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva = vzalloc(kva_elem->mem_len);
		if (kva_elem->kva == NULL) {
			bfad_hal_mem_release(bfad);
			rc = BFA_STATUS_ENOMEM;
			goto ext;
		}
	}

	/* Iterate through the DMA meminfo queue */
	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
						dma_elem->mem_len,
						&phys_addr, GFP_KERNEL);
		if (dma_elem->kva == NULL) {
			bfad_hal_mem_release(bfad);
			rc = BFA_STATUS_ENOMEM;
			goto ext;
		}
		dma_elem->dma = phys_addr;
		memset(dma_elem->kva, 0, dma_elem->mem_len);
	}
ext:
	return rc;
}

/*
 * Create a vport under a vf.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_lport_cfg_s *port_cfg, struct device *dev)
{
	struct bfad_vport_s	*vport;
	int		rc = BFA_STATUS_OK;
	unsigned long	flags;
	struct completion fcomp;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
							dev);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	list_add_tail(&vport->list_entry, &bfad->vport_list);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}

void
bfad_bfa_tmo(struct timer_list *t)
{
	struct bfad_s	*bfad = from_timer(bfad, t, hal_tmo);
	unsigned long	flags;
	struct list_head	doneq;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_timer_beat(&bfad->bfa.timer_mod);

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

void
bfad_init_timer(struct bfad_s *bfad)
{
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-17 04:43:17 +07:00
	timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0);
2009-09-24 07:46:15 +07:00
2010-09-16 01:50:55 +07:00
	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
2009-09-24 07:46:15 +07:00
}
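Tying the script back to the converted call above: after the conversion, the callback paired with this timer_setup() takes the struct timer_list pointer and recovers its bfad_s via from_timer(). The snippet below is only a reconstruction of that shape from the rules in the commit message, not a verbatim quote of the driver's bfad_bfa_tmo():

void bfad_bfa_tmo(struct timer_list *t)
{
	struct bfad_s *bfad = from_timer(bfad, t, hal_tmo);

	/* existing timeout processing runs here; the handler then re-arms
	 * hal_tmo the same way the initialization code above does */
	mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}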
int
bfad_pci_init ( struct pci_dev * pdev , struct bfad_s * bfad )
{
2010-09-16 01:50:55 +07:00
int rc = - ENODEV ;
2009-09-24 07:46:15 +07:00
if ( pci_enable_device ( pdev ) ) {
2010-09-16 01:50:55 +07:00
printk ( KERN_ERR " pci_enable_device fail %p \n " , pdev ) ;
2009-09-24 07:46:15 +07:00
goto out ;
}
if ( pci_request_regions ( pdev , BFAD_DRIVER_NAME ) )
goto out_disable_device ;
pci_set_master ( pdev ) ;
2011-06-25 10:23:38 +07:00
	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
		    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
2010-09-16 01:50:55 +07:00
			printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
2009-09-24 07:46:15 +07:00
			goto out_release_region;
		}
2011-06-25 10:23:38 +07:00
	}
2009-09-24 07:46:15 +07:00
2012-08-23 09:52:02 +07:00
/* Enable PCIE Advanced Error Recovery (AER) if kernel supports */
pci_enable_pcie_error_reporting ( pdev ) ;
2010-03-20 01:06:44 +07:00
bfad - > pci_bar0_kva = pci_iomap ( pdev , 0 , pci_resource_len ( pdev , 0 ) ) ;
2011-06-14 05:50:35 +07:00
bfad - > pci_bar2_kva = pci_iomap ( pdev , 2 , pci_resource_len ( pdev , 2 ) ) ;
2009-09-24 07:46:15 +07:00
if ( bfad - > pci_bar0_kva = = NULL ) {
2010-09-16 01:50:55 +07:00
printk ( KERN_ERR " Fail to map bar0 \n " ) ;
2009-09-24 07:46:15 +07:00
goto out_release_region ;
}
bfad - > hal_pcidev . pci_slot = PCI_SLOT ( pdev - > devfn ) ;
bfad - > hal_pcidev . pci_func = PCI_FUNC ( pdev - > devfn ) ;
bfad - > hal_pcidev . pci_bar_kva = bfad - > pci_bar0_kva ;
bfad - > hal_pcidev . device_id = pdev - > device ;
2011-06-25 10:22:28 +07:00
bfad - > hal_pcidev . ssid = pdev - > subsystem_device ;
2009-09-24 07:46:15 +07:00
bfad - > pci_name = pci_name ( pdev ) ;
bfad - > pci_attr . vendor_id = pdev - > vendor ;
bfad - > pci_attr . device_id = pdev - > device ;
bfad - > pci_attr . ssid = pdev - > subsystem_device ;
bfad - > pci_attr . ssvid = pdev - > subsystem_vendor ;
bfad - > pci_attr . pcifn = PCI_FUNC ( pdev - > devfn ) ;
bfad - > pcidev = pdev ;
2010-09-16 01:50:55 +07:00
/* Adjust PCIe Maximum Read Request Size */
2013-09-05 14:55:25 +07:00
if ( pci_is_pcie ( pdev ) & & pcie_max_read_reqsz ) {
if ( pcie_max_read_reqsz > = 128 & &
pcie_max_read_reqsz < = 4096 & &
is_power_of_2 ( pcie_max_read_reqsz ) ) {
int max_rq = pcie_get_readrq ( pdev ) ;
printk ( KERN_WARNING " BFA[%s]: "
2010-09-16 01:50:55 +07:00
" pcie_max_read_request_size is %d, "
2013-09-05 14:55:25 +07:00
" reset to %d \n " , bfad - > pci_name , max_rq ,
2010-09-16 01:50:55 +07:00
pcie_max_read_reqsz ) ;
2013-09-05 14:55:25 +07:00
pcie_set_readrq ( pdev , pcie_max_read_reqsz ) ;
} else {
printk ( KERN_WARNING " BFA[%s]: invalid "
" pcie_max_read_request_size %d ignored \n " ,
bfad - > pci_name , pcie_max_read_reqsz ) ;
2010-09-16 01:50:55 +07:00
}
}
2012-08-23 09:52:02 +07:00
pci_save_state ( pdev ) ;
2009-09-24 07:46:15 +07:00
return 0 ;
out_release_region :
pci_release_regions ( pdev ) ;
out_disable_device :
pci_disable_device ( pdev ) ;
out :
return rc ;
}
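A side note on the DMA-mask fallback in bfad_pci_init() above: the same 64-bit-then-32-bit sequence can be written with the consolidated dma_set_mask_and_coherent() helper. The following is only an illustrative sketch of the equivalent logic, not code from this driver:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
	/* Prefer 64-bit streaming and coherent masks, fall back to 32-bit. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;

	return 0;
}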
void
bfad_pci_uninit ( struct pci_dev * pdev , struct bfad_s * bfad )
{
pci_iounmap ( pdev , bfad - > pci_bar0_kva ) ;
2011-06-14 05:50:35 +07:00
pci_iounmap ( pdev , bfad - > pci_bar2_kva ) ;
2009-09-24 07:46:15 +07:00
pci_release_regions ( pdev ) ;
2012-08-23 09:52:02 +07:00
/* Disable PCIE Advanced Error Recovery (AER) */
pci_disable_pcie_error_reporting ( pdev ) ;
2009-09-24 07:46:15 +07:00
pci_disable_device ( pdev ) ;
}
bfa_status_t
bfad_drv_init ( struct bfad_s * bfad )
{
2010-09-16 01:50:55 +07:00
bfa_status_t rc ;
unsigned long flags ;
2009-09-24 07:46:15 +07:00
bfad - > cfg_data . rport_del_timeout = rport_del_timeout ;
bfad - > cfg_data . lun_queue_depth = bfa_lun_queue_depth ;
bfad - > cfg_data . io_max_sge = bfa_io_max_sge ;
bfad - > cfg_data . binding_method = FCP_PWWN_BINDING ;
rc = bfad_hal_mem_alloc ( bfad ) ;
if ( rc ! = BFA_STATUS_OK ) {
printk ( KERN_WARNING " bfad%d bfad_hal_mem_alloc failure \n " ,
bfad - > inst_no ) ;
printk ( KERN_WARNING
2015-11-26 15:54:46 +07:00
" Not enough memory to attach all QLogic BR-series HBA ports. System may need more memory. \n " ) ;
2013-11-21 16:37:33 +07:00
return BFA_STATUS_FAILED ;
2009-09-24 07:46:15 +07:00
}
2010-12-10 10:08:43 +07:00
bfad - > bfa . trcmod = bfad - > trcmod ;
bfad - > bfa . plog = & bfad - > plog_buf ;
2009-09-24 07:46:15 +07:00
bfa_plog_init ( & bfad - > plog_buf ) ;
bfa_plog_str ( & bfad - > plog_buf , BFA_PL_MID_DRVR , BFA_PL_EID_DRIVER_START ,
0 , " Driver Attach " ) ;
bfa_attach ( & bfad - > bfa , bfad , & bfad - > ioc_cfg , & bfad - > meminfo ,
& bfad - > hal_pcidev ) ;
2010-09-16 01:50:55 +07:00
/* FCS INIT */
2009-09-24 07:46:15 +07:00
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
2010-12-10 10:08:43 +07:00
bfad - > bfa_fcs . trcmod = bfad - > trcmod ;
2010-03-04 08:43:30 +07:00
bfa_fcs_attach ( & bfad - > bfa_fcs , & bfad - > bfa , bfad , BFA_FALSE ) ;
2010-12-10 10:08:43 +07:00
bfad - > bfa_fcs . fdmi_enabled = fdmi_enable ;
2009-09-24 07:46:15 +07:00
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
bfad - > bfad_flags | = BFAD_DRV_INIT_DONE ;
2010-09-16 01:50:55 +07:00
2009-09-24 07:46:15 +07:00
return BFA_STATUS_OK ;
}
void
bfad_drv_uninit ( struct bfad_s * bfad )
{
2010-03-04 08:44:02 +07:00
unsigned long flags ;
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
init_completion ( & bfad - > comp ) ;
2010-12-10 10:08:43 +07:00
bfa_iocfc_stop ( & bfad - > bfa ) ;
2010-03-04 08:44:02 +07:00
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
wait_for_completion ( & bfad - > comp ) ;
2009-09-24 07:46:15 +07:00
del_timer_sync ( & bfad - > hal_tmo ) ;
bfa_isr_disable ( & bfad - > bfa ) ;
bfa_detach ( & bfad - > bfa ) ;
bfad_remove_intr ( bfad ) ;
bfad_hal_mem_release ( bfad ) ;
2010-03-04 08:44:02 +07:00
bfad - > bfad_flags & = ~ BFAD_DRV_INIT_DONE ;
2009-09-24 07:46:15 +07:00
}
void
bfad_drv_start ( struct bfad_s * bfad )
{
2010-09-16 01:50:55 +07:00
unsigned long flags ;
2009-09-24 07:46:15 +07:00
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
2010-12-10 10:08:43 +07:00
bfa_iocfc_start ( & bfad - > bfa ) ;
2011-06-14 05:54:31 +07:00
bfa_fcs_pbc_vport_init ( & bfad - > bfa_fcs ) ;
2010-12-10 10:08:43 +07:00
bfa_fcs_fabric_modstart ( & bfad - > bfa_fcs ) ;
2009-09-24 07:46:15 +07:00
bfad - > bfad_flags | = BFAD_HAL_START_DONE ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
2010-09-16 01:50:55 +07:00
if ( bfad - > im )
flush_workqueue ( bfad - > im - > drv_workq ) ;
2009-09-24 07:46:15 +07:00
}
void
2010-09-16 01:50:55 +07:00
bfad_fcs_stop ( struct bfad_s * bfad )
2009-09-24 07:46:15 +07:00
{
2010-09-16 01:50:55 +07:00
unsigned long flags ;
2009-09-24 07:46:15 +07:00
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
init_completion ( & bfad - > comp ) ;
bfad - > pport . flags | = BFAD_PORT_DELETE ;
bfa_fcs_exit ( & bfad - > bfa_fcs ) ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
wait_for_completion ( & bfad - > comp ) ;
2010-09-16 01:50:55 +07:00
bfa_sm_send_event ( bfad , BFAD_E_FCS_EXIT_COMP ) ;
}
void
bfad_stop ( struct bfad_s * bfad )
{
unsigned long flags ;
2009-09-24 07:46:15 +07:00
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
init_completion ( & bfad - > comp ) ;
2010-12-10 10:08:43 +07:00
bfa_iocfc_stop ( & bfad - > bfa ) ;
2009-09-24 07:46:15 +07:00
bfad - > bfad_flags & = ~ BFAD_HAL_START_DONE ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
wait_for_completion ( & bfad - > comp ) ;
2010-09-16 01:50:55 +07:00
bfa_sm_send_event ( bfad , BFAD_E_EXIT_COMP ) ;
2009-09-24 07:46:15 +07:00
}
bfa_status_t
2010-09-16 01:50:55 +07:00
bfad_cfg_pport ( struct bfad_s * bfad , enum bfa_lport_role role )
2009-09-24 07:46:15 +07:00
{
2010-09-16 01:50:55 +07:00
int rc = BFA_STATUS_OK ;
2009-09-24 07:46:15 +07:00
2010-09-16 01:50:55 +07:00
/* Allocate scsi_host for the physical port */
if ( ( supported_fc4s & BFA_LPORT_ROLE_FCP_IM ) & &
( role & BFA_LPORT_ROLE_FCP_IM ) ) {
2009-09-24 07:46:15 +07:00
if ( bfad - > pport . im_port = = NULL ) {
rc = BFA_STATUS_FAILED ;
goto out ;
}
2010-03-20 01:05:39 +07:00
rc = bfad_im_scsi_host_alloc ( bfad , bfad - > pport . im_port ,
& bfad - > pcidev - > dev ) ;
2009-09-24 07:46:15 +07:00
if ( rc ! = BFA_STATUS_OK )
goto out ;
2010-09-16 01:50:55 +07:00
bfad - > pport . roles | = BFA_LPORT_ROLE_FCP_IM ;
2009-09-24 07:46:15 +07:00
}
bfad - > bfad_flags | = BFAD_CFG_PPORT_DONE ;
out :
return rc ;
}
void
bfad_uncfg_pport ( struct bfad_s * bfad )
{
2010-09-16 01:50:55 +07:00
if ( ( supported_fc4s & BFA_LPORT_ROLE_FCP_IM ) & &
( bfad - > pport . roles & BFA_LPORT_ROLE_FCP_IM ) ) {
2009-09-24 07:46:15 +07:00
bfad_im_scsi_host_free ( bfad , bfad - > pport . im_port ) ;
bfad_im_port_clean ( bfad - > pport . im_port ) ;
kfree ( bfad - > pport . im_port ) ;
2010-09-16 01:50:55 +07:00
bfad - > pport . roles & = ~ BFA_LPORT_ROLE_FCP_IM ;
2009-09-24 07:46:15 +07:00
}
bfad - > bfad_flags & = ~ BFAD_CFG_PPORT_DONE ;
}
2010-03-04 08:44:02 +07:00
bfa_status_t
2010-09-16 01:50:55 +07:00
bfad_start_ops ( struct bfad_s * bfad ) {
int retval ;
unsigned long flags ;
struct bfad_vport_s * vport , * vport_new ;
struct bfa_fcs_driver_info_s driver_info ;
2011-06-25 10:29:07 +07:00
	/* Limit min/max. xfer size to [64k-32MB] */
	if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
		max_xfer_size = BFAD_MIN_SECTORS >> 1;
	if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
		max_xfer_size = BFAD_MAX_SECTORS >> 1;
2010-09-16 01:50:55 +07:00
/* Fill the driver_info info to fcs*/
memset ( & driver_info , 0 , sizeof ( driver_info ) ) ;
2017-12-04 21:47:00 +07:00
strlcpy ( driver_info . version , BFAD_DRIVER_VERSION ,
sizeof ( driver_info . version ) ) ;
2010-09-16 01:50:55 +07:00
if ( host_name )
2017-12-04 21:47:00 +07:00
strlcpy ( driver_info . host_machine_name , host_name ,
sizeof ( driver_info . host_machine_name ) ) ;
2010-09-16 01:50:55 +07:00
if ( os_name )
2017-12-04 21:47:00 +07:00
strlcpy ( driver_info . host_os_name , os_name ,
sizeof ( driver_info . host_os_name ) ) ;
2010-09-16 01:50:55 +07:00
if ( os_patch )
2017-12-04 21:47:00 +07:00
strlcpy ( driver_info . host_os_patch , os_patch ,
sizeof ( driver_info . host_os_patch ) ) ;
2010-09-16 01:50:55 +07:00
2017-12-04 21:47:00 +07:00
strlcpy ( driver_info . os_device_name , bfad - > pci_name ,
sizeof ( driver_info . os_device_name ) ) ;
2010-09-16 01:50:55 +07:00
2011-06-14 05:54:31 +07:00
/* FCS driver info init */
2010-09-16 01:50:55 +07:00
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfa_fcs_driver_info_init ( & bfad - > bfa_fcs , & driver_info ) ;
2013-11-21 16:37:33 +07:00
if ( bfad - > bfad_flags & BFAD_CFG_PPORT_DONE )
bfa_fcs_update_cfg ( & bfad - > bfa_fcs ) ;
else
bfa_fcs_init ( & bfad - > bfa_fcs ) ;
2010-09-16 01:50:55 +07:00
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
2010-03-04 08:44:02 +07:00
2013-11-21 16:37:33 +07:00
if ( ! ( bfad - > bfad_flags & BFAD_CFG_PPORT_DONE ) ) {
retval = bfad_cfg_pport ( bfad , BFA_LPORT_ROLE_FCP_IM ) ;
if ( retval ! = BFA_STATUS_OK )
return BFA_STATUS_FAILED ;
}
2011-06-14 05:54:31 +07:00
/* Set up the fc_host fixed attributes if the kernel supports them */
bfad_fc_host_init ( bfad - > pport . im_port ) ;
2010-03-04 08:44:02 +07:00
2010-09-16 01:50:55 +07:00
/* BFAD level FC4 IM specific resource allocation */
retval = bfad_im_probe ( bfad ) ;
if ( retval ! = BFA_STATUS_OK ) {
printk ( KERN_WARNING " bfad_im_probe failed \n " ) ;
if ( bfa_sm_cmp_state ( bfad , bfad_sm_initializing ) )
bfa_sm_set_state ( bfad , bfad_sm_failed ) ;
return BFA_STATUS_FAILED ;
} else
bfad - > bfad_flags | = BFAD_FC4_PROBE_DONE ;
2010-03-04 08:44:02 +07:00
bfad_drv_start ( bfad ) ;
2010-09-16 01:50:55 +07:00
/* Complete pbc vport create */
list_for_each_entry_safe ( vport , vport_new , & bfad - > pbc_vport_list ,
list_entry ) {
2010-07-09 09:46:26 +07:00
struct fc_vport_identifiers vid ;
struct fc_vport * fc_vport ;
2010-09-16 01:50:55 +07:00
char pwwn_buf [ BFA_STRING_32 ] ;
2010-07-09 09:46:26 +07:00
memset ( & vid , 0 , sizeof ( vid ) ) ;
vid . roles = FC_PORT_ROLE_FCP_INITIATOR ;
vid . vport_type = FC_PORTTYPE_NPIV ;
vid . disable = false ;
2010-09-16 01:50:55 +07:00
vid . node_name = wwn_to_u64 ( ( u8 * )
( & ( ( vport - > fcs_vport ) . lport . port_cfg . nwwn ) ) ) ;
vid . port_name = wwn_to_u64 ( ( u8 * )
( & ( ( vport - > fcs_vport ) . lport . port_cfg . pwwn ) ) ) ;
2010-07-09 09:46:26 +07:00
fc_vport = fc_vport_create ( bfad - > pport . im_port - > shost , 0 , & vid ) ;
2010-09-16 01:50:55 +07:00
if ( ! fc_vport ) {
wwn2str ( pwwn_buf , vid . port_name ) ;
2010-07-09 09:46:26 +07:00
printk ( KERN_WARNING " bfad%d: failed to create pbc vport "
2010-09-16 01:50:55 +07:00
" %s \n " , bfad - > inst_no , pwwn_buf ) ;
}
list_del ( & vport - > list_entry ) ;
kfree ( vport ) ;
2010-07-09 09:46:26 +07:00
}
2010-03-04 08:44:02 +07:00
/*
* If bfa_linkup_delay is set to -1 (the default), try to retrieve the
2010-12-10 10:12:32 +07:00
* value using bfad_get_linkup_delay(); otherwise use the
2010-03-04 08:44:02 +07:00
* module parameter value passed in as bfa_linkup_delay.
*/
if ( bfa_linkup_delay < 0 ) {
2010-12-10 10:12:32 +07:00
bfa_linkup_delay = bfad_get_linkup_delay ( bfad ) ;
bfad_rport_online_wait ( bfad ) ;
2010-03-04 08:44:02 +07:00
bfa_linkup_delay = - 1 ;
2010-09-16 01:50:55 +07:00
} else
2010-12-10 10:12:32 +07:00
bfad_rport_online_wait ( bfad ) ;
2010-03-04 08:44:02 +07:00
2010-12-10 08:11:53 +07:00
BFA_LOG ( KERN_INFO , bfad , bfa_log_level , " bfa device claimed \n " ) ;
2010-03-04 08:44:02 +07:00
return BFA_STATUS_OK ;
}
int
2010-07-09 09:46:26 +07:00
bfad_worker ( void * ptr )
2010-03-04 08:44:02 +07:00
{
2015-04-17 02:48:29 +07:00
struct bfad_s * bfad = ptr ;
unsigned long flags ;
2010-03-04 08:44:02 +07:00
2015-04-17 02:48:29 +07:00
if ( kthread_should_stop ( ) )
return 0 ;
2010-03-04 08:44:02 +07:00
2015-04-17 02:48:29 +07:00
/* Send event BFAD_E_INIT_SUCCESS */
bfa_sm_send_event ( bfad , BFAD_E_INIT_SUCCESS ) ;
2010-03-04 08:44:02 +07:00
2015-04-17 02:48:29 +07:00
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfad - > bfad_tsk = NULL ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
2010-03-04 08:44:02 +07:00
return 0 ;
}
2010-10-19 07:17:23 +07:00
/*
2010-09-16 01:50:55 +07:00
* BFA driver interrupt functions
*/
irqreturn_t
bfad_intx ( int irq , void * dev_id )
{
struct bfad_s * bfad = dev_id ;
struct list_head doneq ;
unsigned long flags ;
bfa_boolean_t rc ;
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
rc = bfa_intx ( & bfad - > bfa ) ;
if ( ! rc ) {
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
return IRQ_NONE ;
}
bfa_comp_deq ( & bfad - > bfa , & doneq ) ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
if ( ! list_empty ( & doneq ) ) {
bfa_comp_process ( & bfad - > bfa , & doneq ) ;
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfa_comp_free ( & bfad - > bfa , & doneq ) ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
}
return IRQ_HANDLED ;
}
static irqreturn_t
bfad_msix ( int irq , void * dev_id )
{
struct bfad_msix_s * vec = dev_id ;
struct bfad_s * bfad = vec - > bfad ;
struct list_head doneq ;
unsigned long flags ;
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfa_msix ( & bfad - > bfa , vec - > msix . entry ) ;
bfa_comp_deq ( & bfad - > bfa , & doneq ) ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
if ( ! list_empty ( & doneq ) ) {
bfa_comp_process ( & bfad - > bfa , & doneq ) ;
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfa_comp_free ( & bfad - > bfa , & doneq ) ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
}
return IRQ_HANDLED ;
}
2010-10-19 07:17:23 +07:00
/*
2010-09-16 01:50:55 +07:00
* Initialize the MSIX entry table .
*/
static void
bfad_init_msix_entry ( struct bfad_s * bfad , struct msix_entry * msix_entries ,
int mask , int max_bit )
{
int i ;
int match = 0x00000001 ;
for ( i = 0 , bfad - > nvec = 0 ; i < MAX_MSIX_ENTRY ; i + + ) {
if ( mask & match ) {
bfad - > msix_tab [ bfad - > nvec ] . msix . entry = i ;
bfad - > msix_tab [ bfad - > nvec ] . bfad = bfad ;
msix_entries [ bfad - > nvec ] . entry = i ;
bfad - > nvec + + ;
}
match < < = 1 ;
}
}
int
bfad_install_msix_handler ( struct bfad_s * bfad )
{
int i , error = 0 ;
for ( i = 0 ; i < bfad - > nvec ; i + + ) {
sprintf ( bfad - > msix_tab [ i ] . name , " bfa-%s-%s " ,
bfad - > pci_name ,
2011-06-14 05:50:35 +07:00
( ( bfa_asic_id_cb ( bfad - > hal_pcidev . device_id ) ) ?
msix_name_cb [ i ] : msix_name_ct [ i ] ) ) ;
2010-09-16 01:50:55 +07:00
error = request_irq ( bfad - > msix_tab [ i ] . msix . vector ,
( irq_handler_t ) bfad_msix , 0 ,
bfad - > msix_tab [ i ] . name , & bfad - > msix_tab [ i ] ) ;
bfa_trc ( bfad , i ) ;
bfa_trc ( bfad , bfad - > msix_tab [ i ] . msix . vector ) ;
if ( error ) {
int j ;
for ( j = 0 ; j < i ; j + + )
free_irq ( bfad - > msix_tab [ j ] . msix . vector ,
& bfad - > msix_tab [ j ] ) ;
2011-06-25 10:29:07 +07:00
bfad - > bfad_flags & = ~ BFAD_MSIX_ON ;
pci_disable_msix ( bfad - > pcidev ) ;
2010-09-16 01:50:55 +07:00
return 1 ;
}
}
return 0 ;
}
2010-10-19 07:17:23 +07:00
/*
2010-09-16 01:50:55 +07:00
* Setup MSIX based interrupt .
*/
int
bfad_setup_intr ( struct bfad_s * bfad )
{
2014-07-17 01:05:07 +07:00
int error ;
2010-09-16 01:50:55 +07:00
u32 mask = 0 , i , num_bit = 0 , max_bit = 0 ;
struct msix_entry msix_entries [ MAX_MSIX_ENTRY ] ;
struct pci_dev * pdev = bfad - > pcidev ;
2011-06-25 10:23:38 +07:00
u16 reg ;
2010-09-16 01:50:55 +07:00
/* Call BFA to get the msix map for this PCI function. */
bfa_msix_getvecs ( & bfad - > bfa , & mask , & num_bit , & max_bit ) ;
/* Set up the msix entry table */
bfad_init_msix_entry ( bfad , msix_entries , mask , max_bit ) ;
2011-06-14 05:50:35 +07:00
if ( ( bfa_asic_id_ctc ( pdev - > device ) & & ! msix_disable_ct ) | |
( bfa_asic_id_cb ( pdev - > device ) & & ! msix_disable_cb ) ) {
2010-09-16 01:50:55 +07:00
2014-07-17 01:05:08 +07:00
error = pci_enable_msix_exact ( bfad - > pcidev ,
msix_entries , bfad - > nvec ) ;
2014-07-17 01:05:06 +07:00
/* In CT1 & CT2, try to allocate just one vector */
2014-07-17 01:05:08 +07:00
if ( error = = - ENOSPC & & bfa_asic_id_ctc ( pdev - > device ) ) {
2014-07-17 01:05:06 +07:00
printk ( KERN_WARNING " bfa %s: trying one msix "
" vector failed to allocate %d[%d] \n " ,
bfad - > pci_name , bfad - > nvec , error ) ;
bfad - > nvec = 1 ;
2014-07-17 01:05:08 +07:00
error = pci_enable_msix_exact ( bfad - > pcidev ,
msix_entries , 1 ) ;
2014-07-17 01:05:06 +07:00
}
2012-08-23 09:52:02 +07:00
2014-07-17 01:05:06 +07:00
if ( error ) {
printk ( KERN_WARNING " bfad%d: "
2014-07-17 01:05:08 +07:00
" pci_enable_msix_exact failed (%d), "
2014-07-17 01:05:06 +07:00
" use line based. \n " ,
bfad - > inst_no , error ) ;
goto line_based ;
2010-09-16 01:50:55 +07:00
}
2011-06-25 10:23:38 +07:00
/* Disable INTX in MSI-X mode */
pci_read_config_word ( pdev , PCI_COMMAND , & reg ) ;
if ( ! ( reg & PCI_COMMAND_INTX_DISABLE ) )
pci_write_config_word ( pdev , PCI_COMMAND ,
reg | PCI_COMMAND_INTX_DISABLE ) ;
2010-09-16 01:50:55 +07:00
/* Save the vectors */
for ( i = 0 ; i < bfad - > nvec ; i + + ) {
bfa_trc ( bfad , msix_entries [ i ] . vector ) ;
bfad - > msix_tab [ i ] . msix . vector = msix_entries [ i ] . vector ;
}
bfa_msix_init ( & bfad - > bfa , bfad - > nvec ) ;
bfad - > bfad_flags | = BFAD_MSIX_ON ;
2014-07-17 01:05:07 +07:00
return 0 ;
2010-09-16 01:50:55 +07:00
}
line_based :
2014-07-17 01:05:07 +07:00
error = request_irq ( bfad - > pcidev - > irq , ( irq_handler_t ) bfad_intx ,
BFAD_IRQ_FLAGS , BFAD_DRIVER_NAME , bfad ) ;
if ( error )
return error ;
2011-06-25 10:29:07 +07:00
bfad - > bfad_flags | = BFAD_INTX_ON ;
2010-09-16 01:50:55 +07:00
2014-07-17 01:05:07 +07:00
return 0 ;
2010-09-16 01:50:55 +07:00
}
void
bfad_remove_intr ( struct bfad_s * bfad )
{
int i ;
if ( bfad - > bfad_flags & BFAD_MSIX_ON ) {
for ( i = 0 ; i < bfad - > nvec ; i + + )
free_irq ( bfad - > msix_tab [ i ] . msix . vector ,
& bfad - > msix_tab [ i ] ) ;
pci_disable_msix ( bfad - > pcidev ) ;
bfad - > bfad_flags & = ~ BFAD_MSIX_ON ;
2011-06-25 10:29:07 +07:00
} else if ( bfad - > bfad_flags & BFAD_INTX_ON ) {
2010-09-16 01:50:55 +07:00
free_irq ( bfad - > pcidev - > irq , bfad ) ;
}
}
2009-09-24 07:46:15 +07:00
2010-10-19 07:17:23 +07:00
/*
2009-09-24 07:46:15 +07:00
* PCI probe entry .
*/
int
bfad_pci_probe ( struct pci_dev * pdev , const struct pci_device_id * pid )
{
2010-09-16 01:50:55 +07:00
struct bfad_s * bfad ;
2011-07-21 06:59:13 +07:00
int error = - ENODEV , retval , i ;
2009-09-24 07:46:15 +07:00
2010-09-16 01:50:55 +07:00
/* For single port cards - only claim function 0 */
2011-06-14 05:52:40 +07:00
if ( ( pdev - > device = = BFA_PCI_DEVICE_ID_FC_8G1P ) & &
2010-09-16 01:50:55 +07:00
( PCI_FUNC ( pdev - > devfn ) ! = 0 ) )
2009-09-24 07:46:15 +07:00
return - ENODEV ;
bfad = kzalloc ( sizeof ( struct bfad_s ) , GFP_KERNEL ) ;
if ( ! bfad ) {
error = - ENOMEM ;
goto out ;
}
bfad - > trcmod = kzalloc ( sizeof ( struct bfa_trc_mod_s ) , GFP_KERNEL ) ;
if ( ! bfad - > trcmod ) {
printk ( KERN_WARNING " Error alloc trace buffer! \n " ) ;
error = - ENOMEM ;
goto out_alloc_trace_failure ;
}
2010-09-16 01:50:55 +07:00
/* TRACE INIT */
2009-09-24 07:46:15 +07:00
bfa_trc_init ( bfad - > trcmod ) ;
bfa_trc ( bfad , bfad_inst ) ;
2011-07-21 06:59:13 +07:00
/* AEN INIT */
INIT_LIST_HEAD ( & bfad - > free_aen_q ) ;
INIT_LIST_HEAD ( & bfad - > active_aen_q ) ;
for ( i = 0 ; i < BFA_AEN_MAX_ENTRY ; i + + )
list_add_tail ( & bfad - > aen_list [ i ] . qe , & bfad - > free_aen_q ) ;
2009-09-24 07:46:15 +07:00
if ( ! ( bfad_load_fwimg ( pdev ) ) ) {
kfree ( bfad - > trcmod ) ;
goto out_alloc_trace_failure ;
}
retval = bfad_pci_init ( pdev , bfad ) ;
if ( retval ) {
printk ( KERN_WARNING " bfad_pci_init failure! \n " ) ;
error = retval ;
goto out_pci_init_failure ;
}
mutex_lock ( & bfad_mutex ) ;
bfad - > inst_no = bfad_inst + + ;
list_add_tail ( & bfad - > list_entry , & bfad_list ) ;
mutex_unlock ( & bfad_mutex ) ;
2010-09-16 01:50:55 +07:00
/* Initializing the state machine: State set to uninit */
bfa_sm_set_state ( bfad , bfad_sm_uninit ) ;
2009-09-24 07:46:15 +07:00
spin_lock_init ( & bfad - > bfad_lock ) ;
2012-07-11 23:42:55 +07:00
spin_lock_init ( & bfad - > bfad_aen_spinlock ) ;
2009-09-24 07:46:15 +07:00
pci_set_drvdata ( pdev , bfad ) ;
bfad - > ref_count = 0 ;
bfad - > pport . bfad = bfad ;
2010-09-16 01:50:55 +07:00
INIT_LIST_HEAD ( & bfad - > pbc_vport_list ) ;
2011-12-21 09:58:32 +07:00
INIT_LIST_HEAD ( & bfad - > vport_list ) ;
2010-03-04 08:44:02 +07:00
2011-04-15 06:50:35 +07:00
/* Setup the debugfs node for this bfad */
if ( bfa_debugfs_enable )
bfad_debugfs_init ( & bfad - > pport ) ;
2009-09-24 07:46:15 +07:00
retval = bfad_drv_init ( bfad ) ;
if ( retval ! = BFA_STATUS_OK )
goto out_drv_init_failure ;
2010-09-16 01:50:55 +07:00
bfa_sm_send_event ( bfad , BFAD_E_CREATE ) ;
2009-09-24 07:46:15 +07:00
2010-09-16 01:50:55 +07:00
if ( bfa_sm_cmp_state ( bfad , bfad_sm_uninit ) )
goto out_bfad_sm_failure ;
2009-09-24 07:46:15 +07:00
return 0 ;
2010-09-16 01:50:55 +07:00
out_bfad_sm_failure :
bfad_hal_mem_release ( bfad ) ;
2009-09-24 07:46:15 +07:00
out_drv_init_failure :
2011-04-15 06:50:35 +07:00
/* Remove the debugfs node for this bfad */
kfree ( bfad - > regdata ) ;
bfad_debugfs_exit ( & bfad - > pport ) ;
2009-09-24 07:46:15 +07:00
mutex_lock ( & bfad_mutex ) ;
bfad_inst - - ;
list_del ( & bfad - > list_entry ) ;
mutex_unlock ( & bfad_mutex ) ;
bfad_pci_uninit ( pdev , bfad ) ;
out_pci_init_failure :
kfree ( bfad - > trcmod ) ;
out_alloc_trace_failure :
kfree ( bfad ) ;
out :
return error ;
}
2010-10-19 07:17:23 +07:00
/*
2009-09-24 07:46:15 +07:00
* PCI remove entry .
*/
void
bfad_pci_remove ( struct pci_dev * pdev )
{
2010-09-16 01:50:55 +07:00
struct bfad_s * bfad = pci_get_drvdata ( pdev ) ;
unsigned long flags ;
2009-09-24 07:46:15 +07:00
bfa_trc ( bfad , bfad - > inst_no ) ;
2010-03-04 08:44:02 +07:00
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
2010-09-16 01:50:55 +07:00
if ( bfad - > bfad_tsk ! = NULL ) {
2009-09-24 07:46:15 +07:00
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
2010-09-16 01:50:55 +07:00
kthread_stop ( bfad - > bfad_tsk ) ;
} else {
2010-03-04 08:44:02 +07:00
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
}
2009-09-24 07:46:15 +07:00
2010-09-16 01:50:55 +07:00
/* Send Event BFAD_E_STOP */
bfa_sm_send_event ( bfad , BFAD_E_STOP ) ;
2010-03-04 08:44:02 +07:00
2010-09-16 01:50:55 +07:00
/* Driver detach and dealloc mem */
2009-09-24 07:46:15 +07:00
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfa_detach ( & bfad - > bfa ) ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
bfad_hal_mem_release ( bfad ) ;
2011-04-15 06:50:35 +07:00
/* Remove the debugfs node for this bfad */
kfree ( bfad - > regdata ) ;
bfad_debugfs_exit ( & bfad - > pport ) ;
2010-09-16 01:50:55 +07:00
/* Cleaning the BFAD instance */
2009-09-24 07:46:15 +07:00
mutex_lock ( & bfad_mutex ) ;
bfad_inst - - ;
list_del ( & bfad - > list_entry ) ;
mutex_unlock ( & bfad_mutex ) ;
bfad_pci_uninit ( pdev , bfad ) ;
kfree ( bfad - > trcmod ) ;
kfree ( bfad ) ;
}
2012-08-23 09:52:02 +07:00
/*
* PCI Error Recovery entry , error detected .
*/
static pci_ers_result_t
bfad_pci_error_detected ( struct pci_dev * pdev , pci_channel_state_t state )
{
struct bfad_s * bfad = pci_get_drvdata ( pdev ) ;
unsigned long flags ;
pci_ers_result_t ret = PCI_ERS_RESULT_NONE ;
dev_printk ( KERN_ERR , & pdev - > dev ,
" error detected state: %d - flags: 0x%x \n " ,
state , bfad - > bfad_flags ) ;
switch ( state ) {
case pci_channel_io_normal : /* non-fatal error */
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfad - > bfad_flags & = ~ BFAD_EEH_BUSY ;
/* Suspend/fail all bfa operations */
bfa_ioc_suspend ( & bfad - > bfa . ioc ) ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
del_timer_sync ( & bfad - > hal_tmo ) ;
ret = PCI_ERS_RESULT_CAN_RECOVER ;
break ;
case pci_channel_io_frozen : /* fatal error */
init_completion ( & bfad - > comp ) ;
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfad - > bfad_flags | = BFAD_EEH_BUSY ;
/* Suspend/fail all bfa operations */
bfa_ioc_suspend ( & bfad - > bfa . ioc ) ;
bfa_fcs_stop ( & bfad - > bfa_fcs ) ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
wait_for_completion ( & bfad - > comp ) ;
bfad_remove_intr ( bfad ) ;
del_timer_sync ( & bfad - > hal_tmo ) ;
pci_disable_device ( pdev ) ;
ret = PCI_ERS_RESULT_NEED_RESET ;
break ;
case pci_channel_io_perm_failure : /* PCI Card is DEAD */
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfad - > bfad_flags | = BFAD_EEH_BUSY |
BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
/* If the error_detected handler is called with the reason
 * pci_channel_io_perm_failure, the PCI core will subsequently call
 * the pci_remove() entry point to remove the PCI device from the
 * system. So defer the cleanup to pci_remove(); cleaning up
 * here causes inconsistent state during pci_remove().
 */
ret = PCI_ERS_RESULT_DISCONNECT ;
break ;
default :
WARN_ON ( 1 ) ;
}
return ret ;
}
int
restart_bfa ( struct bfad_s * bfad )
{
unsigned long flags ;
struct pci_dev * pdev = bfad - > pcidev ;
bfa_attach ( & bfad - > bfa , bfad , & bfad - > ioc_cfg ,
& bfad - > meminfo , & bfad - > hal_pcidev ) ;
/* Enable Interrupt and wait bfa_init completion */
if ( bfad_setup_intr ( bfad ) ) {
dev_printk ( KERN_WARNING , & pdev - > dev ,
" %s: bfad_setup_intr failed \n " , bfad - > pci_name ) ;
2013-11-21 16:37:33 +07:00
bfa_sm_send_event ( bfad , BFAD_E_INIT_FAILED ) ;
2012-08-23 09:52:02 +07:00
return - 1 ;
}
init_completion ( & bfad - > comp ) ;
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfa_iocfc_init ( & bfad - > bfa ) ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
/* Set up interrupt handler for each vectors */
if ( ( bfad - > bfad_flags & BFAD_MSIX_ON ) & &
bfad_install_msix_handler ( bfad ) )
dev_printk ( KERN_WARNING , & pdev - > dev ,
" %s: install_msix failed. \n " , bfad - > pci_name ) ;
bfad_init_timer ( bfad ) ;
wait_for_completion ( & bfad - > comp ) ;
bfad_drv_start ( bfad ) ;
return 0 ;
}
/*
* PCI Error Recovery entry , re - initialize the chip .
*/
static pci_ers_result_t
bfad_pci_slot_reset ( struct pci_dev * pdev )
{
struct bfad_s * bfad = pci_get_drvdata ( pdev ) ;
u8 byte ;
dev_printk ( KERN_ERR , & pdev - > dev ,
" bfad_pci_slot_reset flags: 0x%x \n " , bfad - > bfad_flags ) ;
if ( pci_enable_device ( pdev ) ) {
dev_printk ( KERN_ERR , & pdev - > dev , " Cannot re-enable "
" PCI device after reset. \n " ) ;
return PCI_ERS_RESULT_DISCONNECT ;
}
pci_restore_state ( pdev ) ;
/*
* Read some byte (e.g. the DMA max. payload size, which can't
* be 0xff at any time) to make sure we did not hit another PCI error
* in the middle of recovery. If we did, then declare permanent failure.
*/
pci_read_config_byte ( pdev , 0x68 , & byte ) ;
if ( byte = = 0xff ) {
dev_printk ( KERN_ERR , & pdev - > dev ,
" slot_reset failed ... got another PCI error ! \n " ) ;
goto out_disable_device ;
}
pci_save_state ( pdev ) ;
pci_set_master ( pdev ) ;
if ( pci_set_dma_mask ( bfad - > pcidev , DMA_BIT_MASK ( 64 ) ) ! = 0 )
if ( pci_set_dma_mask ( bfad - > pcidev , DMA_BIT_MASK ( 32 ) ) ! = 0 )
goto out_disable_device ;
pci_cleanup_aer_uncorrect_error_status ( pdev ) ;
if ( restart_bfa ( bfad ) = = - 1 )
goto out_disable_device ;
pci_enable_pcie_error_reporting ( pdev ) ;
dev_printk ( KERN_WARNING , & pdev - > dev ,
" slot_reset completed flags: 0x%x! \n " , bfad - > bfad_flags ) ;
return PCI_ERS_RESULT_RECOVERED ;
out_disable_device :
pci_disable_device ( pdev ) ;
return PCI_ERS_RESULT_DISCONNECT ;
}
static pci_ers_result_t
bfad_pci_mmio_enabled ( struct pci_dev * pdev )
{
unsigned long flags ;
struct bfad_s * bfad = pci_get_drvdata ( pdev ) ;
dev_printk ( KERN_INFO , & pdev - > dev , " mmio_enabled \n " ) ;
/* Fetch FW diagnostic information */
bfa_ioc_debug_save_ftrc ( & bfad - > bfa . ioc ) ;
/* Cancel all pending IOs */
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
init_completion ( & bfad - > comp ) ;
bfa_fcs_stop ( & bfad - > bfa_fcs ) ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
wait_for_completion ( & bfad - > comp ) ;
bfad_remove_intr ( bfad ) ;
del_timer_sync ( & bfad - > hal_tmo ) ;
pci_disable_device ( pdev ) ;
return PCI_ERS_RESULT_NEED_RESET ;
}
static void
bfad_pci_resume ( struct pci_dev * pdev )
{
unsigned long flags ;
struct bfad_s * bfad = pci_get_drvdata ( pdev ) ;
dev_printk ( KERN_WARNING , & pdev - > dev , " resume \n " ) ;
/* wait until the link is online */
bfad_rport_online_wait ( bfad ) ;
spin_lock_irqsave ( & bfad - > bfad_lock , flags ) ;
bfad - > bfad_flags & = ~ BFAD_EEH_BUSY ;
spin_unlock_irqrestore ( & bfad - > bfad_lock , flags ) ;
}
2010-09-16 01:50:55 +07:00
struct pci_device_id bfad_id_table [ ] = {
2009-09-24 07:46:15 +07:00
{
2010-09-16 01:50:55 +07:00
. vendor = BFA_PCI_VENDOR_ID_BROCADE ,
. device = BFA_PCI_DEVICE_ID_FC_8G2P ,
. subvendor = PCI_ANY_ID ,
. subdevice = PCI_ANY_ID ,
} ,
2009-09-24 07:46:15 +07:00
{
2010-09-16 01:50:55 +07:00
. vendor = BFA_PCI_VENDOR_ID_BROCADE ,
. device = BFA_PCI_DEVICE_ID_FC_8G1P ,
. subvendor = PCI_ANY_ID ,
. subdevice = PCI_ANY_ID ,
} ,
2009-09-24 07:46:15 +07:00
{
2010-09-16 01:50:55 +07:00
. vendor = BFA_PCI_VENDOR_ID_BROCADE ,
. device = BFA_PCI_DEVICE_ID_CT ,
. subvendor = PCI_ANY_ID ,
. subdevice = PCI_ANY_ID ,
. class = ( PCI_CLASS_SERIAL_FIBER < < 8 ) ,
. class_mask = ~ 0 ,
} ,
2010-07-09 09:45:20 +07:00
{
2010-09-16 01:50:55 +07:00
. vendor = BFA_PCI_VENDOR_ID_BROCADE ,
. device = BFA_PCI_DEVICE_ID_CT_FC ,
. subvendor = PCI_ANY_ID ,
. subdevice = PCI_ANY_ID ,
. class = ( PCI_CLASS_SERIAL_FIBER < < 8 ) ,
. class_mask = ~ 0 ,
2010-07-09 09:45:20 +07:00
} ,
2011-06-14 05:50:35 +07:00
{
. vendor = BFA_PCI_VENDOR_ID_BROCADE ,
. device = BFA_PCI_DEVICE_ID_CT2 ,
. subvendor = PCI_ANY_ID ,
. subdevice = PCI_ANY_ID ,
. class = ( PCI_CLASS_SERIAL_FIBER < < 8 ) ,
. class_mask = ~ 0 ,
} ,
2009-09-24 07:46:15 +07:00
2013-05-13 16:33:33 +07:00
{
. vendor = BFA_PCI_VENDOR_ID_BROCADE ,
. device = BFA_PCI_DEVICE_ID_CT2_QUAD ,
. subvendor = PCI_ANY_ID ,
. subdevice = PCI_ANY_ID ,
. class = ( PCI_CLASS_SERIAL_FIBER < < 8 ) ,
. class_mask = ~ 0 ,
} ,
2009-09-24 07:46:15 +07:00
{ 0 , 0 } ,
} ;
MODULE_DEVICE_TABLE ( pci , bfad_id_table ) ;
2012-08-23 09:52:02 +07:00
/*
* PCI error recovery handlers .
*/
static struct pci_error_handlers bfad_err_handler = {
. error_detected = bfad_pci_error_detected ,
. slot_reset = bfad_pci_slot_reset ,
. mmio_enabled = bfad_pci_mmio_enabled ,
. resume = bfad_pci_resume ,
} ;
2009-09-24 07:46:15 +07:00
static struct pci_driver bfad_pci_driver = {
. name = BFAD_DRIVER_NAME ,
. id_table = bfad_id_table ,
. probe = bfad_pci_probe ,
2012-12-22 04:08:55 +07:00
. remove = bfad_pci_remove ,
2012-08-23 09:52:02 +07:00
. err_handler = & bfad_err_handler ,
2009-09-24 07:46:15 +07:00
} ;
2010-10-19 07:17:23 +07:00
/*
2009-09-24 07:46:15 +07:00
* Driver module init .
*/
2010-09-16 01:50:55 +07:00
static int __init
2009-09-24 07:46:15 +07:00
bfad_init ( void )
{
2010-09-16 01:50:55 +07:00
int error = 0 ;
2009-09-24 07:46:15 +07:00
2015-11-26 15:54:46 +07:00
pr_info ( " QLogic BR-series BFA FC/FCOE SCSI driver - version: %s \n " ,
2010-09-16 01:50:55 +07:00
BFAD_DRIVER_VERSION ) ;
2009-09-24 07:46:15 +07:00
if ( num_sgpgs > 0 )
num_sgpgs_parm = num_sgpgs ;
2010-09-16 01:50:55 +07:00
error = bfad_im_module_init ( ) ;
2009-09-24 07:46:15 +07:00
if ( error ) {
error = - ENOMEM ;
2010-09-16 01:50:55 +07:00
printk ( KERN_WARNING " bfad_im_module_init failure \n " ) ;
2009-09-24 07:46:15 +07:00
goto ext ;
}
2010-09-16 01:50:55 +07:00
if ( strcmp ( FCPI_NAME , " fcpim " ) = = 0 )
supported_fc4s | = BFA_LPORT_ROLE_FCP_IM ;
2009-09-24 07:46:15 +07:00
2010-12-10 10:08:43 +07:00
bfa_auto_recover = ioc_auto_recover ;
2009-09-24 07:46:15 +07:00
bfa_fcs_rport_set_del_timeout ( rport_del_timeout ) ;
2012-08-23 09:52:58 +07:00
bfa_fcs_rport_set_max_logins ( max_rport_logins ) ;
2009-09-24 07:46:15 +07:00
2010-09-16 01:50:55 +07:00
error = pci_register_driver ( & bfad_pci_driver ) ;
2009-09-24 07:46:15 +07:00
if ( error ) {
2010-09-16 01:50:55 +07:00
printk ( KERN_WARNING " pci_register_driver failure \n " ) ;
2009-09-24 07:46:15 +07:00
goto ext ;
}
return 0 ;
ext :
2010-09-16 01:50:55 +07:00
bfad_im_module_exit ( ) ;
2009-09-24 07:46:15 +07:00
return error ;
}
2010-10-19 07:17:23 +07:00
/*
2009-09-24 07:46:15 +07:00
* Driver module exit .
*/
2010-09-16 01:50:55 +07:00
static void __exit
2009-09-24 07:46:15 +07:00
bfad_exit ( void )
{
pci_unregister_driver ( & bfad_pci_driver ) ;
2010-09-16 01:50:55 +07:00
bfad_im_module_exit ( ) ;
2009-09-24 07:46:15 +07:00
bfad_free_fwimg ( ) ;
}
2010-09-16 01:50:55 +07:00
/* Firmware handling */
2011-04-14 01:44:03 +07:00
static void
2010-09-16 01:50:55 +07:00
bfad_read_firmware ( struct pci_dev * pdev , u32 * * bfi_image ,
u32 * bfi_image_size , char * fw_name )
{
const struct firmware * fw ;
if ( request_firmware ( & fw , fw_name , & pdev - > dev ) ) {
printk ( KERN_ALERT " Can't locate firmware %s \n " , fw_name ) ;
2011-04-14 01:44:03 +07:00
* bfi_image = NULL ;
goto out ;
2010-09-16 01:50:55 +07:00
}
* bfi_image = vmalloc ( fw - > size ) ;
if ( NULL = = * bfi_image ) {
printk ( KERN_ALERT " Fail to allocate buffer for fw image "
" size=%x! \n " , ( u32 ) fw - > size ) ;
2011-04-14 01:44:03 +07:00
goto out ;
2010-09-16 01:50:55 +07:00
}
memcpy ( * bfi_image , fw - > data , fw - > size ) ;
* bfi_image_size = fw - > size / sizeof ( u32 ) ;
2011-04-14 01:44:03 +07:00
out :
release_firmware ( fw ) ;
2010-09-16 01:50:55 +07:00
}
2011-04-14 01:44:03 +07:00
static u32 *
bfad_load_fwimg ( struct pci_dev * pdev )
2010-09-16 01:50:55 +07:00
{
2013-12-04 20:43:58 +07:00
if ( bfa_asic_id_ct2 ( pdev - > device ) ) {
2011-06-14 05:50:35 +07:00
if ( bfi_image_ct2_size = = 0 )
bfad_read_firmware ( pdev , & bfi_image_ct2 ,
& bfi_image_ct2_size , BFAD_FW_FILE_CT2 ) ;
return bfi_image_ct2 ;
} else if ( bfa_asic_id_ct ( pdev - > device ) ) {
if ( bfi_image_ct_size = = 0 )
bfad_read_firmware ( pdev , & bfi_image_ct ,
& bfi_image_ct_size , BFAD_FW_FILE_CT ) ;
return bfi_image_ct ;
2013-12-04 20:43:58 +07:00
} else if ( bfa_asic_id_cb ( pdev - > device ) ) {
2011-06-14 05:50:35 +07:00
if ( bfi_image_cb_size = = 0 )
bfad_read_firmware ( pdev , & bfi_image_cb ,
& bfi_image_cb_size , BFAD_FW_FILE_CB ) ;
return bfi_image_cb ;
2010-09-16 01:50:55 +07:00
}
2013-12-04 20:43:58 +07:00
return NULL ;
2010-09-16 01:50:55 +07:00
}
2009-09-24 07:46:15 +07:00
2011-04-14 01:44:03 +07:00
static void
bfad_free_fwimg ( void )
{
2011-06-14 05:50:35 +07:00
if ( bfi_image_ct2_size & & bfi_image_ct2 )
vfree ( bfi_image_ct2 ) ;
if ( bfi_image_ct_size & & bfi_image_ct )
vfree ( bfi_image_ct ) ;
if ( bfi_image_cb_size & & bfi_image_cb )
vfree ( bfi_image_cb ) ;
2011-04-14 01:44:03 +07:00
}
2009-09-24 07:46:15 +07:00
module_init ( bfad_init ) ;
module_exit ( bfad_exit ) ;
MODULE_LICENSE ( " GPL " ) ;
2015-11-26 15:54:46 +07:00
MODULE_DESCRIPTION ( " QLogic BR-series Fibre Channel HBA Driver " BFAD_PROTO_NAME ) ;
MODULE_AUTHOR ( " QLogic Corporation " ) ;
2009-09-24 07:46:15 +07:00
MODULE_VERSION ( BFAD_DRIVER_VERSION ) ;