[SCSI] lpfc 8.3.42: Fix crash on driver load due to cpu affinity logic

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Author:    James Smart <james.smart@emulex.com>
Committer: James Bottomley
Date:      2013-09-06 12:20:36 -04:00
Commit:    ec2087a725
Parent:    14660f4fc8
2 changed files with 27 additions and 6 deletions
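
The crux of the fix, as the lpfc_sli4_set_affinity() hunks below show, is that the
affinity mapping assumed physical package ids start at 0: the walk over packages
began at phys_id 0 and wrapped back to 0, which can go wrong on systems whose lowest
reported package id is not 0 (and, per the subject line, could crash the driver on
load). The patch records both the lowest and highest package id seen and starts and
wraps at min_phys_id instead. A minimal standalone sketch of that idea, with
illustrative names and values rather than the driver's data structures:

#include <stdio.h>

/* Illustrative per-CPU physical package ids; on some systems the lowest
 * reported id is not 0. */
static const int phys_ids[] = { 1, 1, 3, 3 };
#define EX_NUM_CPUS ((int)(sizeof(phys_ids) / sizeof(phys_ids[0])))

int main(void)
{
	int i, phys_id;
	int max_phys_id = 0;
	int min_phys_id = 0xff;

	/* Track both the lowest and the highest package id present,
	 * as the patch now does while building the cpu_map. */
	for (i = 0; i < EX_NUM_CPUS; i++) {
		if (phys_ids[i] > max_phys_id)
			max_phys_id = phys_ids[i];
		if (phys_ids[i] < min_phys_id)
			min_phys_id = phys_ids[i];
	}

	/* Step through package ids starting at min_phys_id and wrap back
	 * to min_phys_id, not to 0, so a package id that may not exist is
	 * never used as the starting point. */
	phys_id = min_phys_id;
	for (i = 0; i <= max_phys_id - min_phys_id; i++) {
		printf("searching package id %d\n", phys_id);
		phys_id++;
		if (phys_id > max_phys_id)
			phys_id = min_phys_id;
	}
	return 0;
}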

--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c

@@ -4013,8 +4013,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 # For [0], FCP commands are issued to Work Queues ina round robin fashion.
 # For [1], FCP commands are issued to a Work Queue associated with the
 # current CPU.
+# It would be set to 1 by the driver if it's able to set up cpu affinity
+# for FCP I/Os through Work Queue associated with the current CPU. Otherwise,
+# roundrobin scheduling of FCP I/Os through WQs will be used.
 */
-LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
 	"issuing commands [0] - Round Robin, [1] - Current CPU");
 
 /*
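
The fcp_io_sched description above boils down to a two-way policy: either spread FCP
commands across all work queues round robin, or send each command to the work queue
the driver associated with the submitting CPU. A rough model of that selection, using
a hypothetical helper rather than the driver's actual submit path:

#include <stdio.h>

/* Two policies matching the module parameter values described above:
 * 0 - round robin across the FCP work queues,
 * 1 - the work queue associated with the submitting CPU.
 * All names here are illustrative, not lpfc's own. */
enum fcp_sched_policy {
	SCHED_ROUND_ROBIN = 0,
	SCHED_BY_CPU = 1,
};

static unsigned int rr_cursor;	/* round-robin position */

static unsigned int pick_fcp_wq(enum fcp_sched_policy policy,
				unsigned int cur_cpu_channel,
				unsigned int num_wqs)
{
	if (policy == SCHED_BY_CPU)
		return cur_cpu_channel;		/* WQ mapped to this CPU */
	return rr_cursor++ % num_wqs;		/* next WQ in the rotation */
}

int main(void)
{
	/* A CPU whose mapped channel is 2, with 4 work queues configured. */
	printf("by-cpu:      wq %u\n", pick_fcp_wq(SCHED_BY_CPU, 2, 4));
	printf("round robin: wq %u\n", pick_fcp_wq(SCHED_ROUND_ROBIN, 2, 4));
	printf("round robin: wq %u\n", pick_fcp_wq(SCHED_ROUND_ROBIN, 2, 4));
	return 0;
}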

--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c

@@ -8399,7 +8399,8 @@ static int
 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 {
 	int i, idx, saved_chann, used_chann, cpu, phys_id;
-	int max_phys_id, num_io_channel, first_cpu;
+	int max_phys_id, min_phys_id;
+	int num_io_channel, first_cpu, chan;
 	struct lpfc_vector_map_info *cpup;
 #ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpuinfo;
@@ -8417,6 +8418,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 			phba->sli4_hba.num_present_cpu));
 
 	max_phys_id = 0;
+	min_phys_id = 0xff;
 	phys_id = 0;
 	num_io_channel = 0;
 	first_cpu = LPFC_VECTOR_MAP_EMPTY;
@@ -8440,9 +8442,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 
 		if (cpup->phys_id > max_phys_id)
 			max_phys_id = cpup->phys_id;
+		if (cpup->phys_id < min_phys_id)
+			min_phys_id = cpup->phys_id;
 		cpup++;
 	}
 
+	phys_id = min_phys_id;
 	/* Now associate the HBA vectors with specific CPUs */
 	for (idx = 0; idx < vectors; idx++) {
 		cpup = phba->sli4_hba.cpu_map;
@@ -8453,13 +8458,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 			for (i = 1; i < max_phys_id; i++) {
 				phys_id++;
 				if (phys_id > max_phys_id)
-					phys_id = 0;
+					phys_id = min_phys_id;
 				cpu = lpfc_find_next_cpu(phba, phys_id);
 				if (cpu == LPFC_VECTOR_MAP_EMPTY)
 					continue;
 				goto found;
 			}
 
+			/* Use round robin for scheduling */
+			phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+			chan = 0;
+			cpup = phba->sli4_hba.cpu_map;
+			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+				cpup->channel_id = chan;
+				cpup++;
+				chan++;
+				if (chan >= phba->cfg_fcp_io_channel)
+					chan = 0;
+			}
+
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"3329 Cannot set affinity:"
 					"Error mapping vector %d (%d)\n",
@@ -8497,7 +8514,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 		/* Spread vector mapping across multple physical CPU nodes */
 		phys_id++;
 		if (phys_id > max_phys_id)
-			phys_id = 0;
+			phys_id = min_phys_id;
 	}
 
 	/*
@@ -8507,7 +8524,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 	 * Base the remaining IO channel assigned, to IO channels already
 	 * assigned to other CPUs on the same phys_id.
 	 */
-	for (i = 0; i <= max_phys_id; i++) {
+	for (i = min_phys_id; i <= max_phys_id; i++) {
 		/*
 		 * If there are no io channels already mapped to
 		 * this phys_id, just round robin thru the io_channels.
@@ -8589,10 +8606,11 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 	if (num_io_channel != phba->sli4_hba.num_present_cpu)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3333 Set affinity mismatch:"
-				"%d chann != %d cpus: %d vactors\n",
+				"%d chann != %d cpus: %d vectors\n",
 				num_io_channel, phba->sli4_hba.num_present_cpu,
 				vectors);
 
+	/* Enable using cpu affinity for scheduling */
 	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
 	return 1;
 }
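
When the per-vector affinity walk above cannot find a CPU to map a vector to, the new
error path no longer leaves the cpu_map half-initialized: it switches the policy back
to LPFC_FCP_SCHED_ROUND_ROBIN and hands every present CPU a channel_id round robin
before logging message 3329. A standalone sketch of that fallback assignment, with
made-up sizes in place of the driver's num_present_cpu and cfg_fcp_io_channel:

#include <stdio.h>

/* Illustrative sizes only. */
#define EX_NUM_CPUS	8
#define EX_IO_CHANNELS	3

int main(void)
{
	int channel_id[EX_NUM_CPUS];
	int i, chan = 0;

	/* Same shape as the fallback loop added above: hand out channel
	 * ids 0..EX_IO_CHANNELS-1 repeatedly across all present CPUs. */
	for (i = 0; i < EX_NUM_CPUS; i++) {
		channel_id[i] = chan;
		chan++;
		if (chan >= EX_IO_CHANNELS)
			chan = 0;
	}

	for (i = 0; i < EX_NUM_CPUS; i++)
		printf("cpu %d -> io channel %d\n", i, channel_id[i]);
	return 0;
}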