mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-02 05:56:54 +07:00
i40e/i40evf: fix interrupt affinity bug
There exists a bug in which a 'perfect storm' can occur and cause interrupts to fail to be correctly affinitized. This causes unexpected behavior and has a substantial impact on performance when it happens. The bug occurs if there is heavy traffic, any number of CPUs that have an i40e interrupt are pegged at 100%, and the interrupt affinity for those CPUs is changed. Instead of moving to the new CPU, the interrupt continues to be polled while there is heavy traffic. The bug is most readily realized as the driver is first brought up and all interrupts start on CPU0. If there is heavy traffic and the interrupt starts polling before the interrupt is affinitized, the interrupt will be stuck on CPU0 until traffic stops. The bug, however, can also be brought about more simply by affinitizing all the interrupts to a single CPU and then attempting to move any of those interrupts off while there is heavy traffic. This patch fixes the bug by registering for update notifications from the kernel when the interrupt affinity changes. When that fires, we cache the intended affinity mask. Then, while polling, if the cpu is pegged at 100% and we failed to clean the rings, we check to make sure we have the correct affinity and stop polling if we're firing on the wrong CPU. When the kernel successfully moves the interrupt, it will start polling on the correct CPU. The performance impact is minimal since the only time this section gets executed is when performance is already compromised by the CPU. Change-ID: I4410a880159b9dba1f8297aa72bef36dca34e830 Signed-off-by: Alan Brady <alan.brady@intel.com> Tested-by: Andrew Bowers <andrewx.bowers@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
This commit is contained in:
parent
6d6fd1be2a
commit
96db776a36
@ -607,6 +607,8 @@ struct i40e_q_vector {
|
||||
unsigned long hung_detected; /* Set/Reset for hung_detection logic */
|
||||
|
||||
cpumask_t affinity_mask;
|
||||
struct irq_affinity_notify affinity_notify;
|
||||
|
||||
struct rcu_head rcu; /* to avoid race with update stats on free */
|
||||
char name[I40E_INT_NAME_STR_LEN];
|
||||
bool arm_wb_state;
|
||||
|
@ -3316,6 +3316,33 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_irq_affinity_notify - Callback for affinity changes
|
||||
* @notify: context as to what irq was changed
|
||||
* @mask: the new affinity mask
|
||||
*
|
||||
* This is a callback function used by the irq_set_affinity_notifier function
|
||||
* so that we may register to receive changes to the irq affinity masks.
|
||||
**/
|
||||
static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
|
||||
const cpumask_t *mask)
|
||||
{
|
||||
struct i40e_q_vector *q_vector =
|
||||
container_of(notify, struct i40e_q_vector, affinity_notify);
|
||||
|
||||
q_vector->affinity_mask = *mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_irq_affinity_release - Callback for affinity notifier release
|
||||
* @ref: internal core kernel usage
|
||||
*
|
||||
* This is a callback function used by the irq_set_affinity_notifier function
|
||||
* to inform the current notification subscriber that they will no longer
|
||||
* receive notifications.
|
||||
**/
|
||||
/* Intentionally empty: the q_vector's lifetime is managed by the driver
 * (allocated/freed with the VSI), not by this notifier's kref.
 */
static void i40e_irq_affinity_release(struct kref *ref) {}
|
||||
|
||||
/**
|
||||
* i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
|
||||
* @vsi: the VSI being configured
|
||||
@ -3331,10 +3358,13 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
|
||||
int rx_int_idx = 0;
|
||||
int tx_int_idx = 0;
|
||||
int vector, err;
|
||||
int irq_num;
|
||||
|
||||
for (vector = 0; vector < q_vectors; vector++) {
|
||||
struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
|
||||
|
||||
irq_num = pf->msix_entries[base + vector].vector;
|
||||
|
||||
if (q_vector->tx.ring && q_vector->rx.ring) {
|
||||
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
|
||||
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
|
||||
@ -3349,7 +3379,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
|
||||
/* skip this unused q_vector */
|
||||
continue;
|
||||
}
|
||||
err = request_irq(pf->msix_entries[base + vector].vector,
|
||||
err = request_irq(irq_num,
|
||||
vsi->irq_handler,
|
||||
0,
|
||||
q_vector->name,
|
||||
@ -3359,9 +3389,13 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
|
||||
"MSIX request_irq failed, error: %d\n", err);
|
||||
goto free_queue_irqs;
|
||||
}
|
||||
|
||||
/* register for affinity change notifications */
|
||||
q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
|
||||
q_vector->affinity_notify.release = i40e_irq_affinity_release;
|
||||
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
|
||||
/* assign the mask for this irq */
|
||||
irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
|
||||
&q_vector->affinity_mask);
|
||||
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
|
||||
}
|
||||
|
||||
vsi->irqs_ready = true;
|
||||
@ -3370,10 +3404,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
|
||||
free_queue_irqs:
|
||||
while (vector) {
|
||||
vector--;
|
||||
irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
|
||||
NULL);
|
||||
free_irq(pf->msix_entries[base + vector].vector,
|
||||
&(vsi->q_vectors[vector]));
|
||||
irq_num = pf->msix_entries[base + vector].vector;
|
||||
irq_set_affinity_notifier(irq_num, NULL);
|
||||
irq_set_affinity_hint(irq_num, NULL);
|
||||
free_irq(irq_num, &vsi->q_vectors[vector]);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
@ -4012,19 +4046,23 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
|
||||
|
||||
vsi->irqs_ready = false;
|
||||
for (i = 0; i < vsi->num_q_vectors; i++) {
|
||||
u16 vector = i + base;
|
||||
int irq_num;
|
||||
u16 vector;
|
||||
|
||||
vector = i + base;
|
||||
irq_num = pf->msix_entries[vector].vector;
|
||||
|
||||
/* free only the irqs that were actually requested */
|
||||
if (!vsi->q_vectors[i] ||
|
||||
!vsi->q_vectors[i]->num_ringpairs)
|
||||
continue;
|
||||
|
||||
/* clear the affinity notifier in the IRQ descriptor */
|
||||
irq_set_affinity_notifier(irq_num, NULL);
|
||||
/* clear the affinity_mask in the IRQ descriptor */
|
||||
irq_set_affinity_hint(pf->msix_entries[vector].vector,
|
||||
NULL);
|
||||
synchronize_irq(pf->msix_entries[vector].vector);
|
||||
free_irq(pf->msix_entries[vector].vector,
|
||||
vsi->q_vectors[i]);
|
||||
irq_set_affinity_hint(irq_num, NULL);
|
||||
synchronize_irq(irq_num);
|
||||
free_irq(irq_num, vsi->q_vectors[i]);
|
||||
|
||||
/* Tear down the interrupt queue link list
|
||||
*
|
||||
|
@ -1999,12 +1999,25 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
/* If work not completed, return budget and polling will return */
|
||||
if (!clean_complete) {
|
||||
const cpumask_t *aff_mask = &q_vector->affinity_mask;
|
||||
int cpu_id = smp_processor_id();
|
||||
|
||||
/* It is possible that the interrupt affinity has changed but,
|
||||
* if the cpu is pegged at 100%, polling will never exit while
|
||||
* traffic continues and the interrupt will be stuck on this
|
||||
* cpu. We check to make sure affinity is correct before we
|
||||
* continue to poll, otherwise we must stop polling so the
|
||||
* interrupt can move to the correct cpu.
|
||||
*/
|
||||
if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
|
||||
!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
|
||||
tx_only:
|
||||
if (arm_wb) {
|
||||
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
|
||||
i40e_enable_wb_on_itr(vsi, q_vector);
|
||||
if (arm_wb) {
|
||||
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
|
||||
i40e_enable_wb_on_itr(vsi, q_vector);
|
||||
}
|
||||
return budget;
|
||||
}
|
||||
return budget;
|
||||
}
|
||||
|
||||
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
|
||||
@ -2012,11 +2025,18 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
/* Work is done so exit the polling mode and re-enable the interrupt */
|
||||
napi_complete_done(napi, work_done);
|
||||
if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
|
||||
i40e_update_enable_itr(vsi, q_vector);
|
||||
} else { /* Legacy mode */
|
||||
|
||||
/* If we're prematurely stopping polling to fix the interrupt
|
||||
* affinity we want to make sure polling starts back up so we
|
||||
* issue a call to i40e_force_wb which triggers a SW interrupt.
|
||||
*/
|
||||
if (!clean_complete)
|
||||
i40e_force_wb(vsi, q_vector);
|
||||
else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
|
||||
i40e_irq_dynamic_enable_icr0(vsi->back, false);
|
||||
}
|
||||
else
|
||||
i40e_update_enable_itr(vsi, q_vector);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1461,12 +1461,24 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
/* If work not completed, return budget and polling will return */
|
||||
if (!clean_complete) {
|
||||
const cpumask_t *aff_mask = &q_vector->affinity_mask;
|
||||
int cpu_id = smp_processor_id();
|
||||
|
||||
/* It is possible that the interrupt affinity has changed but,
|
||||
* if the cpu is pegged at 100%, polling will never exit while
|
||||
* traffic continues and the interrupt will be stuck on this
|
||||
* cpu. We check to make sure affinity is correct before we
|
||||
* continue to poll, otherwise we must stop polling so the
|
||||
* interrupt can move to the correct cpu.
|
||||
*/
|
||||
if (likely(cpumask_test_cpu(cpu_id, aff_mask))) {
|
||||
tx_only:
|
||||
if (arm_wb) {
|
||||
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
|
||||
i40e_enable_wb_on_itr(vsi, q_vector);
|
||||
if (arm_wb) {
|
||||
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
|
||||
i40e_enable_wb_on_itr(vsi, q_vector);
|
||||
}
|
||||
return budget;
|
||||
}
|
||||
return budget;
|
||||
}
|
||||
|
||||
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
|
||||
@ -1474,7 +1486,16 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
/* Work is done so exit the polling mode and re-enable the interrupt */
|
||||
napi_complete_done(napi, work_done);
|
||||
i40e_update_enable_itr(vsi, q_vector);
|
||||
|
||||
/* If we're prematurely stopping polling to fix the interrupt
|
||||
* affinity we want to make sure polling starts back up so we
|
||||
* issue a call to i40evf_force_wb which triggers a SW interrupt.
|
||||
*/
|
||||
if (!clean_complete)
|
||||
i40evf_force_wb(vsi, q_vector);
|
||||
else
|
||||
i40e_update_enable_itr(vsi, q_vector);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -107,7 +107,8 @@ struct i40e_q_vector {
|
||||
int v_idx; /* vector index in list */
|
||||
char name[IFNAMSIZ + 9];
|
||||
bool arm_wb_state;
|
||||
cpumask_var_t affinity_mask;
|
||||
cpumask_t affinity_mask;
|
||||
struct irq_affinity_notify affinity_notify;
|
||||
};
|
||||
|
||||
/* Helper macros to switch between ints/sec and what the register uses.
|
||||
|
@ -495,6 +495,33 @@ static void i40evf_netpoll(struct net_device *netdev)
|
||||
}
|
||||
|
||||
#endif
|
||||
/**
|
||||
* i40evf_irq_affinity_notify - Callback for affinity changes
|
||||
* @notify: context as to what irq was changed
|
||||
* @mask: the new affinity mask
|
||||
*
|
||||
* This is a callback function used by the irq_set_affinity_notifier function
|
||||
* so that we may register to receive changes to the irq affinity masks.
|
||||
**/
|
||||
static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
|
||||
const cpumask_t *mask)
|
||||
{
|
||||
struct i40e_q_vector *q_vector =
|
||||
container_of(notify, struct i40e_q_vector, affinity_notify);
|
||||
|
||||
q_vector->affinity_mask = *mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40evf_irq_affinity_release - Callback for affinity notifier release
|
||||
* @ref: internal core kernel usage
|
||||
*
|
||||
* This is a callback function used by the irq_set_affinity_notifier function
|
||||
* to inform the current notification subscriber that they will no longer
|
||||
* receive notifications.
|
||||
**/
|
||||
/* Intentionally empty: the q_vector's lifetime is managed by the driver
 * (allocated/freed with the adapter), not by this notifier's kref.
 */
static void i40evf_irq_affinity_release(struct kref *ref) {}
|
||||
|
||||
/**
|
||||
* i40evf_request_traffic_irqs - Initialize MSI-X interrupts
|
||||
* @adapter: board private structure
|
||||
@ -507,6 +534,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
|
||||
{
|
||||
int vector, err, q_vectors;
|
||||
int rx_int_idx = 0, tx_int_idx = 0;
|
||||
int irq_num;
|
||||
|
||||
i40evf_irq_disable(adapter);
|
||||
/* Decrement for Other and TCP Timer vectors */
|
||||
@ -514,6 +542,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
|
||||
|
||||
for (vector = 0; vector < q_vectors; vector++) {
|
||||
struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
|
||||
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
|
||||
|
||||
if (q_vector->tx.ring && q_vector->rx.ring) {
|
||||
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
|
||||
@ -532,21 +561,23 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
|
||||
/* skip this unused q_vector */
|
||||
continue;
|
||||
}
|
||||
err = request_irq(
|
||||
adapter->msix_entries[vector + NONQ_VECS].vector,
|
||||
i40evf_msix_clean_rings,
|
||||
0,
|
||||
q_vector->name,
|
||||
q_vector);
|
||||
err = request_irq(irq_num,
|
||||
i40evf_msix_clean_rings,
|
||||
0,
|
||||
q_vector->name,
|
||||
q_vector);
|
||||
if (err) {
|
||||
dev_info(&adapter->pdev->dev,
|
||||
"Request_irq failed, error: %d\n", err);
|
||||
goto free_queue_irqs;
|
||||
}
|
||||
/* register for affinity change notifications */
|
||||
q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
|
||||
q_vector->affinity_notify.release =
|
||||
i40evf_irq_affinity_release;
|
||||
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
|
||||
/* assign the mask for this irq */
|
||||
irq_set_affinity_hint(
|
||||
adapter->msix_entries[vector + NONQ_VECS].vector,
|
||||
q_vector->affinity_mask);
|
||||
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -554,11 +585,10 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
|
||||
free_queue_irqs:
|
||||
while (vector) {
|
||||
vector--;
|
||||
irq_set_affinity_hint(
|
||||
adapter->msix_entries[vector + NONQ_VECS].vector,
|
||||
NULL);
|
||||
free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
|
||||
&adapter->q_vectors[vector]);
|
||||
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
|
||||
irq_set_affinity_notifier(irq_num, NULL);
|
||||
irq_set_affinity_hint(irq_num, NULL);
|
||||
free_irq(irq_num, &adapter->q_vectors[vector]);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
@ -599,16 +629,15 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
|
||||
**/
|
||||
static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
|
||||
{
|
||||
int i;
|
||||
int q_vectors;
|
||||
int vector, irq_num, q_vectors;
|
||||
|
||||
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
|
||||
|
||||
for (i = 0; i < q_vectors; i++) {
|
||||
irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
|
||||
NULL);
|
||||
free_irq(adapter->msix_entries[i+1].vector,
|
||||
&adapter->q_vectors[i]);
|
||||
for (vector = 0; vector < q_vectors; vector++) {
|
||||
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
|
||||
irq_set_affinity_notifier(irq_num, NULL);
|
||||
irq_set_affinity_hint(irq_num, NULL);
|
||||
free_irq(irq_num, &adapter->q_vectors[vector]);
|
||||
}
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user