Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-20 14:09:28 +07:00)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-08-27

This series contains updates to i40e and i40evf.

Carolyn provides two patches. The first changes the wording of the flow director add/remove and asynchronous-failure messages to include the fd_id, to give a way to track the operations on a given fd_id. The second adds a check during handle_link_event for unqualified modules when link is down and there is a module plugged in.

Anjali provides four patches for i40e/i40evf. The first updates the flow director messages so that a user can tell whether a filter was added or deleted. The second updates the ATR policy to not auto-disable ATR when we have errors in programming; disabling ATR on programming errors was buggy, as new rules were still being added and causing continuous errors, so with this policy change we flush instead when we see too many errors. She also adds a flow director flush counter to ethtool, to show how many times the interface had to flush and replay the flow director filter table. Finally, the driver now ignores a driver-perceived transmit hang if the number of descriptors pending is less than 4, and instead logs a stat when this situation happens, because the queue progresses forward and the stack never experiences a real hang in these situations.

Shannon provides three patches for i40e/i40evf. The first enables the l2tsel bit on receive queue contexts that are assigned to VFs so that the VF can get the stripped VLAN tag. The second adds a max buffer size parameter to the print helper so the code knows when to stop. The third removes the complaint when removing the default MAC VLAN filter: old firmware had an incorrect MAC VLAN filter that needed to be replaced at startup, and newer firmware does not have this problem, so now we only add the new filter if the removal succeeded, and there is no need to complain if the removal fails.

Ashish provides a change so that vsi->num_queue_pairs equals the number configured by the VF. This limits the number of queues that are enabled/disabled and fixes the mismatch case where a VF configures fewer queues than are allocated to it by the PF.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: commit 3a5fc21815
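The transmit-hang policy described in the message above (treat a backlog of fewer than 4 pending descriptors as "sluggish" and count it, rather than reporting a hang) can be restated as a small standalone sketch. The types and names below are simplified stand-ins, not the driver's actual structures:

```c
#include <stdbool.h>
#include <stdint.h>

#define MIN_DESC_PENDING 4  /* mirrors I40E_MIN_DESC_PENDING added by the patch */

struct tx_ring_stats {
	uint64_t packets;        /* packets completed so far */
	uint64_t tx_done_old;    /* completion count at the last check */
	uint32_t sluggish_count; /* bumped instead of reporting a hang */
};

/* Return true only when a real hang should be reported: no completions
 * since the last check AND at least MIN_DESC_PENDING descriptors queued.
 * A tiny backlog (1-3 descriptors) is only counted as "sluggish",
 * because the queue still makes forward progress in that case.
 */
static bool check_tx_hang(struct tx_ring_stats *s, uint32_t tx_pending)
{
	bool no_progress = (s->tx_done_old == s->packets);

	if (no_progress && tx_pending >= MIN_DESC_PENDING)
		return true;

	if (no_progress && tx_pending > 0)
		s->sluggish_count++;	/* log it, but do not report a hang */

	/* progress was made (or the ring is empty): record the new baseline */
	s->tx_done_old = s->packets;
	return false;
}
```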
@@ -144,6 +144,7 @@ enum i40e_state_t {
__I40E_PTP_TX_IN_PROGRESS,
__I40E_BAD_EEPROM,
__I40E_DOWN_REQUESTED,
__I40E_FD_FLUSH_REQUESTED,
};

enum i40e_interrupt_policy {
@@ -250,6 +251,11 @@ struct i40e_pf {
u16 fdir_pf_active_filters;
u16 fd_sb_cnt_idx;
u16 fd_atr_cnt_idx;
unsigned long fd_flush_timestamp;
u32 fd_flush_cnt;
u32 fd_add_err;
u32 fd_atr_cnt;
u32 fd_tcp_rule;

#ifdef CONFIG_I40E_VXLAN
__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -310,6 +316,7 @@ struct i40e_pf {
u32 tx_timeout_count;
u32 tx_timeout_recovery_level;
unsigned long tx_timeout_last_recovery;
u32 tx_sluggish_count;
u32 hw_csum_rx_error;
u32 led_status;
u16 corer_count; /* Core reset count */
@@ -608,6 +615,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
int i40e_get_current_fd_count(struct i40e_pf *pf);
int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
int i40e_get_current_atr_cnt(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
@@ -840,7 +840,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,

/* bump the tail */
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
@@ -891,7 +892,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,

i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

/* update the error if time out occurred */
if ((!cmd_completed) &&
@@ -987,7 +988,8 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
e->msg_size);

i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size);

/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
@@ -75,13 +75,15 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
* @mask: debug mask
* @desc: pointer to admin queue descriptor
* @buffer: pointer to command buffer
* @buf_len: max length of buffer
*
* Dumps debug log about adminq command with descriptor contents.
**/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer)
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len = le16_to_cpu(aq_desc->datalen);
u8 *aq_buffer = (u8 *)buffer;
u32 data[4];
u32 i = 0;
@@ -105,7 +107,9 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
memset(data, 0, sizeof(data));
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
if (buf_len < len)
len = buf_len;
for (i = 0; i < len; i++) {
data[((i % 16) / 4)] |=
((u32)aq_buffer[i]) << (8 * (i % 4));
if ((i % 16) == 15) {
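The buf_len parameter added above keeps the dump loop from reading past the caller's buffer when the descriptor's datalen is larger than what was actually allocated. A minimal user-space sketch of the same clamp-then-dump pattern; the names are illustrative, not the driver's:

```c
#include <stdint.h>
#include <stdio.h>

/* Dump at most buf_len bytes even if the reported datalen is larger,
 * mirroring the "if (buf_len < len) len = buf_len;" clamp in the patch.
 */
static void dump_aq_buffer(const uint8_t *buf, uint16_t datalen, uint16_t buf_len)
{
	uint16_t len = datalen;
	uint16_t i;

	if (buf == NULL || datalen == 0)
		return;
	if (buf_len < len)
		len = buf_len;

	for (i = 0; i < len; i++) {
		printf("%02x ", buf[i]);
		if ((i % 16) == 15)
			printf("\n");
	}
	if (len % 16)
		printf("\n");
}
```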
@@ -1356,6 +1356,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"emp reset count: %d\n", pf->empr_count);
dev_info(&pf->pdev->dev,
"pf reset count: %d\n", pf->pfr_count);
dev_info(&pf->pdev->dev,
"pf tx sluggish count: %d\n",
pf->tx_sluggish_count);
} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
struct i40e_aqc_query_port_ets_config_resp *bw_data;
struct i40e_dcbx_config *cfg =
@@ -145,6 +145,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_jabber", stats.rx_jabber),
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),

@@ -1977,6 +1978,13 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
int ret = 0;

if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
return -EBUSY;

if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
return -EBUSY;

ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);

i40e_fdir_check_and_reenable(pf);
@@ -2010,6 +2018,13 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
return -ENOSPC;

if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
return -EBUSY;

if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
return -EBUSY;

fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
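Both ethtool paths above now bail out with -EBUSY while a reset or a flow director flush is in progress, so user requests cannot race the table flush. A rough standalone sketch of that gating; the state structure is a simplified stand-in:

```c
#include <errno.h>
#include <stdbool.h>

struct pf_state {
	bool reset_pending;    /* stand-in for the reset-recovery state bits */
	bool flush_requested;  /* stand-in for __I40E_FD_FLUSH_REQUESTED */
};

/* Refuse filter add/delete requests while the hardware table is being
 * reset or flushed; callers are expected to retry later.
 */
static int fdir_entry_allowed(const struct pf_state *st)
{
	if (st->reset_pending)
		return -EBUSY;
	if (st->flush_requested)
		return -EBUSY;
	return 0;
}
```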
@@ -37,9 +37,9 @@ static const char i40e_driver_string[] =

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 4
#define DRV_VERSION_BUILD 21
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 0
#define DRV_VERSION_BUILD 4
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1239,8 +1239,11 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
*
* Some older firmware configurations set up a default promiscuous VLAN
* filter that needs to be removed.
**/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
struct i40e_aqc_remove_macvlan_element_data element;
struct i40e_pf *pf = vsi->back;
@@ -1248,15 +1251,18 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)

/* Only appropriate for the PF main VSI */
if (vsi->type != I40E_VSI_MAIN)
return;
return -EINVAL;

memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
if (aq_ret)
dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
return -ENOENT;

return 0;
}

/**
@@ -1385,18 +1391,30 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct sockaddr *addr = p;
struct i40e_mac_filter *f;

if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;

netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
netdev_info(netdev, "already using mac address %pM\n",
addr->sa_data);
return 0;
}

if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return -EADDRNOTAVAIL;

if (ether_addr_equal(hw->mac.addr, addr->sa_data))
netdev_info(netdev, "returning to hw mac address %pM\n",
hw->mac.addr);
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1410,24 +1428,33 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
}
}

f = i40e_find_mac(vsi, addr->sa_data, false, true);
if (!f) {
/* In order to be sure to not drop any packets, add the
* new address first then delete the old one.
*/
f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
false, false);
if (!f)
return -ENOMEM;
if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
struct i40e_aqc_remove_macvlan_element_data element;

i40e_sync_vsi_filters(vsi);
memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, netdev->dev_addr);
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
} else {
i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
false, false);
i40e_sync_vsi_filters(vsi);
}

if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
struct i40e_aqc_add_macvlan_element_data element;

memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, hw->mac.addr);
element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
} else {
f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
false, false);
if (f)
f->is_laa = true;
if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
}

i40e_sync_vsi_filters(vsi);
ether_addr_copy(netdev->dev_addr, addr->sa_data);

return 0;
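The reworked i40e_set_mac above keeps the "add the new address first, then delete the old one" ordering so no frames are dropped during the switch. A simplified, self-contained sketch of that ordering against a toy filter table (everything here is illustrative, not the driver's API):

```c
#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6
#define MAX_FILTERS 8

/* Toy MAC filter table, standing in for the VSI filter list. */
static unsigned char filters[MAX_FILTERS][ETH_ALEN];
static bool in_use[MAX_FILTERS];

static int filter_add(const unsigned char *mac)
{
	for (int i = 0; i < MAX_FILTERS; i++) {
		if (!in_use[i]) {
			memcpy(filters[i], mac, ETH_ALEN);
			in_use[i] = true;
			return 0;
		}
	}
	return -1;			/* table full */
}

static void filter_del(const unsigned char *mac)
{
	for (int i = 0; i < MAX_FILTERS; i++)
		if (in_use[i] && !memcmp(filters[i], mac, ETH_ALEN))
			in_use[i] = false;
}

/* Add the new address before deleting the old one, so there is never a
 * moment when no filter matches and traffic would be dropped.
 */
static int change_mac(unsigned char *cur, const unsigned char *next)
{
	if (!memcmp(cur, next, ETH_ALEN))
		return 0;		/* nothing to do */

	if (filter_add(next) < 0)
		return -1;		/* keep the old filter on failure */

	filter_del(cur);
	memcpy(cur, next, ETH_ALEN);
	return 0;
}
```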
@@ -1796,9 +1823,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
kfree(add_list);
add_list = NULL;

if (add_happened && (!aq_ret)) {
/* do nothing */;
} else if (add_happened && (aq_ret)) {
if (add_happened && aq_ret &&
pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
dev_info(&pf->pdev->dev,
"add filter failed, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
@@ -4480,11 +4506,26 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
netif_carrier_on(vsi->netdev);
} else if (vsi->netdev) {
i40e_print_link_message(vsi, false);
/* need to check for qualified module here*/
if ((pf->hw.phy.link_info.link_info &
I40E_AQ_MEDIA_AVAILABLE) &&
(!(pf->hw.phy.link_info.an_info &
I40E_AQ_QUALIFIED_MODULE)))
netdev_err(vsi->netdev,
"the driver failed to link because an unqualified module was detected.");
}

/* replay FDIR SB filters */
if (vsi->type == I40E_VSI_FDIR)
if (vsi->type == I40E_VSI_FDIR) {
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
}
i40e_fdir_filter_restore(vsi);
}
i40e_service_event_schedule(pf);

return 0;
@@ -5125,6 +5166,7 @@ int i40e_get_current_fd_count(struct i40e_pf *pf)
I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
return fcnt_prog;
}

/**
* i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
* @pf: board private structure
@@ -5133,15 +5175,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
u32 fcnt_prog, fcnt_avail;

if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
return;

/* Check if, FD SB or ATR was auto disabled and if there is enough room
* to re-enable
*/
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
(pf->fd_add_err == 0) ||
(i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
@@ -5158,23 +5202,84 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
}
}

#define I40E_MIN_FD_FLUSH_INTERVAL 10
/**
* i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
* @pf: board private structure
**/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
int flush_wait_retry = 50;
int reg;

if (time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
pf->fd_flush_timestamp = jiffies;
pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
i40e_flush(&pf->hw);
pf->fd_flush_cnt++;
pf->fd_add_err = 0;
do {
/* Check FD flush status every 5-6msec */
usleep_range(5000, 6000);
reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
break;
} while (flush_wait_retry--);
if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
} else {
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);

pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
}
}

/**
* i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
* @pf: board private structure
**/
int i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming desc in transit if the filters are
* being applied really fast; before we see the first
* filter miss error on Rx queue 0. Accumulating enough error messages before
* reacting will make sure we don't cause flush too often.
*/
#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
* i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
* @pf: board private structure
**/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
return;

/* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &pf->state))
return;

if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
(i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
(i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
i40e_fdir_flush_and_replay(pf);

i40e_fdir_check_and_reenable(pf);

if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->flags & I40E_FLAG_FD_SB_ENABLED))
pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
}

/**
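The reinit subtask above only escalates to a full flush when enough programming errors have accumulated (I40E_MAX_FD_PROGRAM_ERROR) and the current ATR count is outside its expected bounds; otherwise it simply tries to re-enable ATR/SB. A condensed sketch of that decision, using simplified counters rather than the driver's structures:

```c
#include <stdbool.h>
#include <stdint.h>

#define MAX_FD_PROGRAM_ERROR 256	/* mirrors I40E_MAX_FD_PROGRAM_ERROR */

struct fd_counters {
	uint32_t add_err;		/* programming errors seen so far */
	uint32_t atr_cnt_snapshot;	/* ATR count taken at the last error */
	uint32_t atr_cnt_now;		/* ATR filters currently programmed */
	uint32_t pf_filter_count;	/* guaranteed sideband filter space */
};

/* Decide whether to flush the whole filter table and replay the
 * sideband rules, instead of quietly auto-disabling ATR.
 */
static bool should_flush_and_replay(const struct fd_counters *c)
{
	return c->add_err >= MAX_FD_PROGRAM_ERROR &&
	       c->atr_cnt_now >= c->atr_cnt_snapshot &&
	       c->atr_cnt_now > c->pf_filter_count;
}
```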
@@ -5420,6 +5525,13 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
memcpy(&pf->hw.phy.link_info_old, hw_link_info,
sizeof(pf->hw.phy.link_info_old));

/* check for unqualified module, if link is down */
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)))
dev_err(&pf->pdev->dev,
"The driver failed to link because an unqualified module was detected.\n");

/* update link status */
hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
@@ -7086,6 +7198,11 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
}
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
@@ -7421,14 +7538,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
/* The following two steps are necessary to prevent reception
* of tagged packets - by default the NVM loads a MAC-VLAN
* filter that will accept any tagged packet. This is to
* prevent that during normal operations until a specific
* VLAN tag filter has been set.
/* The following steps are necessary to prevent reception
* of tagged packets - some older NVM configurations load a
* default a MAC-VLAN filter that accepts any tagged packet
* which must be replaced by a normal filter.
*/
i40e_rm_default_mac_filter(vsi, mac_addr);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
if (!i40e_rm_default_mac_filter(vsi, mac_addr))
i40e_add_filter(vsi, mac_addr,
I40E_VLAN_ANY, false, true);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -7644,7 +7761,22 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
f_count++;

if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
i40e_aq_mac_address_write(&vsi->back->hw,
struct i40e_aqc_remove_macvlan_element_data element;

memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, f->macaddr);
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
ret = i40e_aq_remove_macvlan(hw, vsi->seid,
&element, 1, NULL);
if (ret) {
/* some older FW has a different default */
element.flags |=
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
i40e_aq_remove_macvlan(hw, vsi->seid,
&element, 1, NULL);
}

i40e_aq_mac_address_write(hw,
I40E_AQC_WRITE_TYPE_LAA_WOL,
f->macaddr, NULL);
}
@@ -52,10 +52,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);

/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw,
enum i40e_debug_mask mask,
void *desc,
void *buffer);
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);

void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
@@ -224,15 +224,19 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
dev_info(&pf->pdev->dev,
"Filter command send failed for PCTYPE %d (ret = %d)\n",
fd_data->pctype, ret);
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
err = true;
} else {
if (add)
dev_info(&pf->pdev->dev,
"Filter OK for PCTYPE %d (ret = %d)\n",
fd_data->pctype, ret);
"Filter OK for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
else
dev_info(&pf->pdev->dev,
"Filter deleted for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
}

return err ? -EOPNOTSUPP : 0;
}

@@ -276,10 +280,18 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
tcp->source = fd_data->src_port;

if (add) {
pf->fd_tcp_rule++;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
} else {
pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
}
}

fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
@@ -287,12 +299,17 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,

if (ret) {
dev_info(&pf->pdev->dev,
"Filter command send failed for PCTYPE %d (ret = %d)\n",
fd_data->pctype, ret);
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
err = true;
} else {
dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
fd_data->pctype, ret);
if (add)
dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
fd_data->pctype, fd_data->fd_id);
else
dev_info(&pf->pdev->dev,
"Filter deleted for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
}

return err ? -EOPNOTSUPP : 0;
@@ -355,13 +372,18 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,

if (ret) {
dev_info(&pf->pdev->dev,
"Filter command send failed for PCTYPE %d (ret = %d)\n",
fd_data->pctype, ret);
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
err = true;
} else {
if (add)
dev_info(&pf->pdev->dev,
"Filter OK for PCTYPE %d (ret = %d)\n",
fd_data->pctype, ret);
"Filter OK for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
else
dev_info(&pf->pdev->dev,
"Filter deleted for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
}
}

@@ -443,9 +465,15 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
rx_desc->wb.qword0.hi_dword.fd_id);

pf->fd_add_err++;
/* store the current atr filter count */
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

/* filter programming failed most likely due to table full */
fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
@@ -454,29 +482,21 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
* FD ATR/SB and then re-enable it when there is room.
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
/* Turn off ATR first */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
!(pf->auto_disable_flags &
I40E_FLAG_FD_ATR_ENABLED)) {
dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_ATR_ENABLED;
pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
} else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
!(pf->auto_disable_flags &
I40E_FLAG_FD_SB_ENABLED)) {
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_SB_ENABLED;
pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
}
} else {
dev_info(&pdev->dev, "FD filter programming error\n");
dev_info(&pdev->dev,
"FD filter programming failed due to incorrect filter parameters\n");
}
} else if (error ==
(0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
rx_desc->wb.qword0.hi_dword.fd_id);
}
}
@@ -587,6 +607,7 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
u32 tx_pending = i40e_get_tx_pending(tx_ring);
struct i40e_pf *pf = tx_ring->vsi->back;
bool ret = false;

clear_check_for_tx_hang(tx_ring);
@@ -603,10 +624,17 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* pending but without time to complete it yet.
*/
if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
tx_pending) {
(tx_pending >= I40E_MIN_DESC_PENDING)) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
} else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
(tx_pending < I40E_MIN_DESC_PENDING) &&
(tx_pending > 0)) {
if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
tx_pending, tx_ring->queue_index);
pf->tx_sluggish_count++;
} else {
/* update completed stats and disarm the hang check */
tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
@@ -121,6 +121,7 @@ enum i40e_dyn_idx_t {
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4

#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
@@ -73,7 +73,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
{
struct i40e_pf *pf = vf->pf;

return qid < pf->vsi[vsi_id]->num_queue_pairs;
return qid < pf->vsi[vsi_id]->alloc_queue_pairs;
}

/**
@@ -350,6 +350,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.prefena = 1;
rx_ctx.l2tsel = 1;

/* clear the context in the HMC */
ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -468,7 +469,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

/* map PF queues to VF queues */
for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) {
u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
@@ -477,7 +478,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)

/* map PF queues to VSI */
for (j = 0; j < 7; j++) {
if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) {
reg = 0x07FF07FF; /* unused */
} else {
u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
@@ -584,7 +585,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
if (ret)
goto error_alloc;
total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

/* store the total qps number for the runtime
@@ -1123,7 +1124,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
vfres->vsi_res[i].num_queue_pairs =
pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
memcpy(vfres->vsi_res[i].default_mac_addr,
vf->default_lan_addr.addr, ETH_ALEN);
i++;
@@ -1209,6 +1210,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_virtchnl_vsi_queue_config_info *qci =
(struct i40e_virtchnl_vsi_queue_config_info *)msg;
struct i40e_virtchnl_queue_pair_info *qpi;
struct i40e_pf *pf = vf->pf;
u16 vsi_id, vsi_queue_id;
i40e_status aq_ret = 0;
int i;
@@ -1242,6 +1244,8 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
}
/* set vsi num_queue_pairs in use to num configured by vf */
pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs;

error_param:
/* send the response to the vf */
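Two of the VF-facing changes above are easy to restate: the Rx queue context now sets l2tsel so the VF sees the stripped VLAN tag, and num_queue_pairs is trimmed to whatever the VF actually configured, while alloc_queue_pairs keeps the PF-side allocation. A schematic sketch with stand-in types (not the driver's HMC context structures):

```c
#include <stdint.h>

/* Stand-in for the Rx queue context fields touched in the patch. */
struct rx_queue_ctx {
	uint8_t crcstrip;
	uint8_t prefena;
	uint8_t l2tsel;		/* 1 = report the stripped VLAN tag to the VF */
};

struct vsi_queues {
	uint16_t alloc_queue_pairs;	/* what the PF allocated */
	uint16_t num_queue_pairs;	/* what the VF actually uses */
};

static void config_vf_rx_queue(struct rx_queue_ctx *ctx)
{
	ctx->crcstrip = 1;
	ctx->prefena = 1;
	ctx->l2tsel = 1;	/* let the VF receive the stripped VLAN tag */
}

/* The VF may configure fewer queues than were allocated; only the
 * configured count is enabled/disabled at runtime.
 */
static void set_vf_configured_queues(struct vsi_queues *vsi, uint16_t configured)
{
	if (configured > vsi->alloc_queue_pairs)
		configured = vsi->alloc_queue_pairs;
	vsi->num_queue_pairs = configured;
}
```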
@@ -788,7 +788,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,

/* bump the tail */
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
@@ -842,7 +843,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,

i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
buff_size);

/* update the error if time out occurred */
if ((!cmd_completed) &&
@@ -938,7 +940,8 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
hw->aq.nvm_busy = false;

i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size);

/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
@@ -75,13 +75,15 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
* @mask: debug mask
* @desc: pointer to admin queue descriptor
* @buffer: pointer to command buffer
* @buf_len: max length of buffer
*
* Dumps debug log about adminq command with descriptor contents.
**/
void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer)
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len = le16_to_cpu(aq_desc->datalen);
u8 *aq_buffer = (u8 *)buffer;
u32 data[4];
u32 i = 0;
@@ -105,7 +107,9 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
memset(data, 0, sizeof(data));
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
if (buf_len < len)
len = buf_len;
for (i = 0; i < len; i++) {
data[((i % 16) / 4)] |=
((u32)aq_buffer[i]) << (8 * (i % 4));
if ((i % 16) == 15) {
@@ -53,10 +53,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
bool i40evf_asq_done(struct i40e_hw *hw);

/* debug function for adminq */
void i40evf_debug_aq(struct i40e_hw *hw,
enum i40e_debug_mask mask,
void *desc,
void *buffer);
void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);

void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
@@ -163,11 +163,13 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* pending but without time to complete it yet.
*/
if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
tx_pending) {
(tx_pending >= I40E_MIN_DESC_PENDING)) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
} else {
} else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
!(tx_pending < I40E_MIN_DESC_PENDING) ||
!(tx_pending > 0)) {
/* update completed stats and disarm the hang check */
tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
@@ -121,6 +121,7 @@ enum i40e_dyn_idx_t {
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4

#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";

#define DRV_VERSION "0.9.40"
#define DRV_VERSION "1.0.1"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";