Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "One last pull request before heading to Vancouver for LPC, here we have:

   1) Don't forget to free VSI contexts during ice driver unload, from
      Victor Raj.

   2) Don't forget napi delete calls during device remove in ice driver,
      from Dave Ertman.

   3) Don't request VLAN tag insertion on the ibmvnic device when the SKB
      doesn't have a VLAN tag at all.

   4) IPv4 frag handling code has to accommodate the situation where two
      threads try to insert the same fragment into the hash table at the
      same time. From Eric Dumazet.

   5) Relatedly, don't separate flows on L4 protocol ports for fragmented
      frames, also from Eric Dumazet.

   6) Memory leaks in qed driver, from Denis Bolotin.

   7) Correct valid MTU range in smsc95xx driver, from Stefan Wahren.

   8) Validate cls_flower nested policies properly, from Jakub Kicinski.

   9) Clearing of stats counters in the mv88e6xxx driver doesn't retain
      important bits in the G1_STATS_OP register, causing the chip to
      hang. Fix from Andrew Lunn"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
  act_mirred: clear skb->tstamp on redirect
  net: dsa: mv88e6xxx: Fix clearing of stats counters
  tipc: fix link re-establish failure
  net: sched: cls_flower: validate nested enc_opts_policy to avoid warning
  net: mvneta: correct typo
  flow_dissector: do not dissect l4 ports for fragments
  net: qualcomm: rmnet: Fix incorrect assignment of real_dev
  net: aquantia: allow rx checksum offload configuration
  net: aquantia: invalid checksumm offload implementation
  net: aquantia: fixed enable unicast on 32 macvlan
  net: aquantia: fix potential IOMMU fault after driver unbind
  net: aquantia: synchronized flow control between mac/phy
  net: smsc95xx: Fix MTU range
  net: stmmac: Fix RX packet size > 8191
  qed: Fix potential memory corruption
  qed: Fix SPQ entries not returned to pool in error flows
  qed: Fix blocking/unlimited SPQ entries leak
  qed: Fix memory/entry leak in qed_init_sp_request()
  inet: frags: better deal with smp races
  net: hns3: bugfix for not checking return value
  ...
Merged by Linus Torvalds on 2018-11-11 17:09:48 -06:00 (commit 7a3765ed66).
54 changed files with 430 additions and 199 deletions.


@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
if (err)
return err;
/* Keep the histogram mode bits */
val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);


@ -407,13 +407,13 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 fc = aq_nic->aq_nic_cfg.flow_control;
pause->autoneg = 0;
if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
pause->rx_pause = 1;
if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
pause->tx_pause = 1;
pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
}
static int aq_ethtool_set_pauseparam(struct net_device *ndev,


@ -204,6 +204,10 @@ struct aq_hw_ops {
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
};
struct aq_fw_ops {
@ -226,6 +230,8 @@ struct aq_fw_ops {
int (*update_stats)(struct aq_hw_s *self);
u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
int (*set_flow_control)(struct aq_hw_s *self);
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,


@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
bool is_lro = false;
int err = 0;
if (aq_cfg->hw_features & NETIF_F_LRO) {
aq_cfg->features = features;
if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
is_lro = features & NETIF_F_LRO;
if (aq_cfg->is_lro != is_lro) {
@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
}
}
}
if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
aq_cfg);
return 0;
return err;
}
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)


@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
}
cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
cfg->hw_features = cfg->aq_hw_caps->hw_features;
cfg->features = cfg->aq_hw_caps->hw_features;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
int err = self->aq_fw_ops->update_link_status(self->aq_hw);
u32 fc = 0;
if (err)
return err;
@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
AQ_CFG_DRV_NAME, self->link_status.mbps,
self->aq_hw->aq_link_status.mbps);
aq_nic_update_interrupt_moderation_settings(self);
/* Driver has to update flow control settings on RX block
* on any link event.
* We should query FW whether it negotiated FC.
*/
if (self->aq_fw_ops->get_flow_control)
self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
if (self->aq_hw_ops->hw_set_fc)
self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
}
self->link_status = self->aq_hw->aq_link_status;
@ -590,7 +600,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
}
}
if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
packet_filter |= IFF_MULTICAST;
self->mc_list.count = i;
self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
@ -772,7 +782,9 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
/* Asym is when either RX or TX, but not both */
if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);


@ -23,7 +23,7 @@ struct aq_vec_s;
struct aq_nic_cfg_s {
const struct aq_hw_caps_s *aq_hw_caps;
u64 hw_features;
u64 features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
u32 vecs; /* vecs==allocated irqs */


@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
return !!budget;
}
static void aq_rx_checksum(struct aq_ring_s *self,
struct aq_ring_buff_s *buff,
struct sk_buff *skb)
{
if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
return;
if (unlikely(buff->is_cso_err)) {
++self->stats.rx.errors;
skb->ip_summed = CHECKSUM_NONE;
return;
}
if (buff->is_ip_cso) {
__skb_incr_checksum_unnecessary(skb);
if (buff->is_udp_cso || buff->is_tcp_cso)
__skb_incr_checksum_unnecessary(skb);
} else {
skb->ip_summed = CHECKSUM_NONE;
}
}
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
skb->protocol = eth_type_trans(skb, ndev);
if (unlikely(buff->is_cso_err)) {
++self->stats.rx.errors;
skb->ip_summed = CHECKSUM_NONE;
} else {
if (buff->is_ip_cso) {
__skb_incr_checksum_unnecessary(skb);
if (buff->is_udp_cso || buff->is_tcp_cso)
__skb_incr_checksum_unnecessary(skb);
} else {
skb->ip_summed = CHECKSUM_NONE;
}
}
aq_rx_checksum(self, buff, skb);
skb_set_hash(skb, buff->rss_hash,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :


@ -100,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
return err;
}
static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
return 0;
}
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
u32 tc = 0U;
u32 buff_size = 0U;
unsigned int i_priority = 0U;
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@ -138,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_B0_RXBUF_MAX;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@ -150,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(buff_size *
(1024U / 32U) * 50U) /
100U, tc);
hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
@ -229,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
NETIF_F_RXCSUM));
hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
NETIF_F_RXCSUM));
/* LSO offloads*/
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@ -655,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
unsigned int is_err = 1U;
unsigned int is_rx_check_sum_enabled = 0U;
unsigned int pkt_type = 0U;
u8 rx_stat = 0U;
if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
break;
@ -665,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->hw_head];
is_err = (0x0000003CU & rxd_wb->status);
rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
is_err &= ~0x20U; /* exclude validity bit */
pkt_type = 0xFFU & (rxd_wb->type >> 4);
if (is_rx_check_sum_enabled) {
if (0x0U == (pkt_type & 0x3U))
buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
if (is_rx_check_sum_enabled & BIT(0) &&
(0x0U == (pkt_type & 0x3U)))
buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
if (is_rx_check_sum_enabled & BIT(1)) {
if (0x4U == (pkt_type & 0x1CU))
buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
!!(rx_stat & BIT(3));
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
/* Checksum offload workaround for small packets */
if (rxd_wb->pkt_len <= 60) {
buff->is_ip_cso = 0U;
buff->is_cso_err = 0U;
}
buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
!!(rx_stat & BIT(3));
}
buff->is_cso_err = !!(rx_stat & 0x6);
/* Checksum offload workaround for small packets */
if (unlikely(rxd_wb->pkt_len <= 60)) {
buff->is_ip_cso = 0U;
buff->is_cso_err = 0U;
}
is_err &= ~0x18U;
dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
if (is_err || rxd_wb->type & 0x1000U) {
/* status error or DMA error */
if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
/* MAC error or DMA error */
buff->is_error = 1U;
} else {
if (self->aq_nic_cfg->is_rss) {
@ -915,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
/* Invalidate Descriptor Cache to prevent writing to the cached
* descriptors and to the data pointer of those descriptors
*/
hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
return aq_hw_err_from_flags(self);
}
@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
.hw_set_offload = hw_atl_b0_hw_offload_set,
.hw_set_fc = hw_atl_b0_set_fc,
};
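
For reference, a decoder for the rx_stat field as it is used above; the bit meanings are inferred purely from the driver logic, not from a datasheet:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Inferred (assumed) bit meanings: bit 0 - MAC/DMA error, bit 1 - IPv4
 * checksum error, bit 2 - TCP/UDP checksum error, bit 3 - TCP/UDP
 * checksum was computed by hardware. */
static void decode_rx_stat(uint8_t rx_stat)
{
	bool mac_err   = rx_stat & 0x1;
	bool ip_cso_ok = !(rx_stat & 0x2);
	bool l4_cso_ok = (rx_stat & 0x8) && !(rx_stat & 0x4);
	bool cso_err   = rx_stat & 0x6;	/* either checksum failed */

	printf("rx_stat=%#x mac_err=%d ip_ok=%d l4_ok=%d cso_err=%d\n",
	       rx_stat, mac_err, ip_cso_ok, l4_cso_ok, cso_err);
}

int main(void)
{
	decode_rx_stat(0x8);	/* clean packet, L4 checksum verified */
	decode_rx_stat(0x6);	/* both checksums failed */
	return 0;
}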


@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
init);
}
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc, u32 buffer)
{


@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc,
u32 buffer);
/* set rdm rx dma descriptor cache init */
void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
/* set rx xoff enable (per tc) */
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
u32 buffer);


@ -293,6 +293,24 @@
/* default value of bitfield desc{d}_reset */
#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
/* rdm_desc_init_i bitfield definitions
* preprocessor definitions for the bitfield rdm_desc_init_i.
* port="pif_rdm_desc_init_i"
*/
/* register address for bitfield rdm_desc_init_i */
#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
/* bitmask for bitfield rdm_desc_init_i */
#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
/* inverted bitmask for bitfield rdm_desc_init_i */
#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
/* lower bit position of bitfield rdm_desc_init_i */
#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
/* width of bitfield rdm_desc_init_i */
#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
/* default value of bitfield rdm_desc_init_i */
#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
* port="pif_rdm_int_desc_wrb_en_i"


@ -30,6 +30,8 @@
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
@ -451,6 +453,24 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
return 0;
}
static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
{
u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
*fcmode = AQ_NIC_FC_RX;
else
*fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
else
if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
*fcmode = AQ_NIC_FC_TX;
else
*fcmode = 0;
return 0;
}
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
.deinit = aq_fw2x_deinit,
@ -465,4 +485,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.set_eee_rate = aq_fw2x_set_eee_rate,
.get_eee_rate = aq_fw2x_get_eee_rate,
.set_flow_control = aq_fw2x_set_flow_control,
.get_flow_control = aq_fw2x_get_flow_control
};


@ -3760,7 +3760,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Hardware table is only cleared when pf resets */
if (!(handle->flags & HNAE3_SUPPORT_VF)) {
ret = hns3_restore_vlan(netdev);
return ret;
if (ret)
return ret;
}
ret = hns3_restore_fd_rules(netdev);


@ -1545,7 +1545,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
if (adapter->vlan_header_insertion) {
if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
}


@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_SCTP_CRC |
@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
hw_features = hw_enc_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
netdev->hw_features |= hw_features;
netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;


@ -76,6 +76,8 @@ extern const char ice_drv_ver[];
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_RESET_WAIT 20
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@ -189,7 +191,6 @@ struct ice_vsi {
u64 tx_linearize;
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
unsigned int current_netdev_flags;
u32 tx_restart;
u32 tx_busy;
@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
void ice_napi_del(struct ice_vsi *vsi);
#endif /* _ICE_H_ */


@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw)
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
ice_shutdown_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
ice_clear_all_vsi_ctx(hw);
}
/**


@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
}
if (!test_bit(__ICE_DOWN, pf->state)) {
/* Give it a little more time to try to come back */
/* Give it a little more time to try to come back. If still
* down, restart autoneg link or reinitialize the interface.
*/
msleep(75);
if (!test_bit(__ICE_DOWN, pf->state))
return ice_nway_reset(netdev);
ice_down(vsi);
ice_up(vsi);
}
return err;


@ -242,6 +242,8 @@
#define GLNVM_ULD 0x000B6008
#define GLNVM_ULD_CORER_DONE_M BIT(3)
#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
#define PF_FUNC_RID_FUNC_NUM_S 0
#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)


@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
vsi->back->hw.adminq.sq_last_status);
goto err_out;
}
@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
* on this wq
*/
if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
ice_napi_del(vsi);
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;


@ -1465,7 +1465,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
* ice_napi_del - Remove NAPI handler for the VSI
* @vsi: VSI for which NAPI handler is to be removed
*/
static void ice_napi_del(struct ice_vsi *vsi)
void ice_napi_del(struct ice_vsi *vsi)
{
int v_idx;
@ -1622,7 +1622,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@ -1635,7 +1634,8 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid)) {
ret = ice_cfg_vlan_pruning(vsi, true);
int ret = ice_cfg_vlan_pruning(vsi, true);
if (ret)
return ret;
}
@ -1644,12 +1644,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
*/
ret = ice_vsi_add_vlan(vsi, vid);
if (!ret)
set_bit(vid, vsi->active_vlans);
return ret;
return ice_vsi_add_vlan(vsi, vid);
}
/**
@ -1677,8 +1672,6 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
if (status)
return status;
clear_bit(vid, vsi->active_vlans);
/* Disable VLAN pruning when VLAN 0 is removed */
if (unlikely(!vid))
status = ice_cfg_vlan_pruning(vsi, false);
@ -2001,6 +1994,22 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
return 0;
}
/**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure
*
* There is no error returned here because the driver should be able to handle
* 128 Byte cache lines, so we only print a warning in case issues are seen,
* specifically with Tx.
*/
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
dev_warn(&pf->pdev->dev,
"%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
ICE_CACHE_LINE_BYTES);
}
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
@ -2151,6 +2160,8 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
ice_verify_cacheline_size(pf);
return 0;
err_alloc_sw_unroll:
@ -2182,6 +2193,12 @@ static void ice_remove(struct pci_dev *pdev)
if (!pf)
return;
for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
if (!ice_is_reset_in_progress(pf->state))
break;
msleep(100);
}
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
@ -2509,31 +2526,6 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
return ret;
}
/**
* ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
* @vsi: the VSI being brought back up
*/
static int ice_restore_vlan(struct ice_vsi *vsi)
{
int err;
u16 vid;
if (!vsi->netdev)
return -EINVAL;
err = ice_vsi_vlan_setup(vsi);
if (err)
return err;
for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
if (err)
break;
}
return err;
}
/**
* ice_vsi_cfg - Setup the VSI
* @vsi: the VSI being configured
@ -2546,7 +2538,9 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (vsi->netdev) {
ice_set_rx_mode(vsi->netdev);
err = ice_restore_vlan(vsi);
err = ice_vsi_vlan_setup(vsi);
if (err)
return err;
}
@ -3296,7 +3290,7 @@ static void ice_rebuild(struct ice_pf *pf)
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
enum ice_status ret;
int err;
int err, i;
if (test_bit(__ICE_DOWN, pf->state))
goto clear_recovery;
@ -3370,6 +3364,22 @@ static void ice_rebuild(struct ice_pf *pf)
}
ice_reset_all_vfs(pf, true);
for (i = 0; i < pf->num_alloc_vsi; i++) {
bool link_up;
if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
continue;
ice_get_link_status(pf->vsi[i]->port_info, &link_up);
if (link_up) {
netif_carrier_on(pf->vsi[i]->netdev);
netif_tx_wake_all_queues(pf->vsi[i]->netdev);
} else {
netif_carrier_off(pf->vsi[i]->netdev);
netif_tx_stop_all_queues(pf->vsi[i]->netdev);
}
}
/* if we get here, reset flow is successful */
clear_bit(__ICE_RESET_FAILED, pf->state);
return;


@ -347,6 +347,18 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
}
}
/**
* ice_clear_all_vsi_ctx - clear all the VSI context entries
* @hw: pointer to the hw struct
*/
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
u16 i;
for (i = 0; i < ICE_MAX_VSI; i++)
ice_clear_vsi_ctx(hw, i);
}
/**
* ice_add_vsi - add VSI context to the hardware and VSI handle list
* @hw: pointer to the hw struct


@ -190,6 +190,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
void ice_clear_all_vsi_ctx(struct ice_hw *hw);
/* Switch config */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
/* Switch/bridge related commands */


@ -1520,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
/* update gso_segs and bytecount */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount = (first->gso_segs - 1) * off->header_len;
first->bytecount += (first->gso_segs - 1) * off->header_len;
cd_tso_len = skb->len - off->header_len;
cd_mss = skb_shinfo(skb)->gso_size;
@ -1556,15 +1556,15 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* magnitude greater than our largest possible GSO size.
*
* This would then be implemented as:
* return (((size >> 12) * 85) >> 8) + 1;
* return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
*
* Since multiplication and division are commutative, we can reorder
* operations into:
* return ((size * 85) >> 20) + 1;
* return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
*/
static unsigned int ice_txd_use_count(unsigned int size)
{
return ((size * 85) >> 20) + 1;
return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
/**
@ -1706,7 +1706,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
* + 1 desc for context descriptor,
* otherwise try next time
*/
if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
ICE_DESCS_FOR_CTX_DESC)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}


@ -22,8 +22,21 @@
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* We are assuming that the cache line is always 64 Bytes here for ice.
* In order to make sure that is a correct assumption there is a check in probe
* to print a warning if the read from GLPCI_CNF2 tells us that the cache line
* size is 128 bytes. We do it this way because we do not want to read the
* GLPCI_CNF2 register or a variable containing the value on every pass through
* the Tx path.
*/
#define ICE_CACHE_LINE_BYTES 64
#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC 1
#define ICE_DESCS_FOR_SKB_DATA_PTR 1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)


@ -92,12 +92,12 @@ struct ice_link_status {
u64 phy_type_low;
u16 max_frame_size;
u16 link_speed;
u16 req_speeds;
u8 lse_ena; /* Link Status Event notification */
u8 link_info;
u8 an_info;
u8 ext_info;
u8 pacing;
u8 req_speeds;
/* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
* ice_aqc_get_phy_caps structure
*/


@ -348,7 +348,7 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
struct ice_vsi_ctx ctxt = { 0 };
enum ice_status status;
ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
ICE_AQ_VSI_PVLAN_INSERT_PVID |
ICE_AQ_VSI_VLAN_EMOD_STR;
ctxt.info.pvid = cpu_to_le16(vid);
@ -2171,7 +2171,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!ice_vsi_add_vlan(vsi, vid)) {
vf->num_vlan++;
set_bit(vid, vsi->active_vlans);
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid))
@ -2190,7 +2189,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
*/
if (!ice_vsi_kill_vlan(vsi, vid)) {
vf->num_vlan--;
clear_bit(vid, vsi->active_vlans);
/* Disable VLAN pruning when removing VLAN 0 */
if (unlikely(!vid))


@ -53,13 +53,15 @@
* 2^40 * 10^-9 / 60 = 18.3 minutes.
*
* SYSTIM is converted to real time using a timecounter. As
* timecounter_cyc2time() allows old timestamps, the timecounter
* needs to be updated at least once per half of the SYSTIM interval.
* Scheduling of delayed work is not very accurate, so we aim for 8
* minutes to be sure the actual interval is shorter than 9.16 minutes.
* timecounter_cyc2time() allows old timestamps, the timecounter needs
* to be updated at least once per half of the SYSTIM interval.
* Scheduling of delayed work is not very accurate, and also the NIC
* clock can be adjusted to run up to 6% faster and the system clock
* up to 10% slower, so we aim for 6 minutes to be sure the actual
* interval in the NIC time is shorter than 9.16 minutes.
*/
#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8)
#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
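
The 6-minute figure is easy to verify with the comment's own numbers; a throwaway calculation (my arithmetic, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* SYSTIM wraps after 2^40 ns; the timecounter must be refreshed
	 * within half of that interval. */
	double wrap_min = (double)(1ULL << 40) * 1e-9 / 60.0; /* ~18.33 */
	double half_min = wrap_min / 2.0;                     /* ~9.16 */

	/* Worst case: the work is scheduled for 6 minutes of system time,
	 * the system clock runs 10% slow and the NIC clock 6% fast. */
	double worst_min = 6.0 * 1.06 / 0.9;                  /* ~7.07 */

	printf("half-wrap %.2f min > worst-case span %.2f min\n",
	       half_min, worst_min);
	return 0;
}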


@ -494,7 +494,7 @@ struct mvneta_port {
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
u32 command; /* Options used by HW for packet transmitting.*/
u16 reserverd1; /* csum_l4 (for future use) */
u16 reserved1; /* csum_l4 (for future use) */
u16 data_size; /* Data size of transmitted packet in bytes */
u32 buf_phys_addr; /* Physical addr of transmitted buffer */
u32 reserved2; /* hw_cmd - (for future use, PMT) */
@ -519,7 +519,7 @@ struct mvneta_rx_desc {
#else
struct mvneta_tx_desc {
u16 data_size; /* Data size of transmitted packet in bytes */
u16 reserverd1; /* csum_l4 (for future use) */
u16 reserved1; /* csum_l4 (for future use) */
u32 command; /* Options used by HW for packet transmitting.*/
u32 reserved2; /* hw_cmd - (for future use, PMT) */
u32 buf_phys_addr; /* Physical addr of transmitted buffer */


@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
"Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
fcoe_pf_params->num_cqs,
p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
return -EINVAL;
rc = -EINVAL;
goto err;
}
p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
if (rc)
return rc;
goto err;
cxt_info.iid = dummy_cid;
rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
if (rc) {
DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
dummy_cid);
return rc;
goto err;
}
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
return rc;
err:
qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
static int


@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
"Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
p_params->num_queues,
p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
qed_sp_destroy_request(p_hwfn, p_ent);
return -EINVAL;
}


@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
/* Return spq entry which is taken in qed_sp_init_request()*/
qed_spq_return_entry(p_hwfn, p_ent);
qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"%d is not supported yet\n",
p_filter_cmd->opcode);
qed_sp_destroy_request(p_hwfn, *pp_ent);
return -EINVAL;
}
@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
} else {
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc)
return rc;
goto err;
if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
&abs_rx_q_id);
if (rc)
return rc;
goto err;
p_ramrod->rx_qid_valid = 1;
p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
(u64)p_params->addr, p_params->length);
return qed_spq_post(p_hwfn, p_ent, NULL);
err:
qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,


@ -1514,6 +1514,7 @@ qed_rdma_register_tid(void *rdma_cxt,
default:
rc = -EINVAL;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
SET_FIELD(p_ramrod->flags1,


@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
rc);
qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}


@ -167,6 +167,9 @@ struct qed_spq_entry {
enum spq_mode comp_mode;
struct qed_spq_comp_cb comp_cb;
struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
/* Posted entry for unlimited list entry in EBLOCK mode */
struct qed_spq_entry *post_ent;
};
struct qed_eq {
@ -396,6 +399,17 @@ struct qed_sp_init_data {
struct qed_spq_comp_cb *p_comp_data;
};
/**
* @brief Returns a SPQ entry to the pool / frees the entry if allocated.
* Should be called in error flows after initializing the SPQ entry
* and before posting it.
*
* @param p_hwfn
* @param p_ent
*/
void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent);
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u8 cmd,


@ -47,6 +47,19 @@
#include "qed_sp.h"
#include "qed_sriov.h"
void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
{
/* qed_spq_get_entry() can either get an entry from the free_pool,
* or, if no entries are left, allocate a new entry and add it to
* the unlimited_pending list.
*/
if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
kfree(p_ent);
else
qed_spq_return_entry(p_hwfn, p_ent);
}
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
case QED_SPQ_MODE_BLOCK:
if (!p_data->p_comp_data)
return -EINVAL;
goto err;
p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
break;
@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
default:
DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
p_ent->comp_mode);
return -EINVAL;
goto err;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
return 0;
err:
qed_sp_destroy_request(p_hwfn, p_ent);
return -EINVAL;
}
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)


@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = qed_mcp_drain(p_hwfn, p_ptt);
qed_ptt_release(p_hwfn, p_ptt);
if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
goto err;
@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/* Retry after drain */
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (!rc)
goto out;
return 0;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
if (comp_done->done == 1)
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
out:
qed_ptt_release(p_hwfn, p_ptt);
return 0;
return 0;
}
err:
qed_ptt_release(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
/* EBLOCK responsible to free the allocated p_ent */
if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
kfree(p_ent);
else
p_ent->post_ent = p_en2;
p_ent = p_en2;
}
@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
/* Avoid overriding of SPQ entries when getting out-of-order completions, by
* marking the completions in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
*/
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
struct qed_spq *p_spq = p_hwfn->p_spq;
__set_bit(pos, p_spq->p_comp_bitmap);
while (test_bit(p_spq->comp_bitmap_idx,
p_spq->p_comp_bitmap)) {
__clear_bit(p_spq->comp_bitmap_idx,
p_spq->p_comp_bitmap);
p_spq->comp_bitmap_idx++;
qed_chain_return_produced(&p_spq->chain);
}
}
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
p_ent->queue == &p_spq->unlimited_pending);
if (p_ent->queue == &p_spq->unlimited_pending) {
/* This is an allocated p_ent which does not need to
* return to pool.
*/
struct qed_spq_entry *p_post_ent = p_ent->post_ent;
kfree(p_ent);
return rc;
/* Return the entry which was actually posted */
p_ent = p_post_ent;
}
if (rc)
@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spq_post_fail2:
spin_lock_bh(&p_spq->lock);
list_del(&p_ent->list);
qed_chain_return_produced(&p_spq->chain);
qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
spq_post_fail:
/* return to the free pool */
@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
spin_lock_bh(&p_spq->lock);
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
if (p_ent->elem.hdr.echo == echo) {
u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
list_del(&p_ent->list);
/* Avoid overriding of SPQ entries when getting
* out-of-order completions, by marking the completions
* in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
*/
__set_bit(pos, p_spq->p_comp_bitmap);
while (test_bit(p_spq->comp_bitmap_idx,
p_spq->p_comp_bitmap)) {
__clear_bit(p_spq->comp_bitmap_idx,
p_spq->p_comp_bitmap);
p_spq->comp_bitmap_idx++;
qed_chain_return_produced(&p_spq->chain);
}
qed_spq_comp_bmap_update(p_hwfn, echo);
p_spq->comp_count++;
found = p_ent;
break;
@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
QED_MSG_SPQ,
"Got a completion without a callback function\n");
if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
(found->queue == &p_spq->unlimited_pending))
if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
/* EBLOCK is responsible for returning its own entry into the
* free list, unless it originally added the entry into the
* unlimited pending list.
* free list.
*/
qed_spq_return_entry(p_hwfn, found);


@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
default:
DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
p_hwfn->hw_info.personality);
qed_sp_destroy_request(p_hwfn, p_ent);
return -EINVAL;
}


@ -459,7 +459,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
struct qlcnic_host_tx_ring *tx_ring)
{
u8 l4proto, opcode = 0, hdr_len = 0;
u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
u16 flags = 0, vlan_tci = 0;
int copied, offset, copy_len, size;
struct cmd_desc_type0 *hwdesc;
@ -472,14 +472,16 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
flags = QLCNIC_FLAGS_VLAN_TAGGED;
vlan_tci = ntohs(vh->h_vlan_TCI);
protocol = ntohs(vh->h_vlan_encapsulated_proto);
tag_vlan = 1;
} else if (skb_vlan_tag_present(skb)) {
flags = QLCNIC_FLAGS_VLAN_OOB;
vlan_tci = skb_vlan_tag_get(skb);
tag_vlan = 1;
}
if (unlikely(adapter->tx_pvid)) {
if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
return -EIO;
if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
goto set_flags;
flags = QLCNIC_FLAGS_VLAN_OOB;


@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct net_device *real_dev,
struct rmnet_endpoint *ep)
{
struct rmnet_priv *priv;
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
int rc;
if (ep->egress_dev)
@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
rmnet_dev->hw_features |= NETIF_F_SG;
priv->real_dev = real_dev;
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
priv = netdev_priv(rmnet_dev);
priv->mux_id = id;
priv->real_dev = real_dev;
netdev_dbg(rmnet_dev, "rmnet dev created\n");
}


@ -365,7 +365,8 @@ struct dma_features {
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
#define BUF_SIZE_16KiB 16384
#define BUF_SIZE_8KiB 8192
/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
#define BUF_SIZE_8KiB 8188
#define BUF_SIZE_4KiB 4096
#define BUF_SIZE_2KiB 2048


@ -31,7 +31,7 @@
/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
{
p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
<< ERDES1_BUFFER2_SIZE_SHIFT)
& ERDES1_BUFFER2_SIZE_MASK);


@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
p->des0 |= cpu_to_le32(RDES0_OWN);
p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);


@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
static int set_16kib_bfsize(int mtu)
{
int ret = 0;
if (unlikely(mtu >= BUF_SIZE_8KiB))
if (unlikely(mtu > BUF_SIZE_8KiB))
ret = BUF_SIZE_16KiB;
return ret;
}


@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: GPL-2.0+
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
*
* Copyright (c) 2018 Maciej W. Rozycki
@ -56,7 +56,7 @@
#define DRV_VERSION "v.1.1.4"
#define DRV_RELDATE "Oct 6 2018"
static char version[] =
static const char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";
MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
@ -784,7 +784,7 @@ static void fza_rx(struct net_device *dev)
static void fza_tx_smt(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
struct fza_buffer_tx __iomem *smt_tx_ptr;
int i, len;
u32 own;
@ -799,6 +799,7 @@ static void fza_tx_smt(struct net_device *dev)
if (!netif_queue_stopped(dev)) {
if (dev_nit_active(dev)) {
struct fza_buffer_tx *skb_data_ptr;
struct sk_buff *skb;
/* Length must be a multiple of 4 as only word


@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* SPDX-License-Identifier: GPL-2.0+ */
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
*
* Copyright (c) 2018 Maciej W. Rozycki
@ -235,6 +235,7 @@ struct fza_ring_cmd {
#define FZA_RING_CMD 0x200400 /* command ring address */
#define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring
* size
*/
/* Command constants. */
#define FZA_RING_CMD_MASK 0x7fffffff
#define FZA_RING_CMD_NOP 0x00000000 /* nop */


@ -92,7 +92,7 @@ static int bcm54612e_config_init(struct phy_device *phydev)
return 0;
}
static int bcm5481x_config(struct phy_device *phydev)
static int bcm54xx_config_clock_delay(struct phy_device *phydev)
{
int rc, val;
@ -429,7 +429,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
ret = genphy_config_aneg(phydev);
/* Then we can set up the delay. */
bcm5481x_config(phydev);
bcm54xx_config_clock_delay(phydev);
if (of_property_read_bool(np, "enet-phy-lane-swap")) {
/* Lane Swap - Undocumented register...magic! */
@ -442,6 +442,19 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
return ret;
}
static int bcm54616s_config_aneg(struct phy_device *phydev)
{
int ret;
/* Aneg first. */
ret = genphy_config_aneg(phydev);
/* Then we can set up the delay. */
bcm54xx_config_clock_delay(phydev);
return ret;
}
static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
{
int val;
@ -636,6 +649,7 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {


@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
dev->net->min_mtu = ETH_MIN_MTU;
dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
pdata->dev = dev;


@ -1166,8 +1166,8 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
break;
}
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS)) {
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
!(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
key_ports = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS,
target_container);
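
The effect of the new guard is that every fragment of a datagram hashes to the same flow, since only the first fragment carries the L4 header. A userspace analog (hypothetical key layout and hash, not the kernel's structures):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flow_key {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	bool is_fragment;
};

/* Mix ports into the hash only for unfragmented packets, as the patch
 * does; later fragments have no L4 header, so hashing ports would
 * scatter one datagram's fragments across flows. */
static uint32_t flow_hash(const struct flow_key *k)
{
	uint32_t h = k->saddr * 2654435761u ^ k->daddr * 2246822519u;

	if (!k->is_fragment)
		h ^= ((uint32_t)k->sport << 16) | k->dport;
	return h;
}

int main(void)
{
	struct flow_key first = { 1, 2, 1000, 53, true };  /* has ports */
	struct flow_key later = { 1, 2, 0, 0, true };      /* no L4 hdr */

	printf("same flow: %s\n",
	       flow_hash(&first) == flow_hash(&later) ? "yes" : "no");
	return 0;
}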


@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
}
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
void *arg)
void *arg,
struct inet_frag_queue **prev)
{
struct inet_frags *f = nf->f;
struct inet_frag_queue *q;
int err;
q = inet_frag_alloc(nf, f, arg);
if (!q)
if (!q) {
*prev = ERR_PTR(-ENOMEM);
return NULL;
}
mod_timer(&q->timer, jiffies + nf->timeout);
err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
f->rhash_params);
if (err < 0) {
*prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
&q->node, f->rhash_params);
if (*prev) {
q->flags |= INET_FRAG_COMPLETE;
inet_frag_kill(q);
inet_frag_destroy(q);
@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
{
struct inet_frag_queue *fq;
struct inet_frag_queue *fq = NULL, *prev;
if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
return NULL;
rcu_read_lock();
fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
if (fq) {
prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
if (!prev)
fq = inet_frag_create(nf, key, &prev);
if (prev && !IS_ERR(prev)) {
fq = prev;
if (!refcount_inc_not_zero(&fq->refcnt))
fq = NULL;
rcu_read_unlock();
return fq;
}
rcu_read_unlock();
return inet_frag_create(nf, key);
return fq;
}
EXPORT_SYMBOL(inet_frag_find);
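
The heart of the fix is replacing rhashtable_insert_fast() with rhashtable_lookup_get_insert_key(), which atomically returns the entry a concurrent thread inserted first so the loser can discard its own copy. A single-slot userspace analog of that lose-and-adopt pattern (illustrative only):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct frag_queue { int key; };

/* One atomic pointer stands in for the hash bucket. */
static _Atomic(struct frag_queue *) slot;

static struct frag_queue *find_or_create(int key)
{
	struct frag_queue *expected = NULL;
	struct frag_queue *q = malloc(sizeof(*q));

	if (!q)
		return NULL;
	q->key = key;

	/* Succeeds only if no entry exists; otherwise 'expected' is
	 * filled with the concurrently inserted entry. */
	if (atomic_compare_exchange_strong(&slot, &expected, q))
		return q;	/* we won the race */

	free(q);		/* lost: drop our copy, adopt the winner's */
	return expected;
}

int main(void)
{
	struct frag_queue *a = find_or_create(42);
	struct frag_queue *b = find_or_create(42);

	printf("same queue: %s\n", a == b ? "yes" : "no");
	free(a);
	return 0;
}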


@ -258,7 +258,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
if (is_redirect) {
skb2->tc_redirected = 1;
skb2->tc_from_ingress = skb2->tc_at_ingress;
if (skb2->tc_from_ingress)
skb2->tstamp = 0;
/* let the caller reinsert the packet, if possible */
if (use_reinsert) {
res->ingress = want_ingress;


@ -709,11 +709,23 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
struct netlink_ext_ack *extack)
{
const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
int option_len, key_depth, msk_depth = 0;
int err, option_len, key_depth, msk_depth = 0;
err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
TCA_FLOWER_KEY_ENC_OPTS_MAX,
enc_opts_policy, extack);
if (err)
return err;
nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
TCA_FLOWER_KEY_ENC_OPTS_MAX,
enc_opts_policy, extack);
if (err)
return err;
nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
}


@ -648,15 +648,6 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
*/
skb->dev = qdisc_dev(sch);
#ifdef CONFIG_NET_CLS_ACT
/*
* If it's at ingress let's pretend the delay is
* from the network (tstamp will be updated).
*/
if (skb->tc_redirected && skb->tc_from_ingress)
skb->tstamp = 0;
#endif
if (q->slot.slot_next) {
q->slot.packets_left--;
q->slot.bytes_left -= qdisc_pkt_len(skb);


@ -1594,14 +1594,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
l->priority = peers_prio;
/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
if (msg_peer_stopping(hdr))
/* If peer is going down we want full re-establish cycle */
if (msg_peer_stopping(hdr)) {
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
else if ((mtyp == RESET_MSG) || !link_is_up(l))
break;
}
/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
if (mtyp == RESET_MSG || !link_is_up(l))
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
/* ACTIVATE_MSG takes up link if it was already locally reset */
if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
l->peer_session = msg_session(hdr);