Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-25 16:30:53 +07:00
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
This series contains updates to e1000, igb, ixgbe and ixgbevf.

Hong Zhiguo provides a fix for e1000 where tx_ring and adapter->tx_ring are already of type "struct e1000_tx_ring", so there is no need to divide by the size of struct e1000_tx_ring in the idx calculation.

Emil provides a fix for ixgbevf to remove a redundant workaround related to header split, and a fix for ixgbe to resolve an issue where the MTA table can be cleared when the interface is reset while in promiscuous mode.

Todd provides a fix for igb to prevent ethtool from writing to the iNVM in i210/i211 devices. This issue was reported by Marek Vasut <marex@denx.de>.

Anton Blanchard provides a fix for ixgbe to reduce memory consumption with larger page sizes, as seen on PPC.

Don provides a cleanup in ixgbevf to replace the IXGBE_DESC_UNUSED macro with the inline function ixgbevf_desc_unused() to make the logic a bit more readable.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 90df06b8a2
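The e1000 fix in this series hinges on a C detail: subtracting two pointers of the same struct type already yields a count of elements, not bytes, so the old code's extra division by sizeof(struct e1000_tx_ring) collapsed the reported queue index to zero. A minimal stand-alone sketch of that point (the struct here is only a stand-in for the real e1000_tx_ring, not driver code):

/* Illustrative only: pointer subtraction already counts elements,
 * so dividing the difference by sizeof() again is wrong.
 */
#include <stdio.h>

struct tx_ring { char pad[192]; };	/* stand-in for struct e1000_tx_ring */

int main(void)
{
	struct tx_ring rings[4];
	struct tx_ring *tx_ring = &rings[2];

	/* New, correct form: prints 2, the ring's index in the array. */
	printf("queue idx = %lu\n", (unsigned long)(tx_ring - rings));

	/* Old, buggy form: 2 / sizeof(struct tx_ring) truncates to 0. */
	printf("buggy idx = %lu\n",
	       (unsigned long)((tx_ring - rings) / sizeof(struct tx_ring)));

	return 0;
}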
drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3917,8 +3917,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 		      " next_to_watch <%x>\n"
 		      " jiffies <%lx>\n"
 		      " next_to_watch.status <%x>\n",
-			(unsigned long)((tx_ring - adapter->tx_ring) /
-				sizeof(struct e1000_tx_ring)),
+			(unsigned long)(tx_ring - adapter->tx_ring),
 			readl(hw->hw_addr + tx_ring->tdh),
 			readl(hw->hw_addr + tx_ring->tdt),
 			tx_ring->next_to_use,
drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -771,8 +771,10 @@ static int igb_set_eeprom(struct net_device *netdev,
 	if (eeprom->len == 0)
 		return -EOPNOTSUPP;
 
-	if (hw->mac.type == e1000_i211)
+	if ((hw->mac.type >= e1000_i210) &&
+	    !igb_get_flash_presence_i210(hw)) {
 		return -EOPNOTSUPP;
+	}
 
 	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
 		return -EFAULT;
drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -67,7 +67,11 @@
 #define IXGBE_MAX_TXD 4096
 #define IXGBE_MIN_TXD 64
 
+#if (PAGE_SIZE < 8192)
 #define IXGBE_DEFAULT_RXD 512
+#else
+#define IXGBE_DEFAULT_RXD 128
+#endif
 #define IXGBE_MAX_RXD 4096
 #define IXGBE_MIN_RXD 64
 
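A rough sense of why the ixgbe.h change above matters: the default RX descriptor count was tuned for 4 KB pages, and on systems with 64 KB pages (e.g. PPC) the same 512 descriptors pin far more buffer memory, so the default drops to 128 there. A back-of-the-envelope sketch, assuming for simplicity one page-sized buffer per RX descriptor (ixgbe's real page handling is more frugal, but the scaling is the point):

/* Illustrative arithmetic only, not driver code. */
#include <stdio.h>

int main(void)
{
	unsigned long page_4k = 4096, page_64k = 65536;

	printf("4K pages,  512 RXD: ~%lu KB per ring\n", 512 * page_4k / 1024);
	printf("64K pages, 512 RXD: ~%lu KB per ring (old default)\n",
	       512 * page_64k / 1024);
	printf("64K pages, 128 RXD: ~%lu KB per ring (new default)\n",
	       128 * page_64k / 1024);
	return 0;
}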
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3823,14 +3823,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 		if (netdev->flags & IFF_ALLMULTI) {
 			fctrl |= IXGBE_FCTRL_MPE;
 			vmolr |= IXGBE_VMOLR_MPE;
-		} else {
-			/*
-			 * Write addresses to the MTA, if the attempt fails
-			 * then we should just turn on promiscuous mode so
-			 * that we can at least receive multicast traffic
-			 */
-			hw->mac.ops.update_mc_addr_list(hw, netdev);
-			vmolr |= IXGBE_VMOLR_ROMPE;
 		}
 		ixgbe_vlan_filter_enable(adapter);
 		hw->addr_ctrl.user_set_promisc = false;
@@ -3847,6 +3839,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 		vmolr |= IXGBE_VMOLR_ROPE;
 	}
 
+	/* Write addresses to the MTA, if the attempt fails
+	 * then we should just turn on promiscuous mode so
+	 * that we can at least receive multicast traffic
+	 */
+	hw->mac.ops.update_mc_addr_list(hw, netdev);
+	vmolr |= IXGBE_VMOLR_ROMPE;
+
 	if (adapter->num_vfs)
 		ixgbe_restore_vf_multicasts(adapter);
 
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -286,9 +286,13 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
 	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
 #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
 
-#define IXGBE_DESC_UNUSED(R) \
-	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-	(R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
 
 #define IXGBEVF_RX_DESC(R, i) \
 	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
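The inline ixgbevf_desc_unused() introduced above computes how many descriptors are still free in the circular ring, keeping one slot unused so a full ring can be distinguished from an empty one. A small stand-alone check of the same arithmetic (the struct below is a stand-in, not the real ixgbevf_ring):

#include <stdio.h>
#include <stdint.h>

struct ring { uint16_t count, next_to_use, next_to_clean; };

/* Same formula as ixgbevf_desc_unused(): free slots in a circular ring. */
static uint16_t desc_unused(const struct ring *r)
{
	uint16_t ntc = r->next_to_clean;
	uint16_t ntu = r->next_to_use;

	return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
}

int main(void)
{
	struct ring r = { .count = 512, .next_to_use = 500, .next_to_clean = 10 };

	/* 512 + 10 - 500 - 1 = 21 descriptors available for refill. */
	printf("unused = %u\n", desc_unused(&r));
	return 0;
}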
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -251,7 +251,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -497,15 +497,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
 
-		/*
-		 * Work around issue of some types of VM to VM loop back
-		 * packets not getting split correctly
-		 */
-		if (staterr & IXGBE_RXD_STAT_LB) {
-			u32 header_fixup_len = skb_headlen(skb);
-			if (header_fixup_len < 14)
-				skb_push(skb, header_fixup_len);
-		}
 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
 		/* Workaround hardware that can't do proper VEPA multicast
@@ -538,7 +529,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	}
 
 	rx_ring->next_to_clean = i;
-	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+	cleaned_count = ixgbevf_desc_unused(rx_ring);
 
 	if (cleaned_count)
 		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
@@ -1389,7 +1380,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
 		ixgbevf_alloc_rx_buffers(adapter, ring,
-					 IXGBE_DESC_UNUSED(ring));
+					 ixgbevf_desc_unused(ring));
 	}
 }
 
@@ -3111,7 +3102,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 
 	/* We need to check again in a case another CPU has just
 	 * made room available. */
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+	if (likely(ixgbevf_desc_unused(tx_ring) < size))
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -3122,7 +3113,7 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 
 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
 		return 0;
 	return __ixgbevf_maybe_stop_tx(tx_ring, size);
 }