Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-05-31

This series contains updates to i40e and i40evf only.

Jesse provides a couple of fixes, starting with cleaning up duplicate lines of code. He also fixed a missing line which enables RSS as a negotiated feature. Since the VF does not have any way of reporting FCoE as enabled, force the code to always report FCoE as disabled.

Jake provides several fixes and changes, starting with fixing a race condition in i40e. The hardware has a limitation on transmit PTP packets which requires us to limit the driver to timestamping a single packet at once. This is done using a state bit lock which enforces that only one timestamp request is honored at a time; unfortunately, this suffers from a race condition. He also fixed a corner case where we failed to clean up the bit lock after a failed transmit, which resulted in a state bit being locked forever. Finally, he added a new statistic which tracks when a transmit timestamp request is skipped/ignored, since the driver can only handle one transmit timestamp request at a time.

Christophe Jaillet fixes a NULL pointer dereference if kzalloc() fails.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
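The ordering described above (clear the in-progress state bit before notifying the stack, and count any request that arrives while a timestamp is already outstanding) can be illustrated outside the driver. The sketch below is a minimal user-space analogue, not the i40e code itself: request_tx_timestamp(), complete_tx_timestamp() and the atomic_flag are illustrative stand-ins for the driver's __I40E_PTP_TX_IN_PROGRESS bit lock and the new tx_hwtstamp_skipped counter.

/* Stand-alone sketch of the "one outstanding Tx timestamp" pattern.
 * Build with: cc -std=c11 -o ptp_sketch ptp_sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag ptp_tx_in_progress = ATOMIC_FLAG_INIT;
static unsigned long tx_hwtstamp_skipped;

/* Transmit path: only one packet may own the timestamp slot at a time. */
static bool request_tx_timestamp(int pkt_id)
{
	if (atomic_flag_test_and_set(&ptp_tx_in_progress)) {
		tx_hwtstamp_skipped++;	/* slot busy: count the skipped request */
		return false;
	}
	printf("packet %d owns the Tx timestamp slot\n", pkt_id);
	return true;
}

/* Completion path: release the slot first, then notify, so a waiting
 * application cannot race against a still-held lock.
 */
static void complete_tx_timestamp(int pkt_id, unsigned long long ns)
{
	atomic_flag_clear(&ptp_tx_in_progress);
	printf("packet %d timestamped at %llu ns\n", pkt_id, ns);
}

int main(void)
{
	request_tx_timestamp(1);		/* takes the slot */
	request_tx_timestamp(2);		/* slot still busy: skipped */
	complete_tx_timestamp(1, 123456789ULL);	/* unlock, then notify */
	printf("skipped requests: %lu\n", tx_hwtstamp_skipped);
	return 0;
}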
commit a99bbf6ed4
@@ -502,10 +502,12 @@ struct i40e_pf {
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info ptp_caps;
 	struct sk_buff *ptp_tx_skb;
+	unsigned long ptp_tx_start;
 	struct hwtstamp_config tstamp_config;
 	struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
 	u64 ptp_base_adj;
 	u32 tx_hwtstamp_timeouts;
+	u32 tx_hwtstamp_skipped;
 	u32 rx_hwtstamp_cleared;
 	u32 latch_event_flags;
 	spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
@@ -955,7 +957,8 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
 			     struct i40e_dcbx_config *old_cfg,
 			     struct i40e_dcbx_config *new_cfg);
 #endif /* CONFIG_I40E_DCB */
-void i40e_ptp_rx_hang(struct i40e_vsi *vsi);
+void i40e_ptp_rx_hang(struct i40e_pf *pf);
+void i40e_ptp_tx_hang(struct i40e_pf *pf);
 void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
 void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
 void i40e_ptp_set_increment(struct i40e_pf *pf);
@@ -595,6 +595,8 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
 	size = sizeof(struct i40e_qvlist_info) +
 	       (sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
 	ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
+	if (!ldev->qvlist_info)
+		return -ENOMEM;
 	ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
 
 	for (i = 0; i < qvlist_info->num_vectors; i++) {
@@ -147,6 +147,7 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
 	I40E_PF_STAT("arq_overflows", arq_overflows),
 	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+	I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
 	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
 	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
 	I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
@@ -6372,7 +6372,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
 			i40e_update_veb_stats(pf->veb[i]);
 	}
 
-	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
+	i40e_ptp_rx_hang(pf);
+	i40e_ptp_tx_hang(pf);
 }
 
 /**
@@ -269,6 +269,7 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
 
 /**
  * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
+ * @pf: The PF private data structure
  * @vsi: The VSI with the rings relevant to 1588
  *
  * This watchdog task is scheduled to detect error case where hardware has
@@ -276,9 +277,8 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
  * particular error is rare but leaves the device in a state unable to timestamp
  * any future packets.
  **/
-void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
+void i40e_ptp_rx_hang(struct i40e_pf *pf)
 {
-	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	unsigned int i, cleared = 0;
 
@@ -327,6 +327,36 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
 	pf->rx_hwtstamp_cleared += cleared;
 }
 
+/**
+ * i40e_ptp_tx_hang - Detect error case when Tx timestamp register is hung
+ * @pf: The PF private data structure
+ *
+ * This watchdog task is run periodically to make sure that we clear the Tx
+ * timestamp logic if we don't obtain a timestamp in a reasonable amount of
+ * time. It is unexpected in the normal case but if it occurs it results in
+ * permanently prevent timestamps of future packets
+ **/
+void i40e_ptp_tx_hang(struct i40e_pf *pf)
+{
+	if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+		return;
+
+	/* Nothing to do if we're not already waiting for a timestamp */
+	if (!test_bit(__I40E_PTP_TX_IN_PROGRESS, pf->state))
+		return;
+
+	/* We already have a handler routine which is run when we are notified
+	 * of a Tx timestamp in the hardware. If we don't get an interrupt
+	 * within a second it is reasonable to assume that we never will.
+	 */
+	if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) {
+		dev_kfree_skb_any(pf->ptp_tx_skb);
+		pf->ptp_tx_skb = NULL;
+		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+		pf->tx_hwtstamp_timeouts++;
+	}
+}
+
 /**
  * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
  * @pf: Board private structure
@@ -338,6 +368,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
 void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
 {
 	struct skb_shared_hwtstamps shhwtstamps;
+	struct sk_buff *skb = pf->ptp_tx_skb;
 	struct i40e_hw *hw = &pf->hw;
 	u32 hi, lo;
 	u64 ns;
@@ -353,12 +384,19 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
 	hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
 
 	ns = (((u64)hi) << 32) | lo;
-
 	i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
-	skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
-	dev_kfree_skb_any(pf->ptp_tx_skb);
+
+	/* Clear the bit lock as soon as possible after reading the register,
+	 * and prior to notifying the stack via skb_tstamp_tx(). Otherwise
+	 * applications might wake up and attempt to request another transmit
+	 * timestamp prior to the bit lock being cleared.
+	 */
 	pf->ptp_tx_skb = NULL;
 	clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+
+	/* Notify the stack and free the skb after we've unlocked */
+	skb_tstamp_tx(skb, &shhwtstamps);
+	dev_kfree_skb_any(skb);
 }
 
 /**
@@ -2628,8 +2628,10 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	if (pf->ptp_tx &&
 	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		pf->ptp_tx_start = jiffies;
 		pf->ptp_tx_skb = skb_get(skb);
 	} else {
+		pf->tx_hwtstamp_skipped++;
 		return 0;
 	}
 
@@ -2932,10 +2934,12 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
  * @hdr_len: size of the packet header
  * @td_cmd: the command field in the descriptor
  * @td_offset: offset for checksum or crc
+ *
+ * Returns 0 on success, -1 on failure to DMA
  **/
-static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-			       struct i40e_tx_buffer *first, u32 tx_flags,
-			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+			      struct i40e_tx_buffer *first, u32 tx_flags,
+			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
 	unsigned int data_len = skb->data_len;
 	unsigned int size = skb_headlen(skb);
@@ -3093,7 +3097,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		mmiowb();
 	}
 
-	return;
+	return 0;
 
 dma_error:
 	dev_info(tx_ring->dev, "TX DMA map failed\n");
@@ -3110,6 +3114,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	}
 
 	tx_ring->next_to_use = i;
+
+	return -1;
 }
 
 /**
@@ -3210,8 +3216,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	 */
 	i40e_atr(tx_ring, skb, tx_flags);
 
-	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-		    td_cmd, td_offset);
+	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+			td_cmd, td_offset))
+		goto cleanup_tx_tstamp;
 
 	return NETDEV_TX_OK;
 
@@ -3219,6 +3226,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
 	dev_kfree_skb_any(first->skb);
 	first->skb = NULL;
+cleanup_tx_tstamp:
+	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
+		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
+
+		dev_kfree_skb_any(pf->ptp_tx_skb);
+		pf->ptp_tx_skb = NULL;
+		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+	}
+
 	return NETDEV_TX_OK;
 }
 
@@ -1105,8 +1105,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
 	hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
 	hw->dev_caps.dcb = msg->vf_offload_flags &
 			   I40E_VIRTCHNL_VF_OFFLOAD_L2;
-	hw->dev_caps.fcoe = (msg->vf_offload_flags &
-			     I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+	hw->dev_caps.fcoe = 0;
 	for (i = 0; i < msg->num_vsis; i++) {
 		if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
 			ether_addr_copy(hw->mac.perm_addr,
@@ -79,7 +79,7 @@ enum i40e_virtchnl_ops {
 	I40E_VIRTCHNL_OP_DEL_VLAN = 13,
 	I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
 	I40E_VIRTCHNL_OP_GET_STATS = 15,
-	I40E_VIRTCHNL_OP_FCOE = 16,
+	I40E_VIRTCHNL_OP_RSVD = 16,
 	I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
 	I40E_VIRTCHNL_OP_IWARP = 20,
 	I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
@@ -155,7 +155,6 @@ struct i40e_virtchnl_vsi_resource {
 /* VF offload flags */
 #define I40E_VIRTCHNL_VF_OFFLOAD_L2		0x00000001
 #define I40E_VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE		0x00000004
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
 #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR	0x00000020
@@ -152,9 +152,8 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
 {
 	u32 caps;
 
-	adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
 	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
 	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
 	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
 	       I40E_VIRTCHNL_VF_OFFLOAD_VLAN |