Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-17 11:29:00 +07:00
Merge branch 'hns3-fixes'
Huazhong Tan says:

====================
net: hns3: fixes for -net

This patchset includes misc fixes for the HNS3 ethernet driver.

[patch 1/3] fixes a TX queue not restarted problem.
[patch 2/3] fixes a use-after-free issue.
[patch 3/3] fixes a VF ID issue for setting VF VLAN.

change log:
V1->V2: keeps 'ring' as a parameter of hns3_nic_maybe_stop_tx() in [patch 1/3], as suggested by David.
        rewrites [patch 2/3]'s commit log to make it easier to understand, as suggested by David.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a116f4e238
@@ -1287,30 +1287,25 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
 }
 
 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
-				  struct sk_buff **out_skb)
+				  struct net_device *netdev,
+				  struct sk_buff *skb)
 {
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
-	struct sk_buff *skb = *out_skb;
 	unsigned int bd_num;
 
 	bd_num = hns3_tx_bd_num(skb, bd_size);
 	if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
-		struct sk_buff *new_skb;
-
 		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
 		    !hns3_skb_need_linearized(skb, bd_size, bd_num))
 			goto out;
 
-		/* manual split the send packet */
-		new_skb = skb_copy(skb, GFP_ATOMIC);
-		if (!new_skb)
+		if (__skb_linearize(skb))
 			return -ENOMEM;
-		dev_kfree_skb_any(skb);
-		*out_skb = new_skb;
 
-		bd_num = hns3_tx_bd_count(new_skb->len);
-		if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
-		    (!skb_is_gso(new_skb) &&
+		bd_num = hns3_tx_bd_count(skb->len);
+		if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
+		    (!skb_is_gso(skb) &&
 		     bd_num > HNS3_MAX_NON_TSO_BD_NUM))
 			return -ENOMEM;
 
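The switch from skb_copy() to __skb_linearize() above is presumably what closes the use-after-free mentioned for [patch 2/3]: the old path could free the caller's skb and substitute a copy, yet a later -EBUSY from this function still made hns3_nic_net_xmit() return NETDEV_TX_BUSY, after which the stack requeues the original, already-freed skb. Linearizing in place never changes which skb the stack owns.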
@@ -1320,10 +1315,23 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 	}
 
 out:
-	if (unlikely(ring_space(ring) < bd_num))
-		return -EBUSY;
+	if (likely(ring_space(ring) >= bd_num))
+		return bd_num;
 
-	return bd_num;
+	netif_stop_subqueue(netdev, ring->queue_index);
+	smp_mb(); /* Memory barrier before checking ring_space */
+
+	/* Start queue in case hns3_clean_tx_ring has just made room
+	 * available and has not seen the queue stopped state performed
+	 * by netif_stop_subqueue above.
+	 */
+	if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
+	    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+		netif_start_subqueue(netdev, ring->queue_index);
+		return bd_num;
+	}
+
+	return -EBUSY;
 }
 
 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
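For readers following the queue-stall fix, the hunk above plus the hns3_clean_tx_ring() hunk further down form the usual stop/barrier/recheck handshake between the xmit and completion paths. The following is only a minimal userspace sketch of that handshake, assuming C11 atomics; fake_ring, maybe_stop_tx() and clean_tx_ring() are made-up stand-ins for the driver's ring state and for hns3_nic_maybe_stop_tx()/hns3_clean_tx_ring(), not driver code.

/* Illustrative userspace sketch only -- not driver code. */
#include <stdatomic.h>
#include <stdbool.h>

struct fake_ring {
	atomic_int space;	/* free descriptors, refilled by the cleaner */
	atomic_bool stopped;	/* mirrors the subqueue stopped state */
};

/* xmit side: mirrors the new tail of hns3_nic_maybe_stop_tx() above;
 * returns bd_num on success, -1 (think -EBUSY) when the caller must back off
 */
int maybe_stop_tx(struct fake_ring *ring, int bd_num)
{
	if (atomic_load(&ring->space) >= bd_num)
		return bd_num;

	atomic_store(&ring->stopped, true);		/* netif_stop_subqueue() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */

	/* recheck: the cleaner may have freed space just before it could
	 * observe the stopped flag set above
	 */
	if (atomic_load(&ring->space) >= bd_num) {
		atomic_store(&ring->stopped, false);	/* netif_start_subqueue() */
		return bd_num;
	}
	return -1;
}

/* completion side: mirrors the hns3_clean_tx_ring() hunk further down;
 * it wakes the queue based on available space alone
 */
void clean_tx_ring(struct fake_ring *ring, int freed)
{
	atomic_fetch_add(&ring->space, freed);		/* release descriptors */
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the xmit barrier */

	if (atomic_load(&ring->stopped) && atomic_load(&ring->space) > 0)
		atomic_store(&ring->stopped, false);	/* wake the queue */
}

Without the fence and the recheck on the xmit side, the queue can be stopped just after the cleaner has tested it, and nothing ever wakes it again, which appears to be the "TX queue not restarted problem" from the cover letter.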
@@ -1400,13 +1408,13 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* Prefetch the data used later */
 	prefetch(skb->data);
 
-	ret = hns3_nic_maybe_stop_tx(ring, &skb);
+	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
 	if (unlikely(ret <= 0)) {
 		if (ret == -EBUSY) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.tx_busy++;
 			u64_stats_update_end(&ring->syncp);
-			goto out_net_tx_busy;
+			return NETDEV_TX_BUSY;
 		} else if (ret == -ENOMEM) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.sw_err_cnt++;
@@ -1457,12 +1465,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 out_err_tx_ok:
 	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
-
-out_net_tx_busy:
-	netif_stop_subqueue(netdev, ring->queue_index);
-	smp_mb(); /* Commit all data before submit */
-
-	return NETDEV_TX_BUSY;
 }
 
 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
@@ -2519,7 +2521,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
 	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
 	netdev_tx_completed_queue(dev_queue, pkts, bytes);
 
-	if (unlikely(pkts && netif_carrier_ok(netdev) &&
+	if (unlikely(netif_carrier_ok(netdev) &&
 		     ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
@@ -8438,13 +8438,16 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
 	if (hdev->pdev->revision == 0x20)
 		return -EOPNOTSUPP;
 
+	vport = hclge_get_vf_vport(hdev, vfid);
+	if (!vport)
+		return -EINVAL;
+
 	/* qos is a 3 bits value, so can not be bigger than 7 */
-	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
+	if (vlan > VLAN_N_VID - 1 || qos > 7)
 		return -EINVAL;
 	if (proto != htons(ETH_P_8021Q))
 		return -EPROTONOSUPPORT;
 
-	vport = &hdev->vport[vfid];
 	state = hclge_get_port_base_vlan_state(vport,
 					       vport->port_base_vlan_cfg.state,
 					       vlan);
@@ -8455,21 +8458,12 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
 	vlan_info.qos = qos;
 	vlan_info.vlan_proto = ntohs(proto);
 
-	/* update port based VLAN for PF */
-	if (!vfid) {
-		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
-		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
-		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
-
-		return ret;
-	}
-
 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
 		return hclge_update_port_base_vlan_cfg(vport, state,
 						       &vlan_info);
 	} else {
 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
-							(u8)vfid, state,
+							vport->vport_id, state,
 							vlan, qos,
 							ntohs(proto));
 		return ret;