mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-25 06:10:54 +07:00
4af429d29b
vlan is a stacked device, like tunnels. We should use the lockless
mechanism we are using in tunnels and loopback.
This patch completely removes locking in TX path.
tx stat counters are added into existing percpu stat structure, renamed
from vlan_rx_stats to vlan_pcpu_stats.
Note : this partially reverts commit 2e59af3dcb
(vlan: multiqueue vlan
device)
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
91 lines
2.3 KiB
C
#include <linux/skbuff.h>
|
|
#include <linux/netdevice.h>
|
|
#include <linux/if_vlan.h>
|
|
#include <linux/netpoll.h>
|
|
#include "vlan.h"
|
|
|
|
/*
 * vlan_hwaccel_do_receive - steer a hw-accel VLAN-tagged skb to its vlan device
 * @skbp: in/out pointer to the skb; skb_share_check() may replace it with
 *	  a private copy (callers must keep using *skbp afterwards)
 *
 * Looks up the vlan device registered on skb->dev for the VID carried in
 * skb->vlan_tci.  On a match the skb is retargeted at the vlan device, its
 * priority is remapped via the vlan ingress priority table, the tag is
 * cleared, and the per-cpu rx counters are updated.
 *
 * Returns true when the skb was accepted for a vlan device; false when no
 * vlan device matches the tag, or when skb_share_check() returned NULL.
 */
bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	/* Mask off the priority/CFI bits: only the 12-bit VID selects a device. */
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		/* Tagged frame for a VID we have no device for: mark it as
		 * not-for-this-host.  Priority-tagged frames (VID 0) pass
		 * through untouched. */
		if (vlan_id)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	/* We are about to modify the skb; un-share it first if needed. */
	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

	/* Lockless 64-bit stats: bracket all counter updates with the
	 * per-cpu seqcount so readers see a consistent snapshot. */
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->rx_multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
|
|
|
|
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
|
|
{
|
|
return vlan_dev_info(dev)->real_dev;
|
|
}
|
|
EXPORT_SYMBOL(vlan_dev_real_dev);
|
|
|
|
u16 vlan_dev_vlan_id(const struct net_device *dev)
|
|
{
|
|
return vlan_dev_info(dev)->vlan_id;
|
|
}
|
|
EXPORT_SYMBOL(vlan_dev_vlan_id);
|
|
|
|
/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
|
|
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
|
|
u16 vlan_tci, int polling)
|
|
{
|
|
__vlan_hwaccel_put_tag(skb, vlan_tci);
|
|
return polling ? netif_receive_skb(skb) : netif_rx(skb);
|
|
}
|
|
EXPORT_SYMBOL(__vlan_hwaccel_rx);
|
|
|
|
/* Tag @skb with @vlan_tci and feed it to the GRO receive path. */
gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
			      unsigned int vlan_tci, struct sk_buff *skb)
{
	gro_result_t ret;

	__vlan_hwaccel_put_tag(skb, vlan_tci);
	ret = napi_gro_receive(napi, skb);

	return ret;
}
EXPORT_SYMBOL(vlan_gro_receive);
|
|
|
|
gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
|
|
unsigned int vlan_tci)
|
|
{
|
|
__vlan_hwaccel_put_tag(napi->skb, vlan_tci);
|
|
return napi_gro_frags(napi);
|
|
}
|
|
EXPORT_SYMBOL(vlan_gro_frags);
|