mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-22 16:08:58 +07:00
da0f382029
Pull networking fixes from David Miller: "Lots of bug fixes here: 1) Out of bounds access in __bpf_skc_lookup, from Lorenz Bauer. 2) Fix rate reporting in cfg80211_calculate_bitrate_he(), from John Crispin. 3) Use after free in psock backlog workqueue, from John Fastabend. 4) Fix source port matching in fdb peer flow rule of mlx5, from Raed Salem. 5) Use atomic_inc_not_zero() in fl6_sock_lookup(), from Eric Dumazet. 6) Network header needs to be set for packet redirect in nfp, from John Hurley. 7) Fix udp zerocopy refcnt, from Willem de Bruijn. 8) Don't assume linear buffers in vxlan and geneve error handlers, from Stefano Brivio. 9) Fix TOS matching in mlxsw, from Jiri Pirko. 10) More SCTP cookie memory leak fixes, from Neil Horman. 11) Fix VLAN filtering in rtl8366, from Linus Walluij. 12) Various TCP SACK payload size and fragmentation memory limit fixes from Eric Dumazet. 13) Use after free in pneigh_get_next(), also from Eric Dumazet. 14) LAPB control block leak fix from Jeremy Sowden" * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (145 commits) lapb: fixed leak of control-blocks. 
tipc: purge deferredq list for each grp member in tipc_group_delete ax25: fix inconsistent lock state in ax25_destroy_timer neigh: fix use-after-free read in pneigh_get_next tcp: fix compile error if !CONFIG_SYSCTL hv_sock: Suppress bogus "may be used uninitialized" warnings be2net: Fix number of Rx queues used for flow hashing net: handle 802.1P vlan 0 packets properly tcp: enforce tcp_min_snd_mss in tcp_mtu_probing() tcp: add tcp_min_snd_mss sysctl tcp: tcp_fragment() should apply sane memory limits tcp: limit payload size of sacked skbs Revert "net: phylink: set the autoneg state in phylink_phy_change" bpf: fix nested bpf tracepoints with per-cpu data bpf: Fix out of bounds memory access in bpf_sk_storage vsock/virtio: set SOCK_DONE on peer shutdown net: dsa: rtl8366: Fix up VLAN filtering net: phylink: set the autoneg state in phylink_phy_change net: add high_order_alloc_disable sysctl/static key tcp: add tcp_tx_skb_cache sysctl ...
297 lines
6.9 KiB
C
297 lines
6.9 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Copyright (c) 2007-2012 Nicira, Inc.
|
|
*/
|
|
|
|
#include <linux/if_vlan.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/netdevice.h>
|
|
#include <linux/etherdevice.h>
|
|
#include <linux/ethtool.h>
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <net/dst.h>
|
|
#include <net/xfrm.h>
|
|
#include <net/rtnetlink.h>
|
|
|
|
#include "datapath.h"
|
|
#include "vport-internal_dev.h"
|
|
#include "vport-netdev.h"
|
|
|
|
/* Private area of an OVS internal net_device (lives in netdev_priv());
 * holds the back-pointer to the owning vport, set by internal_dev_create().
 */
struct internal_dev {
	struct vport *vport;
};
|
|
|
|
static struct vport_ops ovs_internal_vport_ops;	/* defined at bottom of file */
|
|
|
|
/* Map a net_device onto the internal_dev area stored in its priv space. */
static struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
	struct internal_dev *priv = netdev_priv(netdev);

	return priv;
}
|
|
|
|
/* Called with rcu_read_lock_bh. */
|
|
static netdev_tx_t
|
|
internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
{
|
|
int len, err;
|
|
|
|
len = skb->len;
|
|
rcu_read_lock();
|
|
err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
|
|
rcu_read_unlock();
|
|
|
|
if (likely(!err)) {
|
|
struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats);
|
|
|
|
u64_stats_update_begin(&tstats->syncp);
|
|
tstats->tx_bytes += len;
|
|
tstats->tx_packets++;
|
|
u64_stats_update_end(&tstats->syncp);
|
|
} else {
|
|
netdev->stats.tx_errors++;
|
|
}
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
/* ndo_open: let the stack start queueing packets to this device. */
static int internal_dev_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}
|
|
|
|
/* ndo_stop: stop the stack from queueing further packets to us. */
static int internal_dev_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
|
|
|
|
static void internal_dev_getinfo(struct net_device *netdev,
|
|
struct ethtool_drvinfo *info)
|
|
{
|
|
strlcpy(info->driver, "openvswitch", sizeof(info->driver));
|
|
}
|
|
|
|
/* ethtool ops: driver info plus the generic link-state helper only. */
static const struct ethtool_ops internal_dev_ethtool_ops = {
	.get_drvinfo = internal_dev_getinfo,
	.get_link = ethtool_op_get_link,
};
|
|
|
|
/* priv_destructor: runs from free_netdev(); releases the vport that was
 * paired with this netdev by internal_dev_create().
 */
static void internal_dev_destructor(struct net_device *dev)
{
	ovs_vport_free(ovs_internal_dev_get_vport(dev));
}
|
|
|
|
/* ndo_get_stats64: fill @stats from the shared netdev error/drop counters
 * plus the per-CPU rx/tx byte and packet counters in dev->tstats.
 */
static void
internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	/* Errors and drops are kept in the regular (non-per-CPU) stats. */
	stats->rx_errors = dev->stats.rx_errors;
	stats->tx_errors = dev->stats.tx_errors;
	stats->tx_dropped = dev->stats.tx_dropped;
	stats->rx_dropped = dev->stats.rx_dropped;

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *percpu_stats;
		struct pcpu_sw_netstats local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dev->tstats, i);

		/* Retry the snapshot until it wasn't raced by a writer on
		 * that CPU (u64_stats seqcount protocol).
		 */
		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		/* Accumulate the consistent per-CPU snapshot. */
		stats->rx_bytes += local_stats.rx_bytes;
		stats->rx_packets += local_stats.rx_packets;
		stats->tx_bytes += local_stats.tx_bytes;
		stats->tx_packets += local_stats.tx_packets;
	}
}
|
|
|
|
/* netdev ops for OVS internal devices. The ops pointer doubles as the
 * identity test in ovs_is_internal_dev().
 */
static const struct net_device_ops internal_dev_netdev_ops = {
	.ndo_open = internal_dev_open,
	.ndo_stop = internal_dev_stop,
	.ndo_start_xmit = internal_dev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_get_stats64 = internal_get_stats,
};
|
|
|
|
/* Minimal rtnl_link_ops so the device type shows up as "openvswitch". */
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
	.kind = "openvswitch",
};
|
|
|
|
/* alloc_netdev() setup callback: configure an internal device as an
 * Ethernet netdev owned by OVS with a random MAC.
 */
static void do_setup(struct net_device *netdev)
{
	ether_setup(netdev);

	netdev->max_mtu = ETH_MAX_MTU;

	netdev->netdev_ops = &internal_dev_netdev_ops;

	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
			      IFF_NO_QUEUE;
	/* free_netdev() both runs priv_destructor and frees the netdev. */
	netdev->needs_free_netdev = true;
	netdev->priv_destructor = internal_dev_destructor;
	netdev->ethtool_ops = &internal_dev_ethtool_ops;
	netdev->rtnl_link_ops = &internal_dev_link_ops;

	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
			   NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			   NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL;

	/* Note the ordering: vlan/enc features are snapshotted BEFORE the
	 * VLAN-offload bits are added to netdev->features below.
	 */
	netdev->vlan_features = netdev->features;
	netdev->hw_enc_features = netdev->features;
	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	netdev->hw_features = netdev->features & ~NETIF_F_LLTX;

	eth_hw_addr_random(netdev);
}
|
|
|
|
/* vport create callback: allocate a vport plus its backing netdev, wire
 * them to each other and register the netdev.
 *
 * Returns the new vport or an ERR_PTR() on failure.
 */
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct internal_dev *internal_dev;
	struct net_device *dev;
	int err;
	/* Whether the error path must free the vport itself; see below. */
	bool free_vport = true;

	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	dev = alloc_netdev(sizeof(struct internal_dev),
			   parms->name, NET_NAME_USER, do_setup);
	vport->dev = dev;
	if (!vport->dev) {
		err = -ENOMEM;
		goto error_free_vport;
	}
	vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vport->dev->tstats) {
		err = -ENOMEM;
		goto error_free_netdev;
	}

	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
	/* Link the netdev's private area back to its vport. */
	internal_dev = internal_dev_priv(vport->dev);
	internal_dev->vport = vport;

	/* Restrict bridge port to current netns. */
	if (vport->port_no == OVSP_LOCAL)
		vport->dev->features |= NETIF_F_NETNS_LOCAL;

	rtnl_lock();
	err = register_netdevice(vport->dev);
	if (err) {
		/* After a failed register_netdevice(), free_netdev() will
		 * invoke priv_destructor (internal_dev_destructor), which
		 * frees the vport — don't free it again here.
		 */
		free_vport = false;
		goto error_unlock;
	}

	dev_set_promiscuity(vport->dev, 1);
	rtnl_unlock();
	netif_start_queue(vport->dev);

	return vport;

error_unlock:
	rtnl_unlock();
	free_percpu(dev->tstats);
error_free_netdev:
	free_netdev(dev);
error_free_vport:
	if (free_vport)
		ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}
|
|
|
|
/* vport destroy callback: unregister and release the backing netdev.
 * The vport itself is freed later by internal_dev_destructor() when
 * free_netdev() runs (needs_free_netdev is set in do_setup()).
 */
static void internal_dev_destroy(struct vport *vport)
{
	netif_stop_queue(vport->dev);
	rtnl_lock();
	/* Undo the dev_set_promiscuity(dev, 1) from internal_dev_create(). */
	dev_set_promiscuity(vport->dev, -1);

	/* unregister_netdevice() waits for an RCU grace period. */
	unregister_netdevice(vport->dev);
	free_percpu(vport->dev->tstats);
	rtnl_unlock();
}
|
|
|
|
static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
|
|
{
|
|
struct net_device *netdev = skb->dev;
|
|
struct pcpu_sw_netstats *stats;
|
|
|
|
if (unlikely(!(netdev->flags & IFF_UP))) {
|
|
kfree_skb(skb);
|
|
netdev->stats.rx_dropped++;
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
skb_dst_drop(skb);
|
|
nf_reset(skb);
|
|
secpath_reset(skb);
|
|
|
|
skb->pkt_type = PACKET_HOST;
|
|
skb->protocol = eth_type_trans(skb, netdev);
|
|
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
|
|
|
|
stats = this_cpu_ptr(netdev->tstats);
|
|
u64_stats_update_begin(&stats->syncp);
|
|
stats->rx_packets++;
|
|
stats->rx_bytes += skb->len;
|
|
u64_stats_update_end(&stats->syncp);
|
|
|
|
netif_rx(skb);
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
/* vport ops for OVS internal devices. Note .send points at
 * internal_dev_recv(): "sending" on this vport means delivering the
 * packet into the local stack.
 */
static struct vport_ops ovs_internal_vport_ops = {
	.type = OVS_VPORT_TYPE_INTERNAL,
	.create = internal_dev_create,
	.destroy = internal_dev_destroy,
	.send = internal_dev_recv,
};
|
|
|
|
int ovs_is_internal_dev(const struct net_device *netdev)
|
|
{
|
|
return netdev->netdev_ops == &internal_dev_netdev_ops;
|
|
}
|
|
|
|
struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
|
|
{
|
|
if (!ovs_is_internal_dev(netdev))
|
|
return NULL;
|
|
|
|
return internal_dev_priv(netdev)->vport;
|
|
}
|
|
|
|
int ovs_internal_dev_rtnl_link_register(void)
|
|
{
|
|
int err;
|
|
|
|
err = rtnl_link_register(&internal_dev_link_ops);
|
|
if (err < 0)
|
|
return err;
|
|
|
|
err = ovs_vport_ops_register(&ovs_internal_vport_ops);
|
|
if (err < 0)
|
|
rtnl_link_unregister(&internal_dev_link_ops);
|
|
|
|
return err;
|
|
}
|
|
|
|
/* Reverse of ovs_internal_dev_rtnl_link_register(): drop the vport ops
 * first, then the rtnl link ops.
 */
void ovs_internal_dev_rtnl_link_unregister(void)
{
	ovs_vport_ops_unregister(&ovs_internal_vport_ops);
	rtnl_link_unregister(&internal_dev_link_ops);
}
|