/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * TODO
 *  - use IANA UDP port number (when defined)
 *  - IPv6 (not in RFC)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define VXLAN_VERSION	"0.1"

#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};
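
/*
 * Wire format note: vx_flags must be exactly htonl(VXLAN_FLAGS), i.e.
 * only the "valid VNI" flag set and all reserved flag bits clear, and
 * vx_vni carries the 24-bit VXLAN Network Identifier in its upper three
 * octets with the low octet reserved.  This is why the receive and
 * transmit paths below shift the VNI by 8 bits and drop frames that
 * have any of the low 8 bits set.
 */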

/* UDP port for VXLAN traffic. */
static unsigned int vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, uint, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
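
/*
 * The default of 8472 predates the IANA allocation; the assigned VXLAN
 * port is 4789 (hence the TODO above).  The parameter is read-only at
 * runtime (0444) but can be chosen at load time, for example with
 * "modprobe vxlan udp_port=4789".
 */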

/* per-net private data for this module */
static unsigned int vxlan_net_id;
struct vxlan_net {
	struct socket	  *sock;	/* UDP encap socket */
	struct hlist_head vni_list[VNI_HASH_SIZE];
};

struct vxlan_rdst {
	struct rcu_head		 rcu;
	__be32			 remote_ip;
	__be16			 remote_port;
	u32			 remote_vni;
	u32			 remote_ifindex;
	struct vxlan_rdst	*remote_next;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct vxlan_rdst remote;
	u16		  state;	/* see ndm_state */
	u8		  eth_addr[ETH_ALEN];
};
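
/*
 * A forwarding table entry maps one inner Ethernet address to a singly
 * linked list of remote destinations.  The first destination lives in
 * the entry itself ("remote"); additional destinations appended via
 * NLM_F_APPEND are chained through remote_next and released in
 * vxlan_fdb_free().
 */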

/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;
	struct net_device *dev;
	__u32		  vni;		/* virtual network id */
	__be32		  gaddr;	/* multicast group */
	__be32		  saddr;	/* source address */
	unsigned int	  link;		/* link to multicast over */
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};

#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline struct hlist_head *vni_head(struct net *net, u32 id)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
	struct vxlan_dev *vxlan;

	hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) {
		if (vxlan->vni == id)
			return vxlan;
	}

	return NULL;
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = rdst->remote_ip != htonl(INADDR_ANY);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->vni &&
	    nla_put_be32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
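
/*
 * vxlan_fdb_info() serves both the FDB dump path (RTM_NEWNEIGH with
 * NLM_F_MULTI) and the miss notifications (RTM_GETNEIGH).  NTF_SELF
 * marks the entry as owned by the device itself rather than by a
 * bridge master, which is how iproute2's bridge utility reports
 * device-managed entries.
 */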

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(__be32)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     const struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, &fdb->remote);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.state = NUD_STALE;
	f.remote.remote_ip = ipa; /* goes to NDA_DST */
	f.remote.remote_vni = VXLAN_N_VID;

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.state = NUD_STALE;
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}
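
/*
 * The two *_miss() helpers build a skeleton FDB entry on the stack and
 * send it to user space as an RTM_GETNEIGH event, so that an external
 * controller can resolve unknown destination MACs (L2 miss) or IPs
 * (L3 miss) and install the corresponding forwarding entries.
 */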

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}

	return NULL;
}

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    __be32 ip, __u32 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd_prev, *rd;

	rd_prev = NULL;
	for (rd = &f->remote; rd; rd = rd->remote_next) {
		if (rd->remote_ip == ip &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return 0;
		rd_prev = rd;
	}
	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	rd->remote_next = NULL;
	rd_prev->remote_next = rd;
	return 1;
}
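
/*
 * Return convention for vxlan_fdb_append(): 0 means the destination was
 * already on the list, 1 means a new destination was linked in (so the
 * caller should send a netlink notification), and a negative value is
 * an allocation error.
 */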

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags,
			    __u32 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_APPEND) &&
		    is_multicast_ether_addr(f->eth_addr)) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->remote.remote_ip = ip;
		f->remote.remote_port = port;
		f->remote.remote_vni = vni;
		f->remote.remote_ifindex = ifindex;
		f->remote.remote_next = NULL;
		f->state = state;
		f->updated = f->used = jiffies;
		memcpy(f->eth_addr, mac, ETH_ALEN);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}

void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);

	while (f->remote.remote_next) {
		struct vxlan_rdst *rd = f->remote.remote_next;

		f->remote.remote_next = rd->remote_next;
		kfree(rd);
	}
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}
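
/*
 * Removal in vxlan_fdb_destroy() is RCU-safe: the entry is unlinked
 * with hlist_del_rcu() while hash_lock is held, and the memory
 * (including any chained vxlan_rdst list) is only freed from
 * vxlan_fdb_free() after a grace period, so lockless readers in
 * vxlan_find_mac() never touch freed memory.
 */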

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct net *net = dev_net(vxlan->dev);
	__be32 ip;
	u32 port, vni, ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	if (nla_len(tb[NDA_DST]) != sizeof(__be32))
		return -EAFNOSUPPORT;

	ip = nla_get_be32(tb[NDA_DST]);

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(u32))
			return -EINVAL;
		port = nla_get_u32(tb[NDA_PORT]);
	} else
		port = vxlan_port;

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		vni = nla_get_u32(tb[NDA_VNI]);
	} else
		vni = vxlan->vni;

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = dev_get_by_index(net, ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
		dev_put(tdev);
	} else
		ifindex = 0;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags, port,
			       vni, ifindex);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
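
/*
 * This is the ndo_fdb_add hook, reached through the RTM_NEWNEIGH path.
 * With a reasonably recent iproute2 a static entry can be installed
 * with, for example:
 *	bridge fdb add 00:11:22:33:44:55 dev vxlan0 dst 192.168.1.10
 * where the optional "port", "vni" and "via" keywords correspond to the
 * NDA_PORT, NDA_VNI and NDA_IFINDEX attributes parsed above.
 */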

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (f) {
		vxlan_fdb_destroy(vxlan, f);
		err = 0;
	}
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;
			for (rd = &f->remote; rd; rd = rd->remote_next) {
				if (idx < cb->args[0])
					goto skip;

				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					break;
skip:
				++idx;
			}
		}
	}

	return idx;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 */
static void vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		f->used = jiffies;
		if (likely(f->remote.remote_ip == src_ip))
			return;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &f->remote.remote_ip, &src_ip);

		f->remote.remote_ip = src_ip;
		f->updated = jiffies;
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);
		err = vxlan_fdb_create(vxlan, src_mac, src_ip,
				       NUD_REACHABLE,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan_port, vxlan->vni, 0);
		spin_unlock(&vxlan->hash_lock);
	}
}
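
/*
 * vxlan_snoop() works like bridge MAC learning, except that what is
 * learned is the outer source IP of the sending VTEP: each decapsulated
 * packet refreshes (or creates, as NUD_REACHABLE) the entry for its
 * inner source MAC, and a change of VTEP address is logged as a
 * migration.
 */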

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn,
			     const struct vxlan_dev *this)
{
	const struct vxlan_dev *vxlan;
	unsigned h;

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) {
			if (vxlan == this)
				continue;

			if (!netif_running(vxlan->dev))
				continue;

			if (vxlan->gaddr == this->gaddr)
				return true;
		}

	return false;
}

/* kernel equivalent to IP_ADD_MEMBERSHIP */
static int vxlan_join_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->gaddr,
		.imr_ifindex		= vxlan->link,
	};
	int err;

	/* Already a member of group */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast join */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_join_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}

/* kernel equivalent to IP_DROP_MEMBERSHIP */
static int vxlan_leave_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	int err = 0;
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->gaddr,
		.imr_ifindex		= vxlan->link,
	};

	/* Only leave group when last vxlan is done. */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast leave */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}
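
/*
 * All VXLAN devices in a network namespace share the single UDP socket
 * in vxlan_net, so group membership is effectively reference counted:
 * vxlan_group_used() keeps the socket from joining the same multicast
 * group twice and from leaving it while another running device with the
 * same group address still needs it.
 */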

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct pcpu_tstats *stats;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	__skb_pull(skb, sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	vxlan = vxlan_find_vni(sock_net(sk), vni);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d\n", vni);
		goto drop;
	}

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	skb_reset_mac_header(skb);

	/* Re-examine inner Ethernet packet */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	if (vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);

	/* If the NIC driver gave us an encapsulated packet with
	 * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
	 * leave the CHECKSUM_UNNECESSARY, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
	    !(vxlan->dev->features & NETIF_F_RXCSUM))
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
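
/*
 * Return convention of the UDP encap_rcv hook: 0 means the skb was
 * consumed here (delivered to the vxlan device or dropped); a positive
 * return, used after pushing the UDP header back on, tells the UDP
 * layer to continue normal delivery of the packet.
 */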

static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && f->remote.remote_ip == htonl(INADDR_ANY)) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, tip);
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
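
/*
 * With VXLAN_F_PROXY set, arp_reduce() answers ARP requests locally
 * from the kernel neighbour table instead of flooding them across the
 * overlay; entries whose remote is INADDR_ANY are treated as
 * bridge-local and left alone, and unresolved targets optionally raise
 * an L3 miss notification.
 */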

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;
	struct iphdr *pip;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		break;
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, pip->daddr);
	return false;
}

static void vxlan_sock_free(struct sk_buff *skb)
{
	sock_put(skb->sk);
}

/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;

	skb_orphan(skb);
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = vxlan_sock_free;
}

/* Compute source port for outgoing packet
 *   first choice to use L4 flow hash since it will spread
 *     better and maybe available from hardware
 *   secondary choice is to use jhash on the Ethernet header
 */
static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
{
	unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
	u32 hash;

	hash = skb_get_rxhash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return (((u64) hash * range) >> 32) + vxlan->port_min;
}
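
/*
 * In vxlan_src_port(), (hash * range) >> 32 scales the 32-bit hash
 * uniformly into [0, range) without a division, so adding port_min
 * yields a UDP source port inside the configured range.  A given flow
 * always hashes to the same port, letting ECMP/RSS in the underlay
 * spread distinct flows across paths and queues.
 */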

static int handle_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;

		skb_shinfo(skb)->gso_type |= (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP);
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return 0;
}

/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);

	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK),
			    eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += skb->len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		skb->dev->stats.rx_dropped++;
	}
}

static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
				  struct vxlan_rdst *rdst, bool did_rsc)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct iphdr *old_iph;
	struct iphdr *iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	__be32 dst;
	__u16 src_port, dst_port;
	u32 vni;
	__be16 df = 0;
	__u8 tos, ttl;

	dst_port = rdst->remote_port ? rdst->remote_port : vxlan_port;
	vni = rdst->remote_vni;
	dst = rdst->remote_ip;

	if (!dst) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return NETDEV_TX_OK;
		}
		goto drop;
	}

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan, skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = rdst->remote_ifindex;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.daddr = dst;
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	/* Bypass encapsulation if the destination is local */
	if (rt->rt_flags & RTCF_LOCAL) {
		struct vxlan_dev *dst_vxlan;

		ip_rt_put(rt);
		dst_vxlan = vxlan_find_vni(dev_net(dev), vni);
		if (!dst_vxlan)
			goto tx_error;
		vxlan_encap_bypass(skb, vxlan, dst_vxlan);
		return NETDEV_TX_OK;
	}

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = htons(dst_port);
	uh->source = htons(src_port);

	uh->len = htons(skb->len);
	uh->check = 0;

	__skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= IPPROTO_UDP;
	iph->tos	= ip_tunnel_ecn_encap(tos, old_iph, skb);
	iph->daddr	= dst;
	iph->saddr	= fl4.saddr;
	iph->ttl	= ttl ? : ip4_dst_hoplimit(&rt->dst);
	tunnel_ip_select_ident(skb, old_iph, &rt->dst);

	nf_reset(skb);

	vxlan_set_owner(dev, skb);

	if (handle_offloads(skb))
		goto drop;

	iptunnel_xmit(skb, dev);
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 *   source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst group, *rdst0, *rdst;
	struct vxlan_fdb *f;
	int rc1, rc;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
		return arp_reduce(dev, skb);
	else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
		did_rsc = route_shortcircuit(dev, skb);

	f = vxlan_find_mac(vxlan, eth->h_dest);
	if (f == NULL) {
		did_rsc = false;
		group.remote_port = vxlan_port;
		group.remote_vni = vxlan->vni;
		group.remote_ip = vxlan->gaddr;
		group.remote_ifindex = vxlan->link;
		group.remote_next = 0;
		rdst0 = &group;

		if (group.remote_ip == htonl(INADDR_ANY) &&
		    (vxlan->flags & VXLAN_F_L2MISS) &&
		    !is_multicast_ether_addr(eth->h_dest))
			vxlan_fdb_miss(vxlan, eth->h_dest);
	} else
		rdst0 = &f->remote;

	rc = NETDEV_TX_OK;

	/* if there are multiple destinations, send copies */
	for (rdst = rdst0->remote_next; rdst; rdst = rdst->remote_next) {
		struct sk_buff *skb1;

		skb1 = skb_clone(skb, GFP_ATOMIC);
		rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
		if (rc == NETDEV_TX_OK)
			rc = rc1;
	}

	rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
	if (rc == NETDEV_TX_OK)
		rc = rc1;
	return rc;
}
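
/*
 * Destination selection in vxlan_xmit(): a known destination MAC uses
 * its forwarding entry's remote list, where each additional destination
 * gets a clone of the skb and the original is sent to the first one.
 * An unknown MAC falls back to the ad-hoc "group" destination (the
 * configured multicast group, or a drop when none is set), optionally
 * raising an L2 miss notification first.
 */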

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	if (vxlan->gaddr) {
		err = vxlan_join_group(dev);
		if (err)
			return err;
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	if (vxlan->gaddr)
		vxlan_leave_group(dev);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

static void vxlan_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = vxlan_free;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->priv_flags	&= ~IFF_XMIT_DST_RELEASE;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(&low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
|
|
|
|
|
|
|
|
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
};

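/* Validate netlink configuration: MAC address, VNI, group address, port range */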
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_GROUP]) {
		__be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
		if (!IN_MULTICAST(ntohl(gaddr))) {
			pr_debug("group address is not IPv4 multicast\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

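/* ethtool interface: report driver name/version and link state */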
static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

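/* Create a new vxlan device: apply netlink attributes, register the
 * netdev and hash it by VNI for this namespace.
 */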
static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__u32 vni;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	if (vxlan_find_vni(net, vni)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}
	vxlan->vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK] &&
	    (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			= __dev_get_by_index(net, vxlan->link);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", vxlan->link);
			return -ENODEV;
		}

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
				       VXLAN_HEADROOM;
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);

	err = register_netdevice(dev);
	if (!err)
		hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));

	return err;
}

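/* Unlink from the VNI hash and queue the device for unregistration */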
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	hlist_del_rcu(&vxlan->hlist);

	unregister_netdevice_queue(dev, head);
}

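/* Worst-case netlink attribute space needed by vxlan_fill_info() */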
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		0;
}

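/* Dump the device configuration as netlink attributes */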
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
		goto nla_put_failure;

	if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
		goto nla_put_failure;

	if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

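/* rtnetlink glue for "ip link add ... type vxlan" */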
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};

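/* Per-namespace init: open/bind the UDP encap socket and init the VNI hash */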
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int rc;
	unsigned h;

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		return rc;
	}
	/* Put in proper namespace */
	sk = vn->sock->sk;
	sk_change_net(sk, net);

	vxlan_addr.sin_port = htons(vxlan_port);

	rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		vn->sock = NULL;
		return rc;
	}

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->vni_list[h]);

	return 0;
}

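/* Per-namespace teardown: close remaining devices and release the socket */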
static __net_exit void vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan;
	unsigned h;

	rtnl_lock();
	for (h = 0; h < VNI_HASH_SIZE; ++h)
		hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist)
			dev_close(vxlan->dev);
	rtnl_unlock();

	if (vn->sock) {
		sk_release_kernel(vn->sock->sk);
		vn->sock = NULL;
	}
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

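/* Module init: register per-net operations first, then the rtnl link type */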
static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_device(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out2;

	return 0;

out2:
	unregister_pernet_device(&vxlan_net_ops);
out1:
	return rc;
}
module_init(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_pernet_device(&vxlan_net_ops);
	rcu_barrier();
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
MODULE_ALIAS_RTNL_LINK("vxlan");