2019-05-27 13:55:01 +07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2010-02-28 02:41:45 +07:00
|
|
|
/*
|
|
|
|
* Bridge multicast support.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/err.h>
|
2014-06-07 23:26:28 +07:00
|
|
|
#include <linux/export.h>
|
2010-02-28 02:41:45 +07:00
|
|
|
#include <linux/if_ether.h>
|
|
|
|
#include <linux/igmp.h>
|
2019-01-21 13:26:28 +07:00
|
|
|
#include <linux/in.h>
|
2010-02-28 02:41:45 +07:00
|
|
|
#include <linux/jhash.h>
|
|
|
|
#include <linux/kernel.h>
|
2010-02-28 02:41:51 +07:00
|
|
|
#include <linux/log2.h>
|
2010-02-28 02:41:45 +07:00
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/netfilter_bridge.h>
|
|
|
|
#include <linux/random.h>
|
|
|
|
#include <linux/rculist.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/timer.h>
|
2013-05-22 04:52:54 +07:00
|
|
|
#include <linux/inetdevice.h>
|
2016-10-31 19:21:05 +07:00
|
|
|
#include <linux/mroute.h>
|
2010-02-28 02:41:45 +07:00
|
|
|
#include <net/ip.h>
|
2017-02-09 20:54:40 +07:00
|
|
|
#include <net/switchdev.h>
|
2011-12-10 16:48:31 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2019-01-21 13:26:28 +07:00
|
|
|
#include <linux/icmpv6.h>
|
2010-04-22 23:54:22 +07:00
|
|
|
#include <net/ipv6.h>
|
|
|
|
#include <net/mld.h>
|
2010-04-28 00:16:54 +07:00
|
|
|
#include <net/ip6_checksum.h>
|
2013-09-04 07:13:39 +07:00
|
|
|
#include <net/addrconf.h>
|
2010-04-22 23:54:22 +07:00
|
|
|
#endif
|
2010-02-28 02:41:45 +07:00
|
|
|
|
|
|
|
#include "br_private.h"
|
|
|
|
|
2018-12-05 20:14:24 +07:00
|
|
|
/* rhashtable layout for the bridge multicast database (mdb):
 * entries are struct net_bridge_mdb_entry, keyed by the full
 * struct br_ip (address + protocol + vlan id) embedded at ->addr.
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};
|
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
static void br_multicast_start_querier(struct net_bridge *br,
|
2014-06-07 23:26:26 +07:00
|
|
|
struct bridge_mcast_own_query *query);
|
2015-06-19 15:22:57 +07:00
|
|
|
static void br_multicast_add_router(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port);
|
2015-07-13 19:28:37 +07:00
|
|
|
static void br_ip4_multicast_leave_group(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
|
|
|
__be32 group,
|
2017-01-22 03:01:32 +07:00
|
|
|
__u16 vid,
|
|
|
|
const unsigned char *src);
|
|
|
|
|
2017-02-09 20:54:41 +07:00
|
|
|
static void __del_port_router(struct net_bridge_port *p);
|
2015-07-13 19:28:37 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
static void br_ip6_multicast_leave_group(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
|
|
|
const struct in6_addr *group,
|
2017-01-22 03:01:32 +07:00
|
|
|
__u16 vid, const unsigned char *src);
|
2015-07-13 19:28:37 +07:00
|
|
|
#endif
|
2012-04-13 09:37:42 +07:00
|
|
|
|
2018-12-05 20:14:24 +07:00
|
|
|
/* Look up an mdb entry by full br_ip key.
 * Caller must be in an RCU read-side critical section (fast path).
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
|
|
|
|
|
2018-12-05 20:14:24 +07:00
|
|
|
/* Look up an mdb entry by full br_ip key from the control path.
 * Caller must hold br->multicast_lock (asserted below); a short RCU
 * read section is still taken around the lookup to satisfy rhashtable.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}
|
|
|
|
|
2018-12-05 20:14:24 +07:00
|
|
|
/* Convenience wrapper: look up an IPv4 group/vid pair in the mdb. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	/* Zero the whole key - it is hashed as raw bytes, so padding
	 * and unused union members must be deterministic.
	 */
	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
|
|
|
|
|
2011-12-10 16:48:31 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: look up an IPv6 group/vid pair in the mdb. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	/* Zero the whole key - it is hashed as raw bytes. */
	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif
|
|
|
|
|
2010-02-28 02:41:45 +07:00
|
|
|
/* Data-path lookup: map an incoming multicast skb to its mdb entry.
 * Returns NULL (flood the packet) when snooping is disabled, when the
 * skb is itself IGMP/MLD control traffic, or for non-IP protocols.
 * Runs under RCU.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return NULL;

	/* igmp was classified earlier in the input path; control frames
	 * are not forwarded via mdb entries.
	 */
	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get_rcu(br, &ip);
}
|
|
|
|
|
2017-11-03 13:21:10 +07:00
|
|
|
/* Group membership timer: fires when no membership reports were seen
 * for the group within the membership interval.  Drops the host-joined
 * state and, if no port groups remain either, removes the entry from
 * the mdb hashtable and list.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	/* timer_pending() means the timer was re-armed after this
	 * callback was scheduled - the expiry is stale, do nothing.
	 */
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_rcu(&mp->mdb_node);

	/* Readers may still hold an RCU reference; free after grace period. */
	kfree_rcu(mp, rcu);

out:
	spin_unlock(&br->multicast_lock);
}
|
|
|
|
|
|
|
|
/* Unlink and free one port group from its mdb entry.
 * Caller must hold br->multicast_lock.  If the entry becomes empty
 * (no ports and no host join), its timer is fired immediately so
 * br_multicast_group_expired() reaps it.
 */
static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mp = br_mdb_ip_get(br, &pg->addr);
	if (WARN_ON(!mp))
		return;

	/* Walk the singly linked RCU list to find pg's predecessor slot. */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		kfree_rcu(p, rcu);

		if (!mp->ports && !mp->host_joined &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	/* pg was expected to be on mp's list - flag the inconsistency. */
	WARN_ON(1);
}
|
|
|
|
|
2017-11-03 13:21:10 +07:00
|
|
|
/* Per-port group timer: removes the port from the group when no
 * membership report refreshed it.  Permanent (static) entries and
 * already-unhashed ones are left alone; a pending timer means the
 * expiry is stale.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}
|
|
|
|
|
2010-04-18 10:42:07 +07:00
|
|
|
/* Build an IGMP query skb (general query when group == 0, otherwise a
 * group-specific query).  Emits IGMPv2 or IGMPv3 format depending on
 * br->multicast_igmp_version.  Returns NULL on allocation failure.
 * On success *igmp_type is set for the caller's traffic accounting.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct igmpv3_query *ihv3;
	size_t igmp_hdr_size;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3)
		igmp_hdr_size = sizeof(*ihv3);
	/* +4 covers the Router Alert IP option appended after the header. */
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 igmp_hdr_size + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	/* Destination 01:00:5e:00:00:01 = all-hosts multicast MAC. */
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;		/* 24 bytes: base header + 4-byte RA option */
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	/* Router Alert option immediately after the fixed IP header. */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* Max response code: shorter for group-specific queries. */
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = 0;
		ihv3->resv = 0;
		ihv3->suppress = 0;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
		break;
	}

	skb_put(skb, igmp_hdr_size);
	/* Leave skb->data at the network header, as the xmit path expects. */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
|
|
|
|
|
2011-12-10 16:48:31 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb (general query when grp is ::, otherwise
 * group-specific).  Emits MLDv1 or MLDv2 depending on
 * br->multicast_mld_version.  Returns NULL on allocation failure or
 * when no usable IPv6 source address exists on the bridge device (the
 * BROPT_HAS_IPV6_ADDR option tracks that state for other callers).
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct mld2_query *mld2q;
	unsigned long interval;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	size_t mld_hdr_size;
	struct sk_buff *skb;
	struct ethhdr *eth;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2)
		mld_hdr_size = sizeof(*mld2q);
	/* +8 covers the hop-by-hop extension header carrying Router Alert. */
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + mld_hdr_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	/* Version 6, traffic class 0, flow label 0. */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	/* ff02::1 - all-nodes link-local multicast. */
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *grp;
		mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  sizeof(*mldq), IPPROTO_ICMPV6,
						  csum_partial(mldq,
							       sizeof(*mldq),
							       0));
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = 0;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = 0;
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *grp;
		mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						     sizeof(*mld2q),
						     IPPROTO_ICMPV6,
						     csum_partial(mld2q,
								  sizeof(*mld2q),
								  0));
		break;
	}
	skb_put(skb, mld_hdr_size);

	/* Leave skb->data at the network header, as the xmit path expects. */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
|
|
|
|
|
2010-04-18 10:42:07 +07:00
|
|
|
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
|
2016-06-28 21:57:06 +07:00
|
|
|
struct br_ip *addr,
|
|
|
|
u8 *igmp_type)
|
2010-04-18 10:42:07 +07:00
|
|
|
{
|
|
|
|
switch (addr->proto) {
|
|
|
|
case htons(ETH_P_IP):
|
2016-06-28 21:57:06 +07:00
|
|
|
return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
|
2011-12-10 16:48:31 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2010-04-22 23:54:22 +07:00
|
|
|
case htons(ETH_P_IPV6):
|
2016-06-28 21:57:06 +07:00
|
|
|
return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
|
|
|
|
igmp_type);
|
2010-04-22 23:54:22 +07:00
|
|
|
#endif
|
2010-04-18 10:42:07 +07:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2012-12-12 05:23:08 +07:00
|
|
|
/* Find-or-create an mdb entry for @group.
 * Caller must hold br->multicast_lock (GFP_ATOMIC allocation).
 * Returns the existing or new entry, or an ERR_PTR: -E2BIG when the
 * hash limit is reached (multicast snooping is then disabled entirely),
 * -ENOMEM or an rhashtable insertion error otherwise.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}
|
|
|
|
|
2012-12-12 05:23:08 +07:00
|
|
|
/* Allocate and link a new port group entry.
 * Caller must hold the bridge multicast lock (GFP_ATOMIC allocation);
 * the new entry is chained in front of @next and added to the port's
 * mglist.  The entry's expiry timer is set up but not armed here.
 * Returns NULL on allocation failure.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);

	/* No source MAC means the entry matches any sender. */
	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}
|
|
|
|
|
2017-01-22 03:01:32 +07:00
|
|
|
static bool br_port_group_equal(struct net_bridge_port_group *p,
|
|
|
|
struct net_bridge_port *port,
|
|
|
|
const unsigned char *src)
|
|
|
|
{
|
|
|
|
if (p->port != port)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!(port->flags & BR_MULTICAST_TO_UNICAST))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return ether_addr_equal(src, p->eth_addr);
|
|
|
|
}
|
|
|
|
|
2019-08-17 18:22:13 +07:00
|
|
|
/* Mark the bridge device itself as a member of @mp's group and
 * (re)arm the membership expiry timer.  Sends an RTM_NEWMDB netlink
 * notification only on the false->true transition and only if @notify.
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (notify)
			br_mdb_notify(mp->br->dev, NULL, &mp->addr,
				      RTM_NEWMDB, 0);
	}
	/* Refresh the timer even when already joined. */
	mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}
|
|
|
|
|
|
|
|
/* Clear the bridge device's own membership of @mp's group.  A netlink
 * RTM_DELMDB notification is emitted only on the true->false transition
 * and only when @notify is set; a no-op if the host was not joined.
 */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (mp->host_joined) {
		mp->host_joined = false;
		if (notify)
			br_mdb_notify(mp->br->dev, NULL, &mp->addr,
				      RTM_DELMDB, 0);
	}
}
|
|
|
|
|
2010-02-28 02:41:45 +07:00
|
|
|
/* Record a membership report: join @group on @port (or on the bridge
 * device itself when @port is NULL).  Creates the mdb entry and port
 * group as needed, keeps the port list ordered by descending port
 * pointer, and refreshes the membership timer on re-reports.
 * Returns 0 on success or when the bridge/port is down, a negative
 * errno on allocation/insertion failure.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		/* Report originated from the bridge device itself. */
		br_multicast_host_join(mp, true);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		/* List is sorted by port pointer; insert before smaller. */
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}
|
|
|
|
|
2010-04-18 10:42:07 +07:00
|
|
|
/* IPv4 wrapper for br_multicast_add_group().  Link-local groups
 * (224.0.0.x) are never snooped and are silently accepted.
 */
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	/* Key is hashed as raw bytes - zero unused union space. */
	memset(&br_group, 0, sizeof(br_group));
	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group, src);
}
|
|
|
|
|
2011-12-10 16:48:31 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 wrapper for br_multicast_add_group().  The all-nodes group
 * (ff02::1) is never snooped and is silently accepted.
 */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	/* Key is hashed as raw bytes - zero unused union space. */
	memset(&br_group, 0, sizeof(br_group));
	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group, src);
}
#endif
|
|
|
|
|
2017-11-03 13:21:10 +07:00
|
|
|
/* Per-port multicast router timer: demote the port from the router
 * list once no router advertisements refreshed it.  Ports whose router
 * mode is administratively fixed (disabled or permanent) are skipped,
 * as are stale expirations (timer re-armed meanwhile).
 */
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
		from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}
|
|
|
|
|
2017-10-09 16:15:31 +07:00
|
|
|
/* Propagate the bridge's "is a multicast router present" state to
 * offloading hardware via switchdev (deferred attribute set).
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}
|
|
|
|
|
2017-11-03 13:21:10 +07:00
|
|
|
/* Bridge-level multicast router timer: clears the hardware mrouter
 * state when the bridge itself stops acting as a multicast router.
 * Administratively fixed modes and stale expirations are skipped, in
 * the same way as the per-port variant above.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}
|
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
/* Other-querier-present timer expired: the foreign querier went away,
 * so resume sending our own queries (if the bridge is up and snooping
 * is enabled).
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}
|
|
|
|
|
2017-11-03 13:21:10 +07:00
|
|
|
/* IPv4 other-querier timer callback: delegate with the IPv4 own-query
 * state.
 */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 other-querier timer callback: delegate with the IPv6 own-query
 * state.
 */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif
|
|
|
|
|
2014-06-07 23:26:27 +07:00
|
|
|
/* Record the source address of a locally-originated query as the
 * bridge's own querier address, per address family.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
|
|
|
|
|
2010-04-18 10:42:07 +07:00
|
|
|
/* Build and emit one query.  With a @port the skb is transmitted out
 * of that port through the netfilter LOCAL_OUT hook; without one the
 * bridge acts as the querier itself and the query is looped back into
 * the local receive path via netif_rx().  Stats are counted in the
 * matching direction (TX vs RX).
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
|
|
|
|
|
|
|
|
/* Send a general query for the address family that @own_query belongs
 * to, unless another querier is currently active on the segment (its
 * presence timer is still pending).  Afterwards re-arm our own query
 * timer, using the shorter startup interval until the startup query
 * count has been reached.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		return;

	/* Zero group address => general query. */
	memset(&br_group.u, 0, sizeof(br_group.u));

	/* Identify the family by which own-query struct we were given. */
	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
|
|
|
|
|
2014-06-07 23:26:26 +07:00
|
|
|
static void
|
|
|
|
br_multicast_port_query_expired(struct net_bridge_port *port,
|
|
|
|
struct bridge_mcast_own_query *query)
|
2010-02-28 02:41:45 +07:00
|
|
|
{
|
|
|
|
struct net_bridge *br = port->br;
|
|
|
|
|
|
|
|
spin_lock(&br->multicast_lock);
|
2010-03-06 08:14:09 +07:00
|
|
|
if (port->state == BR_STATE_DISABLED ||
|
|
|
|
port->state == BR_STATE_BLOCKING)
|
2010-02-28 02:41:45 +07:00
|
|
|
goto out;
|
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
if (query->startup_sent < br->multicast_startup_query_count)
|
|
|
|
query->startup_sent++;
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
br_multicast_send_query(port->br, port, query);
|
2010-02-28 02:41:45 +07:00
|
|
|
|
|
|
|
out:
|
|
|
|
spin_unlock(&br->multicast_lock);
|
|
|
|
}
|
|
|
|
|
2017-11-03 13:21:10 +07:00
|
|
|
/* IPv4 own-query timer callback: recover the owning port from the timer
 * and delegate to the common expiry handler.
 */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 own-query timer callback: recover the owning port from the timer
 * and delegate to the common expiry handler.
 */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif
|
|
|
|
|
2017-02-09 20:54:40 +07:00
|
|
|
/* Push the bridge's multicast-enabled state down to switchdev drivers.
 * Note the attribute carries the *disabled* flag, hence the inversion
 * of @value.
 */
static void br_mc_disabled_update(struct net_device *dev, bool value)
{
	struct switchdev_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.orig_dev = dev;
	attr.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED;
	attr.flags = SWITCHDEV_F_DEFER;
	attr.u.mc_disabled = !value;

	switchdev_port_attr_set(dev, &attr);
}
|
|
|
|
|
2016-06-28 21:57:06 +07:00
|
|
|
/* Initialise per-port multicast state when a port is added to a bridge.
 *
 * Sets the default router mode, arms (but does not start) the router and
 * own-query timers, notifies switchdev of the current snooping state and
 * allocates the per-CPU multicast statistics.
 *
 * Returns 0 on success or -ENOMEM if the statistics allocation fails.
 * NOTE(review): on -ENOMEM the timers have already been set up; callers
 * are presumably expected to tear the port down again — confirm.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	timer_setup(&port->multicast_router_timer,
		    br_multicast_router_expired, 0);
	timer_setup(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	br_mc_disabled_update(port->dev,
			      br_opt_get(port->br, BROPT_MULTICAST_ENABLED));

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
|
|
|
|
|
|
|
|
/* Tear down per-port multicast state when a port is removed from the
 * bridge: drop any remaining port groups, stop the router timer and free
 * the per-CPU statistics.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	/* _sync: make sure a concurrently running callback has finished */
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}
|
|
|
|
|
2014-06-07 23:26:26 +07:00
|
|
|
/* (Re)start one own-querier instance: reset its startup-query budget and
 * fire the query timer immediately.
 *
 * The try_to_del_timer_sync()/del_timer() pair makes sure we only rearm
 * the timer if we could stop any pending/running instance first, so the
 * expiry handler is not raced.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
|
|
|
|
|
2016-10-18 23:09:48 +07:00
|
|
|
/* Enable multicast processing on a port.  Caller holds br->multicast_lock
 * (see br_multicast_enable_port()).  No-op while snooping is disabled or
 * the bridge device is down.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	/* A permanently-configured router port goes straight back on the
	 * router list if it is not already there.
	 */
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2016-10-18 23:09:48 +07:00
|
|
|
void br_multicast_enable_port(struct net_bridge_port *port)
|
|
|
|
{
|
|
|
|
struct net_bridge *br = port->br;
|
|
|
|
|
|
|
|
spin_lock(&br->multicast_lock);
|
|
|
|
__br_multicast_enable_port(port);
|
2010-02-28 02:41:45 +07:00
|
|
|
spin_unlock(&br->multicast_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Disable multicast processing on a port: flush its non-permanent groups,
 * take it off the router list and stop all of its multicast timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	/* _safe: br_multicast_del_pg() unlinks the entry we are visiting */
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
|
|
|
|
|
2010-04-18 10:42:07 +07:00
|
|
|
/* Parse an IGMPv3 membership report and fold each group record into the
 * bridge's MDB, treating every record as an IGMPv2 join or leave.
 *
 * @skb: packet with the transport header set to the IGMPv3 report.
 * Each record's bounds are validated with ip_mc_may_pull() before it is
 * dereferenced.  Returns 0 on success or -EINVAL on a truncated report;
 * also propagates errors from br_ip4_multicast_add_group().
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* make sure the fixed part of the record is present */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* skip the source list: 4 bytes per IPv4 source address */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record type: ignore, keep parsing */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* TO_IN/IS_IN with no sources means "leave" in v2 terms */
		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid, src);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
|
|
|
|
|
2011-12-10 16:48:31 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 listener report and fold each multicast address record
 * into the bridge's MDB, treating every record as an MLDv1 join or leave.
 *
 * @skb: packet with the transport header set to the MLDv2 report.
 * The nsrcs field is fetched via skb_header_pointer() because the record
 * may not be linear; record bounds are then validated with
 * ipv6_mc_may_pull().  Returns 0 on success or -EINVAL on a truncated
 * report; also propagates errors from br_ip6_multicast_add_group().
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	unsigned int grec_len;
	int i;
	int len;
	int num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		/* Bound-check the 16-bit nsrcs field itself.  Fix: the
		 * original used sizeof(_nsrcs) — the size of a *pointer*
		 * (8 bytes) — instead of the 2-byte field, over-requiring
		 * payload past the field.
		 */
		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		/* whole record: fixed header plus nsrcs source addresses */
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record type: ignore, keep parsing */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* TO_IN/IS_IN with no sources means "done" in v1 terms */
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid, src);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
#endif
|
|
|
|
|
2014-06-07 23:26:27 +07:00
|
|
|
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
|
2014-06-07 23:26:29 +07:00
|
|
|
struct net_bridge_port *port,
|
2014-06-07 23:26:27 +07:00
|
|
|
__be32 saddr)
|
|
|
|
{
|
|
|
|
if (!timer_pending(&br->ip4_own_query.timer) &&
|
|
|
|
!timer_pending(&br->ip4_other_query.timer))
|
|
|
|
goto update;
|
|
|
|
|
|
|
|
if (!br->ip4_querier.addr.u.ip4)
|
|
|
|
goto update;
|
|
|
|
|
|
|
|
if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
|
|
|
|
goto update;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
update:
|
|
|
|
br->ip4_querier.addr.u.ip4 = saddr;
|
|
|
|
|
2014-06-07 23:26:29 +07:00
|
|
|
/* update protected by general multicast_lock by caller */
|
|
|
|
rcu_assign_pointer(br->ip4_querier.port, port);
|
|
|
|
|
2014-06-07 23:26:27 +07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election: IPv6 counterpart of the IPv4 selection above —
 * lowest address wins while an election is live.  Returns true if @saddr
 * is now the selected querier.
 */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if ((timer_pending(&br->ip6_own_query.timer) ||
	     timer_pending(&br->ip6_other_query.timer)) &&
	    ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) > 0)
		return false;

	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif
|
|
|
|
|
|
|
|
static bool br_multicast_select_querier(struct net_bridge *br,
|
2014-06-07 23:26:29 +07:00
|
|
|
struct net_bridge_port *port,
|
2014-06-07 23:26:27 +07:00
|
|
|
struct br_ip *saddr)
|
|
|
|
{
|
|
|
|
switch (saddr->proto) {
|
|
|
|
case htons(ETH_P_IP):
|
2014-06-07 23:26:29 +07:00
|
|
|
return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
|
2014-06-07 23:26:27 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
case htons(ETH_P_IPV6):
|
2014-06-07 23:26:29 +07:00
|
|
|
return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
|
2014-06-07 23:26:27 +07:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
/* Refresh the "other querier present" timer after hearing a query from a
 * foreign querier.  delay_time is only (re)computed when the timer was
 * not already running, i.e. when this querier is newly detected.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
|
|
|
|
|
2017-02-09 20:54:42 +07:00
|
|
|
static void br_port_mc_router_state_change(struct net_bridge_port *p,
|
|
|
|
bool is_mc_router)
|
|
|
|
{
|
|
|
|
struct switchdev_attr attr = {
|
|
|
|
.orig_dev = p->dev,
|
|
|
|
.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
|
|
|
|
.flags = SWITCHDEV_F_DEFER,
|
|
|
|
.u.mrouter = is_mc_router,
|
|
|
|
};
|
|
|
|
|
|
|
|
switchdev_port_attr_set(p->dev, &attr);
|
|
|
|
}
|
|
|
|
|
2010-04-27 22:01:04 +07:00
|
|
|
/*
|
2013-06-21 14:37:25 +07:00
|
|
|
* Add port to router_list
|
2010-04-27 22:01:04 +07:00
|
|
|
* list is maintained ordered by pointer value
|
|
|
|
* and locked by br->multicast_lock and RCU
|
|
|
|
*/
|
2010-02-28 02:41:49 +07:00
|
|
|
static void br_multicast_add_router(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port)
|
|
|
|
{
|
2010-04-27 14:13:11 +07:00
|
|
|
struct net_bridge_port *p;
|
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 08:06:00 +07:00
|
|
|
struct hlist_node *slot = NULL;
|
2010-04-27 14:13:11 +07:00
|
|
|
|
2015-06-10 00:23:57 +07:00
|
|
|
if (!hlist_unhashed(&port->rlist))
|
|
|
|
return;
|
|
|
|
|
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 08:06:00 +07:00
|
|
|
hlist_for_each_entry(p, &br->router_list, rlist) {
|
2010-04-27 22:01:04 +07:00
|
|
|
if ((unsigned long) port >= (unsigned long) p)
|
|
|
|
break;
|
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 08:06:00 +07:00
|
|
|
slot = &p->rlist;
|
2010-04-27 14:13:11 +07:00
|
|
|
}
|
|
|
|
|
2010-04-27 22:01:04 +07:00
|
|
|
if (slot)
|
2014-08-07 06:09:16 +07:00
|
|
|
hlist_add_behind_rcu(&port->rlist, slot);
|
2010-04-27 14:13:11 +07:00
|
|
|
else
|
|
|
|
hlist_add_head_rcu(&port->rlist, &br->router_list);
|
2015-07-23 19:00:53 +07:00
|
|
|
br_rtr_notify(br->dev, port, RTM_NEWMDB);
|
2017-02-09 20:54:42 +07:00
|
|
|
br_port_mc_router_state_change(port, true);
|
2010-02-28 02:41:49 +07:00
|
|
|
}
|
|
|
|
|
2010-02-28 02:41:45 +07:00
|
|
|
static void br_multicast_mark_router(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port)
|
|
|
|
{
|
|
|
|
unsigned long now = jiffies;
|
|
|
|
|
|
|
|
if (!port) {
|
2017-10-09 16:15:31 +07:00
|
|
|
if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
|
|
|
|
if (!timer_pending(&br->multicast_router_timer))
|
|
|
|
br_mc_router_state_change(br, true);
|
2010-02-28 02:41:45 +07:00
|
|
|
mod_timer(&br->multicast_router_timer,
|
|
|
|
now + br->multicast_querier_interval);
|
2017-10-09 16:15:31 +07:00
|
|
|
}
|
2010-02-28 02:41:45 +07:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-02-27 03:20:03 +07:00
|
|
|
if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
|
|
|
|
port->multicast_router == MDB_RTR_TYPE_PERM)
|
2010-02-28 02:41:45 +07:00
|
|
|
return;
|
|
|
|
|
2010-02-28 02:41:49 +07:00
|
|
|
br_multicast_add_router(br, port);
|
2010-02-28 02:41:45 +07:00
|
|
|
|
|
|
|
mod_timer(&port->multicast_router_timer,
|
|
|
|
now + br->multicast_querier_interval);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void br_multicast_query_received(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
2014-06-07 23:26:26 +07:00
|
|
|
struct bridge_mcast_other_query *query,
|
2014-06-07 23:26:27 +07:00
|
|
|
struct br_ip *saddr,
|
2013-08-01 06:06:20 +07:00
|
|
|
unsigned long max_delay)
|
2010-02-28 02:41:45 +07:00
|
|
|
{
|
2014-06-07 23:26:29 +07:00
|
|
|
if (!br_multicast_select_querier(br, port, saddr))
|
2010-02-28 02:41:45 +07:00
|
|
|
return;
|
|
|
|
|
2014-06-07 23:26:27 +07:00
|
|
|
br_multicast_update_query_timer(br, query, max_delay);
|
2019-02-22 20:22:32 +07:00
|
|
|
br_multicast_mark_router(br, port);
|
2010-02-28 02:41:45 +07:00
|
|
|
}
|
|
|
|
|
2018-08-06 10:07:23 +07:00
|
|
|
static void br_ip4_multicast_query(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
|
|
|
struct sk_buff *skb,
|
|
|
|
u16 vid)
|
2010-02-28 02:41:45 +07:00
|
|
|
{
|
2019-01-21 13:26:25 +07:00
|
|
|
unsigned int transport_len = ip_transport_len(skb);
|
2011-04-22 11:53:02 +07:00
|
|
|
const struct iphdr *iph = ip_hdr(skb);
|
2010-02-28 02:41:45 +07:00
|
|
|
struct igmphdr *ih = igmp_hdr(skb);
|
|
|
|
struct net_bridge_mdb_entry *mp;
|
|
|
|
struct igmpv3_query *ih3;
|
|
|
|
struct net_bridge_port_group *p;
|
2010-11-15 13:38:10 +07:00
|
|
|
struct net_bridge_port_group __rcu **pp;
|
2014-06-07 23:26:27 +07:00
|
|
|
struct br_ip saddr;
|
2010-02-28 02:41:45 +07:00
|
|
|
unsigned long max_delay;
|
|
|
|
unsigned long now = jiffies;
|
|
|
|
__be32 group;
|
|
|
|
|
|
|
|
spin_lock(&br->multicast_lock);
|
|
|
|
if (!netif_running(br->dev) ||
|
|
|
|
(port && port->state == BR_STATE_DISABLED))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
group = ih->group;
|
|
|
|
|
2019-01-21 13:26:25 +07:00
|
|
|
if (transport_len == sizeof(*ih)) {
|
2010-02-28 02:41:45 +07:00
|
|
|
max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
|
|
|
|
|
|
|
|
if (!max_delay) {
|
|
|
|
max_delay = 10 * HZ;
|
|
|
|
group = 0;
|
|
|
|
}
|
2019-01-21 13:26:25 +07:00
|
|
|
} else if (transport_len >= sizeof(*ih3)) {
|
2010-02-28 02:41:45 +07:00
|
|
|
ih3 = igmpv3_query_hdr(skb);
|
|
|
|
if (ih3->nsrcs)
|
2010-03-14 03:27:21 +07:00
|
|
|
goto out;
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2010-03-16 02:27:00 +07:00
|
|
|
max_delay = ih3->code ?
|
|
|
|
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
|
2015-05-02 19:01:07 +07:00
|
|
|
} else {
|
2014-03-11 04:25:24 +07:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2014-06-07 23:26:27 +07:00
|
|
|
if (!group) {
|
|
|
|
saddr.proto = htons(ETH_P_IP);
|
|
|
|
saddr.u.ip4 = iph->saddr;
|
2013-08-01 06:06:20 +07:00
|
|
|
|
2014-06-07 23:26:27 +07:00
|
|
|
br_multicast_query_received(br, port, &br->ip4_other_query,
|
|
|
|
&saddr, max_delay);
|
2010-02-28 02:41:45 +07:00
|
|
|
goto out;
|
2014-06-07 23:26:27 +07:00
|
|
|
}
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2018-12-05 20:14:24 +07:00
|
|
|
mp = br_mdb_ip4_get(br, group, vid);
|
2010-02-28 02:41:45 +07:00
|
|
|
if (!mp)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
max_delay *= br->multicast_last_member_count;
|
|
|
|
|
2017-11-10 05:10:57 +07:00
|
|
|
if (mp->host_joined &&
|
2010-02-28 02:41:45 +07:00
|
|
|
(timer_pending(&mp->timer) ?
|
|
|
|
time_after(mp->timer.expires, now + max_delay) :
|
|
|
|
try_to_del_timer_sync(&mp->timer) >= 0))
|
|
|
|
mod_timer(&mp->timer, now + max_delay);
|
|
|
|
|
2010-11-15 13:38:10 +07:00
|
|
|
for (pp = &mp->ports;
|
|
|
|
(p = mlock_dereference(*pp, br)) != NULL;
|
|
|
|
pp = &p->next) {
|
2010-02-28 02:41:45 +07:00
|
|
|
if (timer_pending(&p->timer) ?
|
|
|
|
time_after(p->timer.expires, now + max_delay) :
|
|
|
|
try_to_del_timer_sync(&p->timer) >= 0)
|
2011-02-11 19:42:07 +07:00
|
|
|
mod_timer(&p->timer, now + max_delay);
|
2010-02-28 02:41:45 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
spin_unlock(&br->multicast_lock);
|
|
|
|
}
|
|
|
|
|
2011-12-10 16:48:31 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2010-04-22 23:54:22 +07:00
|
|
|
static int br_ip6_multicast_query(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
2013-10-29 02:45:07 +07:00
|
|
|
struct sk_buff *skb,
|
|
|
|
u16 vid)
|
2010-04-22 23:54:22 +07:00
|
|
|
{
|
2019-01-21 13:26:25 +07:00
|
|
|
unsigned int transport_len = ipv6_transport_len(skb);
|
2012-12-13 13:51:28 +07:00
|
|
|
struct mld_msg *mld;
|
2010-04-22 23:54:22 +07:00
|
|
|
struct net_bridge_mdb_entry *mp;
|
|
|
|
struct mld2_query *mld2q;
|
2010-11-15 13:38:10 +07:00
|
|
|
struct net_bridge_port_group *p;
|
|
|
|
struct net_bridge_port_group __rcu **pp;
|
2014-06-07 23:26:27 +07:00
|
|
|
struct br_ip saddr;
|
2010-04-22 23:54:22 +07:00
|
|
|
unsigned long max_delay;
|
|
|
|
unsigned long now = jiffies;
|
2016-05-04 22:25:02 +07:00
|
|
|
unsigned int offset = skb_transport_offset(skb);
|
2011-04-22 11:53:02 +07:00
|
|
|
const struct in6_addr *group = NULL;
|
2014-03-11 04:25:24 +07:00
|
|
|
bool is_general_query;
|
2010-04-22 23:54:22 +07:00
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
spin_lock(&br->multicast_lock);
|
|
|
|
if (!netif_running(br->dev) ||
|
|
|
|
(port && port->state == BR_STATE_DISABLED))
|
|
|
|
goto out;
|
|
|
|
|
2019-01-21 13:26:25 +07:00
|
|
|
if (transport_len == sizeof(*mld)) {
|
2016-05-04 22:25:02 +07:00
|
|
|
if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
|
2010-04-22 23:54:22 +07:00
|
|
|
err = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
mld = (struct mld_msg *) icmp6_hdr(skb);
|
2012-07-10 06:56:12 +07:00
|
|
|
max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
|
2010-04-22 23:54:22 +07:00
|
|
|
if (max_delay)
|
|
|
|
group = &mld->mld_mca;
|
2013-08-06 05:32:05 +07:00
|
|
|
} else {
|
2016-05-04 22:25:02 +07:00
|
|
|
if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
|
2010-04-22 23:54:22 +07:00
|
|
|
err = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
mld2q = (struct mld2_query *)icmp6_hdr(skb);
|
|
|
|
if (!mld2q->mld2q_nsrcs)
|
|
|
|
group = &mld2q->mld2q_mca;
|
2013-09-04 05:19:39 +07:00
|
|
|
|
|
|
|
max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
|
2010-04-22 23:54:22 +07:00
|
|
|
}
|
|
|
|
|
2014-03-11 04:25:24 +07:00
|
|
|
is_general_query = group && ipv6_addr_any(group);
|
|
|
|
|
2014-06-07 23:26:27 +07:00
|
|
|
if (is_general_query) {
|
|
|
|
saddr.proto = htons(ETH_P_IPV6);
|
2019-07-02 19:00:19 +07:00
|
|
|
saddr.u.ip6 = ipv6_hdr(skb)->saddr;
|
2013-08-01 06:06:20 +07:00
|
|
|
|
2014-06-07 23:26:27 +07:00
|
|
|
br_multicast_query_received(br, port, &br->ip6_other_query,
|
|
|
|
&saddr, max_delay);
|
2010-04-22 23:54:22 +07:00
|
|
|
goto out;
|
2014-06-12 06:41:23 +07:00
|
|
|
} else if (!group) {
|
|
|
|
goto out;
|
2014-06-07 23:26:27 +07:00
|
|
|
}
|
2010-04-22 23:54:22 +07:00
|
|
|
|
2018-12-05 20:14:24 +07:00
|
|
|
mp = br_mdb_ip6_get(br, group, vid);
|
2010-04-22 23:54:22 +07:00
|
|
|
if (!mp)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
max_delay *= br->multicast_last_member_count;
|
2017-11-10 05:10:57 +07:00
|
|
|
if (mp->host_joined &&
|
2010-04-22 23:54:22 +07:00
|
|
|
(timer_pending(&mp->timer) ?
|
|
|
|
time_after(mp->timer.expires, now + max_delay) :
|
|
|
|
try_to_del_timer_sync(&mp->timer) >= 0))
|
|
|
|
mod_timer(&mp->timer, now + max_delay);
|
|
|
|
|
2010-11-15 13:38:10 +07:00
|
|
|
for (pp = &mp->ports;
|
|
|
|
(p = mlock_dereference(*pp, br)) != NULL;
|
|
|
|
pp = &p->next) {
|
2010-04-22 23:54:22 +07:00
|
|
|
if (timer_pending(&p->timer) ?
|
|
|
|
time_after(p->timer.expires, now + max_delay) :
|
|
|
|
try_to_del_timer_sync(&p->timer) >= 0)
|
2011-02-11 19:42:07 +07:00
|
|
|
mod_timer(&p->timer, now + max_delay);
|
2010-04-22 23:54:22 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
spin_unlock(&br->multicast_lock);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2014-06-07 23:26:26 +07:00
|
|
|
static void
|
|
|
|
br_multicast_leave_group(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
|
|
|
struct br_ip *group,
|
|
|
|
struct bridge_mcast_other_query *other_query,
|
2017-01-22 03:01:32 +07:00
|
|
|
struct bridge_mcast_own_query *own_query,
|
|
|
|
const unsigned char *src)
|
2010-02-28 02:41:45 +07:00
|
|
|
{
|
|
|
|
struct net_bridge_mdb_entry *mp;
|
|
|
|
struct net_bridge_port_group *p;
|
|
|
|
unsigned long now;
|
|
|
|
unsigned long time;
|
|
|
|
|
|
|
|
spin_lock(&br->multicast_lock);
|
|
|
|
if (!netif_running(br->dev) ||
|
2015-07-28 17:28:27 +07:00
|
|
|
(port && port->state == BR_STATE_DISABLED))
|
2010-02-28 02:41:45 +07:00
|
|
|
goto out;
|
|
|
|
|
2018-12-05 20:14:24 +07:00
|
|
|
mp = br_mdb_ip_get(br, group);
|
2010-02-28 02:41:45 +07:00
|
|
|
if (!mp)
|
|
|
|
goto out;
|
|
|
|
|
2015-07-28 17:28:27 +07:00
|
|
|
if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
|
|
|
|
struct net_bridge_port_group __rcu **pp;
|
|
|
|
|
|
|
|
for (pp = &mp->ports;
|
|
|
|
(p = mlock_dereference(*pp, br)) != NULL;
|
|
|
|
pp = &p->next) {
|
2017-01-22 03:01:32 +07:00
|
|
|
if (!br_port_group_equal(p, port, src))
|
2015-07-28 17:28:27 +07:00
|
|
|
continue;
|
|
|
|
|
2019-07-30 18:21:00 +07:00
|
|
|
if (p->flags & MDB_PG_FLAGS_PERMANENT)
|
|
|
|
break;
|
|
|
|
|
2015-07-28 17:28:27 +07:00
|
|
|
rcu_assign_pointer(*pp, p->next);
|
|
|
|
hlist_del_init(&p->mglist);
|
|
|
|
del_timer(&p->timer);
|
2018-12-05 20:14:25 +07:00
|
|
|
kfree_rcu(p, rcu);
|
2016-04-21 17:52:45 +07:00
|
|
|
br_mdb_notify(br->dev, port, group, RTM_DELMDB,
|
2019-07-30 19:20:41 +07:00
|
|
|
p->flags | MDB_PG_FLAGS_FAST_LEAVE);
|
2015-07-28 17:28:27 +07:00
|
|
|
|
2017-11-10 05:10:57 +07:00
|
|
|
if (!mp->ports && !mp->host_joined &&
|
2015-07-28 17:28:27 +07:00
|
|
|
netif_running(br->dev))
|
|
|
|
mod_timer(&mp->timer, jiffies);
|
|
|
|
}
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (timer_pending(&other_query->timer))
|
|
|
|
goto out;
|
|
|
|
|
2018-09-26 21:01:04 +07:00
|
|
|
if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
|
2013-05-22 04:52:56 +07:00
|
|
|
__br_multicast_send_query(br, port, &mp->addr);
|
|
|
|
|
|
|
|
time = jiffies + br->multicast_last_member_count *
|
|
|
|
br->multicast_last_member_interval;
|
2013-08-30 22:28:17 +07:00
|
|
|
|
2014-06-07 23:26:26 +07:00
|
|
|
mod_timer(&own_query->timer, time);
|
2013-05-22 04:52:56 +07:00
|
|
|
|
|
|
|
for (p = mlock_dereference(mp->ports, br);
|
|
|
|
p != NULL;
|
|
|
|
p = mlock_dereference(p->next, br)) {
|
2017-01-22 03:01:32 +07:00
|
|
|
if (!br_port_group_equal(p, port, src))
|
2013-05-22 04:52:56 +07:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!hlist_unhashed(&p->mglist) &&
|
|
|
|
(timer_pending(&p->timer) ?
|
|
|
|
time_after(p->timer.expires, time) :
|
|
|
|
try_to_del_timer_sync(&p->timer) >= 0)) {
|
|
|
|
mod_timer(&p->timer, time);
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-02-28 02:41:45 +07:00
|
|
|
now = jiffies;
|
|
|
|
time = now + br->multicast_last_member_count *
|
|
|
|
br->multicast_last_member_interval;
|
|
|
|
|
|
|
|
if (!port) {
|
2017-11-10 05:10:57 +07:00
|
|
|
if (mp->host_joined &&
|
2010-02-28 02:41:45 +07:00
|
|
|
(timer_pending(&mp->timer) ?
|
|
|
|
time_after(mp->timer.expires, time) :
|
|
|
|
try_to_del_timer_sync(&mp->timer) >= 0)) {
|
|
|
|
mod_timer(&mp->timer, time);
|
|
|
|
}
|
2013-10-20 05:58:57 +07:00
|
|
|
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (p = mlock_dereference(mp->ports, br);
|
|
|
|
p != NULL;
|
|
|
|
p = mlock_dereference(p->next, br)) {
|
|
|
|
if (p->port != port)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!hlist_unhashed(&p->mglist) &&
|
|
|
|
(timer_pending(&p->timer) ?
|
|
|
|
time_after(p->timer.expires, time) :
|
|
|
|
try_to_del_timer_sync(&p->timer) >= 0)) {
|
|
|
|
mod_timer(&p->timer, time);
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
2010-02-28 02:41:45 +07:00
|
|
|
}
|
|
|
|
out:
|
|
|
|
spin_unlock(&br->multicast_lock);
|
|
|
|
}
|
|
|
|
|
2010-04-18 10:42:07 +07:00
|
|
|
static void br_ip4_multicast_leave_group(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
2013-02-13 19:00:17 +07:00
|
|
|
__be32 group,
|
2017-01-22 03:01:32 +07:00
|
|
|
__u16 vid,
|
|
|
|
const unsigned char *src)
|
2010-04-18 10:42:07 +07:00
|
|
|
{
|
|
|
|
struct br_ip br_group;
|
2014-06-07 23:26:26 +07:00
|
|
|
struct bridge_mcast_own_query *own_query;
|
2010-04-18 10:42:07 +07:00
|
|
|
|
|
|
|
if (ipv4_is_local_multicast(group))
|
|
|
|
return;
|
|
|
|
|
2014-06-07 23:26:26 +07:00
|
|
|
own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
|
|
|
|
|
2019-04-04 03:27:24 +07:00
|
|
|
memset(&br_group, 0, sizeof(br_group));
|
2010-04-18 10:42:07 +07:00
|
|
|
br_group.u.ip4 = group;
|
|
|
|
br_group.proto = htons(ETH_P_IP);
|
2013-02-13 19:00:17 +07:00
|
|
|
br_group.vid = vid;
|
2010-04-18 10:42:07 +07:00
|
|
|
|
2014-06-07 23:26:26 +07:00
|
|
|
br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
|
2017-01-22 03:01:32 +07:00
|
|
|
own_query, src);
|
2010-04-18 10:42:07 +07:00
|
|
|
}
|
|
|
|
|
2011-12-10 16:48:31 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2010-04-22 23:54:22 +07:00
|
|
|
static void br_ip6_multicast_leave_group(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
2013-02-13 19:00:17 +07:00
|
|
|
const struct in6_addr *group,
|
2017-01-22 03:01:32 +07:00
|
|
|
__u16 vid,
|
|
|
|
const unsigned char *src)
|
2010-04-22 23:54:22 +07:00
|
|
|
{
|
|
|
|
struct br_ip br_group;
|
2014-06-07 23:26:26 +07:00
|
|
|
struct bridge_mcast_own_query *own_query;
|
2010-04-22 23:54:22 +07:00
|
|
|
|
2013-09-04 07:13:39 +07:00
|
|
|
if (ipv6_addr_is_ll_all_nodes(group))
|
2010-04-22 23:54:22 +07:00
|
|
|
return;
|
|
|
|
|
2014-06-07 23:26:26 +07:00
|
|
|
own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
|
|
|
|
|
2019-04-04 03:27:24 +07:00
|
|
|
memset(&br_group, 0, sizeof(br_group));
|
2011-11-21 10:39:03 +07:00
|
|
|
br_group.u.ip6 = *group;
|
2010-04-22 23:54:22 +07:00
|
|
|
br_group.proto = htons(ETH_P_IPV6);
|
2013-02-13 19:00:17 +07:00
|
|
|
br_group.vid = vid;
|
2010-04-22 23:54:22 +07:00
|
|
|
|
2014-06-07 23:26:26 +07:00
|
|
|
br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
|
2017-01-22 03:01:32 +07:00
|
|
|
own_query, src);
|
2010-04-22 23:54:22 +07:00
|
|
|
}
|
|
|
|
#endif
|
2010-04-18 10:42:07 +07:00
|
|
|
|
2016-06-28 21:57:06 +07:00
|
|
|
static void br_multicast_err_count(const struct net_bridge *br,
|
|
|
|
const struct net_bridge_port *p,
|
|
|
|
__be16 proto)
|
|
|
|
{
|
|
|
|
struct bridge_mcast_stats __percpu *stats;
|
|
|
|
struct bridge_mcast_stats *pstats;
|
|
|
|
|
2018-09-26 21:01:04 +07:00
|
|
|
if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
|
2016-06-28 21:57:06 +07:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (p)
|
|
|
|
stats = p->mcast_stats;
|
|
|
|
else
|
|
|
|
stats = br->mcast_stats;
|
|
|
|
if (WARN_ON(!stats))
|
|
|
|
return;
|
|
|
|
|
|
|
|
pstats = this_cpu_ptr(stats);
|
|
|
|
|
|
|
|
u64_stats_update_begin(&pstats->syncp);
|
|
|
|
switch (proto) {
|
|
|
|
case htons(ETH_P_IP):
|
|
|
|
pstats->mstats.igmp_parse_errors++;
|
|
|
|
break;
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
case htons(ETH_P_IPV6):
|
|
|
|
pstats->mstats.mld_parse_errors++;
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
u64_stats_update_end(&pstats->syncp);
|
|
|
|
}
|
|
|
|
|
2016-10-31 19:21:05 +07:00
|
|
|
static void br_multicast_pim(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
|
|
|
const struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
unsigned int offset = skb_transport_offset(skb);
|
|
|
|
struct pimhdr *pimhdr, _pimhdr;
|
|
|
|
|
|
|
|
pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
|
|
|
|
if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
|
|
|
|
pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
|
|
|
|
return;
|
|
|
|
|
|
|
|
br_multicast_mark_router(br, port);
|
|
|
|
}
|
|
|
|
|
2019-01-21 13:26:28 +07:00
|
|
|
static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
|
|
|
|
igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
|
|
|
|
return -ENOMSG;
|
|
|
|
|
|
|
|
br_multicast_mark_router(br, port);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-02-28 02:41:45 +07:00
|
|
|
static int br_multicast_ipv4_rcv(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
2013-10-29 02:45:07 +07:00
|
|
|
struct sk_buff *skb,
|
|
|
|
u16 vid)
|
2010-02-28 02:41:45 +07:00
|
|
|
{
|
2017-01-22 03:01:32 +07:00
|
|
|
const unsigned char *src;
|
2010-02-28 02:41:45 +07:00
|
|
|
struct igmphdr *ih;
|
|
|
|
int err;
|
|
|
|
|
2019-01-21 13:26:25 +07:00
|
|
|
err = ip_mc_check_igmp(skb);
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2015-05-02 19:01:07 +07:00
|
|
|
if (err == -ENOMSG) {
|
2016-10-31 19:21:05 +07:00
|
|
|
if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
|
2011-06-23 09:39:12 +07:00
|
|
|
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
|
2016-10-31 19:21:05 +07:00
|
|
|
} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
|
|
|
|
if (ip_hdr(skb)->protocol == IPPROTO_PIM)
|
|
|
|
br_multicast_pim(br, port, skb);
|
2019-01-21 13:26:28 +07:00
|
|
|
} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
|
2019-02-19 09:17:09 +07:00
|
|
|
br_ip4_multicast_mrd_rcv(br, port, skb);
|
2016-10-31 19:21:05 +07:00
|
|
|
}
|
2019-01-21 13:26:28 +07:00
|
|
|
|
2010-02-28 02:41:45 +07:00
|
|
|
return 0;
|
2015-05-02 19:01:07 +07:00
|
|
|
} else if (err < 0) {
|
2016-06-28 21:57:06 +07:00
|
|
|
br_multicast_err_count(br, port, skb->protocol);
|
2015-05-02 19:01:07 +07:00
|
|
|
return err;
|
2011-06-23 09:39:12 +07:00
|
|
|
}
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2015-05-02 19:01:07 +07:00
|
|
|
ih = igmp_hdr(skb);
|
2017-01-22 03:01:32 +07:00
|
|
|
src = eth_hdr(skb)->h_source;
|
2016-06-28 21:57:06 +07:00
|
|
|
BR_INPUT_SKB_CB(skb)->igmp = ih->type;
|
2010-02-28 02:41:45 +07:00
|
|
|
|
|
|
|
switch (ih->type) {
|
|
|
|
case IGMP_HOST_MEMBERSHIP_REPORT:
|
|
|
|
case IGMPV2_HOST_MEMBERSHIP_REPORT:
|
2011-06-13 22:04:43 +07:00
|
|
|
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
|
2017-01-22 03:01:32 +07:00
|
|
|
err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
|
2010-02-28 02:41:45 +07:00
|
|
|
break;
|
|
|
|
case IGMPV3_HOST_MEMBERSHIP_REPORT:
|
2019-01-21 13:26:25 +07:00
|
|
|
err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
|
2010-02-28 02:41:45 +07:00
|
|
|
break;
|
|
|
|
case IGMP_HOST_MEMBERSHIP_QUERY:
|
2019-01-21 13:26:25 +07:00
|
|
|
br_ip4_multicast_query(br, port, skb, vid);
|
2010-02-28 02:41:45 +07:00
|
|
|
break;
|
|
|
|
case IGMP_HOST_LEAVE_MESSAGE:
|
2017-01-22 03:01:32 +07:00
|
|
|
br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
|
2010-02-28 02:41:45 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-07-07 02:12:21 +07:00
|
|
|
br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
|
2016-06-28 21:57:06 +07:00
|
|
|
BR_MCAST_DIR_RX);
|
|
|
|
|
2010-02-28 02:41:45 +07:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2011-12-10 16:48:31 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2019-01-21 13:26:28 +07:00
|
|
|
static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
|
|
|
|
return -ENOMSG;
|
|
|
|
|
|
|
|
ret = ipv6_mc_check_icmpv6(skb);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
|
|
|
|
return -ENOMSG;
|
|
|
|
|
|
|
|
br_multicast_mark_router(br, port);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-04-22 23:54:22 +07:00
|
|
|
static int br_multicast_ipv6_rcv(struct net_bridge *br,
|
|
|
|
struct net_bridge_port *port,
|
2013-10-29 02:45:07 +07:00
|
|
|
struct sk_buff *skb,
|
|
|
|
u16 vid)
|
2010-04-22 23:54:22 +07:00
|
|
|
{
|
2017-01-22 03:01:32 +07:00
|
|
|
const unsigned char *src;
|
2015-05-02 19:01:07 +07:00
|
|
|
struct mld_msg *mld;
|
2010-04-22 23:54:22 +07:00
|
|
|
int err;
|
|
|
|
|
2019-01-21 13:26:25 +07:00
|
|
|
err = ipv6_mc_check_mld(skb);
|
2010-04-22 23:54:22 +07:00
|
|
|
|
2015-05-02 19:01:07 +07:00
|
|
|
if (err == -ENOMSG) {
|
|
|
|
if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
|
|
|
|
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
|
2019-01-21 13:26:28 +07:00
|
|
|
|
|
|
|
if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
|
|
|
|
err = br_ip6_multicast_mrd_rcv(br, port, skb);
|
|
|
|
|
|
|
|
if (err < 0 && err != -ENOMSG) {
|
|
|
|
br_multicast_err_count(br, port, skb->protocol);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-04-22 23:54:22 +07:00
|
|
|
return 0;
|
2015-05-02 19:01:07 +07:00
|
|
|
} else if (err < 0) {
|
2016-06-28 21:57:06 +07:00
|
|
|
br_multicast_err_count(br, port, skb->protocol);
|
2015-05-02 19:01:07 +07:00
|
|
|
return err;
|
2010-04-22 23:54:22 +07:00
|
|
|
}
|
|
|
|
|
2015-05-02 19:01:07 +07:00
|
|
|
mld = (struct mld_msg *)skb_transport_header(skb);
|
2016-06-28 21:57:06 +07:00
|
|
|
BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
|
2010-04-22 23:54:22 +07:00
|
|
|
|
2015-05-02 19:01:07 +07:00
|
|
|
switch (mld->mld_type) {
|
2010-04-22 23:54:22 +07:00
|
|
|
case ICMPV6_MGM_REPORT:
|
2017-01-22 03:01:32 +07:00
|
|
|
src = eth_hdr(skb)->h_source;
|
2011-06-13 22:06:58 +07:00
|
|
|
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
|
2017-01-22 03:01:32 +07:00
|
|
|
err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
|
|
|
|
src);
|
2010-04-22 23:54:22 +07:00
|
|
|
break;
|
|
|
|
case ICMPV6_MLD2_REPORT:
|
2019-01-21 13:26:25 +07:00
|
|
|
err = br_ip6_multicast_mld2_report(br, port, skb, vid);
|
2010-04-22 23:54:22 +07:00
|
|
|
break;
|
|
|
|
case ICMPV6_MGM_QUERY:
|
2019-01-21 13:26:25 +07:00
|
|
|
err = br_ip6_multicast_query(br, port, skb, vid);
|
2010-04-22 23:54:22 +07:00
|
|
|
break;
|
|
|
|
case ICMPV6_MGM_REDUCTION:
|
2017-01-22 03:01:32 +07:00
|
|
|
src = eth_hdr(skb)->h_source;
|
|
|
|
br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
|
2015-05-02 19:01:07 +07:00
|
|
|
break;
|
2010-04-22 23:54:22 +07:00
|
|
|
}
|
|
|
|
|
2016-07-07 02:12:21 +07:00
|
|
|
br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
|
2016-06-28 21:57:06 +07:00
|
|
|
BR_MCAST_DIR_RX);
|
|
|
|
|
2010-04-22 23:54:22 +07:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2010-02-28 02:41:45 +07:00
|
|
|
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
|
2013-10-29 02:45:07 +07:00
|
|
|
struct sk_buff *skb, u16 vid)
|
2010-02-28 02:41:45 +07:00
|
|
|
{
|
2016-06-28 21:57:06 +07:00
|
|
|
int ret = 0;
|
|
|
|
|
2010-04-25 15:06:40 +07:00
|
|
|
BR_INPUT_SKB_CB(skb)->igmp = 0;
|
|
|
|
BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
|
|
|
|
|
2018-09-26 21:01:03 +07:00
|
|
|
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
|
2010-02-28 02:41:45 +07:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
switch (skb->protocol) {
|
|
|
|
case htons(ETH_P_IP):
|
2016-06-28 21:57:06 +07:00
|
|
|
ret = br_multicast_ipv4_rcv(br, port, skb, vid);
|
|
|
|
break;
|
2011-12-10 16:48:31 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2010-04-22 23:54:22 +07:00
|
|
|
case htons(ETH_P_IPV6):
|
2016-06-28 21:57:06 +07:00
|
|
|
ret = br_multicast_ipv6_rcv(br, port, skb, vid);
|
|
|
|
break;
|
2010-04-22 23:54:22 +07:00
|
|
|
#endif
|
2010-02-28 02:41:45 +07:00
|
|
|
}
|
|
|
|
|
2016-06-28 21:57:06 +07:00
|
|
|
return ret;
|
2010-02-28 02:41:45 +07:00
|
|
|
}
|
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
static void br_multicast_query_expired(struct net_bridge *br,
|
2014-06-07 23:26:29 +07:00
|
|
|
struct bridge_mcast_own_query *query,
|
|
|
|
struct bridge_mcast_querier *querier)
|
2013-08-30 22:28:17 +07:00
|
|
|
{
|
|
|
|
spin_lock(&br->multicast_lock);
|
|
|
|
if (query->startup_sent < br->multicast_startup_query_count)
|
|
|
|
query->startup_sent++;
|
|
|
|
|
2015-05-28 18:42:54 +07:00
|
|
|
RCU_INIT_POINTER(querier->port, NULL);
|
2013-08-30 22:28:17 +07:00
|
|
|
br_multicast_send_query(br, NULL, query);
|
|
|
|
spin_unlock(&br->multicast_lock);
|
|
|
|
}
|
|
|
|
|
2017-11-03 13:21:10 +07:00
|
|
|
static void br_ip4_multicast_query_expired(struct timer_list *t)
|
2010-02-28 02:41:45 +07:00
|
|
|
{
|
2017-11-03 13:21:10 +07:00
|
|
|
struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2014-06-07 23:26:29 +07:00
|
|
|
br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
|
2013-08-30 22:28:17 +07:00
|
|
|
}
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2017-11-03 13:21:10 +07:00
|
|
|
static void br_ip6_multicast_query_expired(struct timer_list *t)
|
2013-08-30 22:28:17 +07:00
|
|
|
{
|
2017-11-03 13:21:10 +07:00
|
|
|
struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2014-06-07 23:26:29 +07:00
|
|
|
br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
|
2010-02-28 02:41:45 +07:00
|
|
|
}
|
2013-08-30 22:28:17 +07:00
|
|
|
#endif
|
2010-02-28 02:41:45 +07:00
|
|
|
|
|
|
|
void br_multicast_init(struct net_bridge *br)
|
|
|
|
{
|
2018-12-05 20:14:27 +07:00
|
|
|
br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2016-02-27 03:20:01 +07:00
|
|
|
br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
|
2010-02-28 02:41:45 +07:00
|
|
|
br->multicast_last_member_count = 2;
|
|
|
|
br->multicast_startup_query_count = 2;
|
|
|
|
|
|
|
|
br->multicast_last_member_interval = HZ;
|
|
|
|
br->multicast_query_response_interval = 10 * HZ;
|
|
|
|
br->multicast_startup_query_interval = 125 * HZ / 4;
|
|
|
|
br->multicast_query_interval = 125 * HZ;
|
|
|
|
br->multicast_querier_interval = 255 * HZ;
|
|
|
|
br->multicast_membership_interval = 260 * HZ;
|
|
|
|
|
2014-06-07 23:26:26 +07:00
|
|
|
br->ip4_other_query.delay_time = 0;
|
2014-06-07 23:26:29 +07:00
|
|
|
br->ip4_querier.port = NULL;
|
2016-11-21 19:03:25 +07:00
|
|
|
br->multicast_igmp_version = 2;
|
2013-08-30 22:28:17 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2016-11-21 19:03:25 +07:00
|
|
|
br->multicast_mld_version = 1;
|
2014-06-07 23:26:26 +07:00
|
|
|
br->ip6_other_query.delay_time = 0;
|
2014-06-07 23:26:29 +07:00
|
|
|
br->ip6_querier.port = NULL;
|
2013-08-30 22:28:17 +07:00
|
|
|
#endif
|
2018-10-01 15:57:01 +07:00
|
|
|
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
|
2018-09-26 21:01:04 +07:00
|
|
|
br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
|
2013-08-01 06:06:20 +07:00
|
|
|
|
2010-02-28 02:41:45 +07:00
|
|
|
spin_lock_init(&br->multicast_lock);
|
2017-11-03 13:21:10 +07:00
|
|
|
timer_setup(&br->multicast_router_timer,
|
|
|
|
br_multicast_local_router_expired, 0);
|
|
|
|
timer_setup(&br->ip4_other_query.timer,
|
|
|
|
br_ip4_multicast_querier_expired, 0);
|
|
|
|
timer_setup(&br->ip4_own_query.timer,
|
|
|
|
br_ip4_multicast_query_expired, 0);
|
2013-08-30 22:28:17 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2017-11-03 13:21:10 +07:00
|
|
|
timer_setup(&br->ip6_other_query.timer,
|
|
|
|
br_ip6_multicast_querier_expired, 0);
|
|
|
|
timer_setup(&br->ip6_own_query.timer,
|
|
|
|
br_ip6_multicast_query_expired, 0);
|
2013-08-30 22:28:17 +07:00
|
|
|
#endif
|
2018-12-05 20:14:24 +07:00
|
|
|
INIT_HLIST_HEAD(&br->mdb_list);
|
2010-02-28 02:41:45 +07:00
|
|
|
}
|
|
|
|
|
2019-01-21 13:26:27 +07:00
|
|
|
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
|
|
|
|
{
|
|
|
|
struct in_device *in_dev = in_dev_get(br->dev);
|
|
|
|
|
|
|
|
if (!in_dev)
|
|
|
|
return;
|
|
|
|
|
2019-02-02 11:20:52 +07:00
|
|
|
__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
|
2019-01-21 13:26:27 +07:00
|
|
|
in_dev_put(in_dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
|
|
|
|
{
|
|
|
|
struct in6_addr addr;
|
|
|
|
|
|
|
|
ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
|
|
|
|
ipv6_dev_mc_inc(br->dev, &addr);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static void br_multicast_join_snoopers(struct net_bridge *br)
|
|
|
|
{
|
|
|
|
br_ip4_multicast_join_snoopers(br);
|
|
|
|
br_ip6_multicast_join_snoopers(br);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
|
|
|
|
{
|
|
|
|
struct in_device *in_dev = in_dev_get(br->dev);
|
|
|
|
|
|
|
|
if (WARN_ON(!in_dev))
|
|
|
|
return;
|
|
|
|
|
2019-02-02 11:20:52 +07:00
|
|
|
__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
|
2019-01-21 13:26:27 +07:00
|
|
|
in_dev_put(in_dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* Leave the MLDv2 all-snoopers group (ff02::6a) previously joined by
 * br_ip6_multicast_join_snoopers().
 */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr snoopers_addr;

	/* ff02::6a == link-local all-snoopers multicast address */
	ipv6_addr_set(&snoopers_addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &snoopers_addr);
}
#else
/* No-op stub when IPv6 support is not built in. */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
|
|
|
|
|
|
|
|
/* Leave both the IGMP and MLD all-snoopers groups on the bridge device;
 * counterpart of br_multicast_join_snoopers().
 */
static void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
|
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
static void __br_multicast_open(struct net_bridge *br,
|
2014-06-07 23:26:26 +07:00
|
|
|
struct bridge_mcast_own_query *query)
|
2010-02-28 02:41:45 +07:00
|
|
|
{
|
2013-08-30 22:28:17 +07:00
|
|
|
query->startup_sent = 0;
|
2010-02-28 02:41:45 +07:00
|
|
|
|
2018-09-26 21:01:03 +07:00
|
|
|
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
|
2010-02-28 02:41:45 +07:00
|
|
|
return;
|
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
mod_timer(&query->timer, jiffies);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Bring up multicast snooping state when the bridge device goes up:
 * join the all-snoopers groups and start the own-querier timers.
 *
 * NOTE(review): br_multicast_toggle() calls this while holding
 * br->multicast_lock (spin_lock_bh); br_multicast_join_snoopers() can
 * trigger IGMP/MLD traffic that re-enters the bridge's multicast code,
 * which looks deadlock-prone — confirm against the upstream fix that
 * moved the snoopers join/leave outside the lock.
 */
void br_multicast_open(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}
|
|
|
|
|
|
|
|
/* Tear down multicast snooping activity when the bridge device goes down:
 * stop all router/querier timers and leave the all-snoopers groups.
 */
void br_multicast_stop(struct net_bridge *br)
{
	/* del_timer_sync() waits for a running handler to finish, so no
	 * timer callback can still be executing after this point.
	 */
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif

	/* Only leave the snoopers groups if they were joined (i.e. while
	 * snooping was enabled) — keeps join/leave balanced.
	 */
	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);
}
|
|
|
|
|
|
|
|
/* Final multicast cleanup on bridge device destruction: flush every MDB
 * entry and wait for outstanding RCU callbacks before returning.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	/* _safe variant: entries are unlinked while walking the list */
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
		del_timer(&mp->timer);
		rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
				       br_mdb_rht_params);
		hlist_del_rcu(&mp->mdb_node);
		/* freed after a grace period; readers may still hold mp */
		kfree_rcu(mp, rcu);
	}
	spin_unlock_bh(&br->multicast_lock);

	/* Wait for all queued kfree_rcu() callbacks to run so no MDB
	 * memory outlives the bridge device.
	 */
	rcu_barrier();
}
|
2010-02-28 02:41:49 +07:00
|
|
|
|
|
|
|
/* Set the bridge-level multicast router mode (IFLA_BR_MCAST_ROUTER).
 * Returns 0 on success, -EINVAL for unsupported values (e.g. the
 * port-only MDB_RTR_TYPE_TEMP mode).
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	/* DISABLED and PERM share a body: both are static states that
	 * stop the learning timer; only PERM marks the bridge a router.
	 */
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* Leaving a non-query mode clears the router state;
		 * the timer is left alone so learning can resume.
		 */
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}
|
|
|
|
|
2016-02-27 03:20:01 +07:00
|
|
|
static void __del_port_router(struct net_bridge_port *p)
|
|
|
|
{
|
|
|
|
if (hlist_unhashed(&p->rlist))
|
|
|
|
return;
|
|
|
|
hlist_del_init_rcu(&p->rlist);
|
|
|
|
br_rtr_notify(p->br->dev, p, RTM_DELMDB);
|
2017-02-09 20:54:42 +07:00
|
|
|
br_port_mc_router_state_change(p, false);
|
2017-02-09 20:54:41 +07:00
|
|
|
|
|
|
|
/* don't allow timer refresh */
|
|
|
|
if (p->multicast_router == MDB_RTR_TYPE_TEMP)
|
|
|
|
p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
|
2016-02-27 03:20:01 +07:00
|
|
|
}
|
|
|
|
|
2010-02-28 02:41:49 +07:00
|
|
|
/* Set the per-port multicast router mode (IFLA_BRPORT_MULTICAST_ROUTER).
 * Returns 0 on success, -EINVAL for unknown modes.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		/* never treat this port as a router port */
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* default mode: learn router presence from queries */
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		/* permanent router port: no expiry timer needed */
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		/* temporary router port; mark_router arms the timer */
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
|
2010-02-28 02:41:50 +07:00
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
static void br_multicast_start_querier(struct net_bridge *br,
|
2014-06-07 23:26:26 +07:00
|
|
|
struct bridge_mcast_own_query *query)
|
2010-02-28 02:41:50 +07:00
|
|
|
{
|
|
|
|
struct net_bridge_port *port;
|
2012-04-13 09:37:42 +07:00
|
|
|
|
2013-08-30 22:28:17 +07:00
|
|
|
__br_multicast_open(br, query);
|
2012-04-13 09:37:42 +07:00
|
|
|
|
2019-04-11 19:08:25 +07:00
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(port, &br->port_list, list) {
|
2012-04-13 09:37:42 +07:00
|
|
|
if (port->state == BR_STATE_DISABLED ||
|
|
|
|
port->state == BR_STATE_BLOCKING)
|
|
|
|
continue;
|
|
|
|
|
2014-06-07 23:26:26 +07:00
|
|
|
if (query == &br->ip4_own_query)
|
|
|
|
br_multicast_enable(&port->ip4_own_query);
|
2013-08-30 22:28:17 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
else
|
2014-06-07 23:26:26 +07:00
|
|
|
br_multicast_enable(&port->ip6_own_query);
|
2013-08-30 22:28:17 +07:00
|
|
|
#endif
|
2012-04-13 09:37:42 +07:00
|
|
|
}
|
2019-04-11 19:08:25 +07:00
|
|
|
rcu_read_unlock();
|
2012-04-13 09:37:42 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable or disable multicast snooping on the bridge.  Always returns 0;
 * a value equal to the current state is a no-op.
 *
 * NOTE(review): br_multicast_open()/br_multicast_leave_snoopers() are
 * invoked here with multicast_lock held; the snoopers join/leave can
 * generate IGMP/MLD reports that re-enter the bridge multicast code and
 * try to take the same lock — looks deadlock-prone, confirm against the
 * upstream fix that deferred the snoopers change until after unlock.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;

	spin_lock_bh(&br->multicast_lock);
	/* no state change requested */
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	/* propagate the new state to switchdev before flipping the flag */
	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		br_multicast_leave_snoopers(br);
		goto unlock;
	}

	/* if the device is down, br_multicast_open() runs at dev-up time */
	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
|
2010-02-28 02:41:51 +07:00
|
|
|
|
2017-05-26 13:37:24 +07:00
|
|
|
/* Return true if IGMP/MLD snooping is enabled on the given bridge device.
 * @dev must be a bridge device (callers pass the bridge itself, not a
 * port).  Exported for use by drivers/switchdev consumers.
 */
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);
|
|
|
|
|
2017-10-09 16:15:32 +07:00
|
|
|
bool br_multicast_router(const struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct net_bridge *br = netdev_priv(dev);
|
|
|
|
bool is_router;
|
|
|
|
|
|
|
|
spin_lock_bh(&br->multicast_lock);
|
|
|
|
is_router = br_multicast_is_router(br);
|
|
|
|
spin_unlock_bh(&br->multicast_lock);
|
|
|
|
return is_router;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(br_multicast_router);
|
|
|
|
|
2012-04-13 09:37:42 +07:00
|
|
|
/* Enable or disable the bridge acting as an IGMP/MLD querier.  Always
 * returns 0; enabling (re)starts the own queriers for both families.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	/* If no foreign querier is currently known, give one a full
	 * response interval to show up before we elect ourselves.
	 */
	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
|
|
|
|
|
2016-11-21 19:03:24 +07:00
|
|
|
int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
|
|
|
|
{
|
|
|
|
/* Currently we support only version 2 and 3 */
|
|
|
|
switch (val) {
|
|
|
|
case 2:
|
|
|
|
case 3:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_lock_bh(&br->multicast_lock);
|
|
|
|
br->multicast_igmp_version = val;
|
|
|
|
spin_unlock_bh(&br->multicast_lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-21 19:03:25 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* Set the MLD version used by the bridge querier.
 * Currently we support version 1 and 2; anything else is -EINVAL.
 */
int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
{
	if (val != 1 && val != 2)
		return -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	br->multicast_mld_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
#endif
|
|
|
|
|
2014-06-07 23:26:28 +07:00
|
|
|
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* skip the querying port itself */
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			/* GFP_ATOMIC: we are inside an RCU read section */
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			/* On allocation failure a partial count is
			 * returned; entries added so far stay on the
			 * list and are freed by the caller.
			 */
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
|
2014-06-07 23:26:29 +07:00
|
|
|
|
2014-07-07 10:41:17 +07:00
|
|
|
/**
|
|
|
|
* br_multicast_has_querier_anywhere - Checks for a querier on a bridge
|
|
|
|
* @dev: The bridge port providing the bridge on which to check for a querier
|
|
|
|
* @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
|
|
|
|
*
|
|
|
|
* Checks whether the given interface has a bridge on top and if so returns
|
|
|
|
* true if a valid querier exists anywhere on the bridged link layer.
|
|
|
|
* Otherwise returns false.
|
|
|
|
*/
|
|
|
|
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
|
|
|
|
{
|
|
|
|
struct net_bridge *br;
|
|
|
|
struct net_bridge_port *port;
|
|
|
|
struct ethhdr eth;
|
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
2019-03-29 20:38:19 +07:00
|
|
|
if (!netif_is_bridge_port(dev))
|
2014-07-07 10:41:17 +07:00
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
port = br_port_get_rcu(dev);
|
|
|
|
if (!port || !port->br)
|
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
br = port->br;
|
|
|
|
|
|
|
|
memset(ð, 0, sizeof(eth));
|
|
|
|
eth.h_proto = htons(proto);
|
|
|
|
|
|
|
|
ret = br_multicast_querier_exists(br, ð);
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
rcu_read_unlock();
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
|
|
|
|
|
2014-06-07 23:26:29 +07:00
|
|
|
/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		/* No pending other-querier timer means no foreign querier;
		 * a querier behind @dev itself doesn't count as "adjacent".
		 */
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
|
2016-06-28 21:57:06 +07:00
|
|
|
|
|
|
|
/* Account one IGMP/MLD packet of the given @type and direction (@dir is
 * BR_MCAST_DIR_RX or BR_MCAST_DIR_TX) into the per-cpu counters.
 * Query packets are further classified by protocol version based on
 * their transport-payload length and header contents.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* transport-payload length = total length - IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* v3 queries are longer than a plain igmphdr;
			 * v1 is distinguished from v2 by a zero max
			 * response code field.
			 */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* payload_len excludes the fixed IPv6 header; subtract
		 * any extension headers to get the MLD message length.
		 */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* MLDv2 queries are longer than a plain mld_msg */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
|
|
|
|
|
|
|
|
/* Account one IGMP/MLD packet into the port's statistics when @p is
 * given, otherwise into the bridge-level statistics.  A zero @type or
 * disabled stats collection makes this a no-op.
 */
void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	stats = p ? p->mcast_stats : br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}
|
|
|
|
|
|
|
|
int br_multicast_init_stats(struct net_bridge *br)
|
|
|
|
{
|
|
|
|
br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
|
|
|
|
if (!br->mcast_stats)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-10 18:59:27 +07:00
|
|
|
/* Free the bridge-level per-cpu multicast statistics allocated by
 * br_multicast_init_stats().
 */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
|
|
|
|
|
2020-05-27 20:51:13 +07:00
|
|
|
/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
/* Accumulate one RX/TX counter pair from @src into @dst. */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}
|
|
|
|
|
|
|
|
/* Fold the per-cpu multicast counters of port @p (or of the bridge when
 * @p is NULL) into @dest.  Each CPU's snapshot is read under its
 * u64_stats sequence counter so 64-bit counters are consistent on
 * 32-bit hosts.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	/* accumulate into a local copy first; @dest is only written once */
	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* retry the snapshot if a writer updated it concurrently */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
|
2018-12-05 20:14:24 +07:00
|
|
|
|
|
|
|
/* Initialize the MDB rhashtable keyed by struct br_ip (see
 * br_mdb_rht_params).  Returns 0 or a negative errno from
 * rhashtable_init().
 */
int br_mdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
}
|
|
|
|
|
|
|
|
/* Destroy the MDB rhashtable; counterpart of br_mdb_hash_init(). */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->mdb_hash_tbl);
}
|