2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* Linux NET3: Internet Group Management Protocol [IGMP]
|
|
|
|
*
|
|
|
|
* Authors:
|
2008-10-14 09:01:08 +07:00
|
|
|
* Alan Cox <alan@lxorguk.ukuu.org.uk>
|
2005-04-17 05:20:36 +07:00
|
|
|
*
|
|
|
|
* Extended to talk the BSD extended IGMP protocol of mrouted 3.6
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*/
|
|
|
|
#ifndef _LINUX_IGMP_H
|
|
|
|
#define _LINUX_IGMP_H
|
|
|
|
|
|
|
|
#include <linux/skbuff.h>
|
2006-12-04 11:15:30 +07:00
|
|
|
#include <linux/timer.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/in.h>
|
2012-10-13 16:46:48 +07:00
|
|
|
#include <uapi/linux/igmp.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
/* Return the IGMP header of @skb; valid only once skb->transport_header
 * has been set by the caller/stack. */
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
{
	return (struct igmphdr *)skb_transport_header(skb);
}
/* Return the IGMPv3 membership report header of @skb; assumes the
 * transport header offset is already set. */
static inline struct igmpv3_report *
igmpv3_report_hdr(const struct sk_buff *skb)
{
	return (struct igmpv3_report *)skb_transport_header(skb);
}
/* Return the IGMPv3 membership query header of @skb; assumes the
 * transport header offset is already set. */
static inline struct igmpv3_query *
igmpv3_query_hdr(const struct sk_buff *skb)
{
	return (struct igmpv3_query *)skb_transport_header(skb);
}
/* Tunables (net.ipv4.igmp_*), defined in net/ipv4/igmp.c:
 * max group memberships per socket, max source filters per socket,
 * and the IGMPv3 query robustness variable (qrv). */
extern int sysctl_igmp_max_memberships;
extern int sysctl_igmp_max_msf;
extern int sysctl_igmp_qrv;
2009-11-05 00:50:58 +07:00
|
|
|
struct ip_sf_socklist {
|
2005-04-17 05:20:36 +07:00
|
|
|
unsigned int sl_max;
|
|
|
|
unsigned int sl_count;
|
2010-02-02 22:32:29 +07:00
|
|
|
struct rcu_head rcu;
|
2006-09-28 08:30:52 +07:00
|
|
|
__be32 sl_addr[0];
|
2005-04-17 05:20:36 +07:00
|
|
|
};
|
|
|
|
/* Bytes needed for an ip_sf_socklist holding @count source addresses
 * (header plus trailing sl_addr[] storage). */
#define IP_SFLSIZE(count)	(sizeof(struct ip_sf_socklist) + \
	(count) * sizeof(__be32))

#define IP_SFBLOCK	10	/* allocate this many at once */
/* ip_mc_socklist is real list now. Speed is not argument;
   this list never used in fast path code
 */

/* One multicast group membership held by a socket. */
struct ip_mc_socklist {
	struct ip_mc_socklist __rcu *next_rcu;	/* RCU-protected list link */
	struct ip_mreqn		multi;		/* group address + interface */
	unsigned int		sfmode;		/* MCAST_{INCLUDE,EXCLUDE} */
	struct ip_sf_socklist __rcu *sflist;	/* per-source filter list */
	struct rcu_head		rcu;		/* deferred free */
};
/* One source address entry in a group's per-interface filter list. */
struct ip_sf_list {
	struct ip_sf_list	*sf_next;	/* singly-linked list */
	__be32			sf_inaddr;	/* source address */
	unsigned long		sf_count[2];	/* include/exclude counts */
	unsigned char		sf_gsresp;	/* include in g & s response? */
	unsigned char		sf_oldin;	/* change state */
	unsigned char		sf_crcount;	/* retrans. left to send */
};
/* Per-interface state for one joined multicast group. */
struct ip_mc_list {
	struct in_device	*interface;	/* device the group lives on */
	__be32			multiaddr;	/* group address */
	unsigned int		sfmode;		/* MCAST_{INCLUDE,EXCLUDE} */
	struct ip_sf_list	*sources;	/* active source filters */
	struct ip_sf_list	*tomb;		/* removed sources (presumably kept
						 * for change reports — see igmp.c) */
	unsigned long		sfcount[2];	/* include/exclude source counts */
	union {
		struct ip_mc_list *next;
		struct ip_mc_list __rcu *next_rcu;
	};
	struct ip_mc_list __rcu *next_hash;	/* hash-bucket chaining */
	struct timer_list	timer;		/* report (re)transmit timer */
	int			users;		/* join count on this interface */
	atomic_t		refcnt;		/* object lifetime refcount */
	spinlock_t		lock;
	char			tm_running;	/* timer currently armed? */
	char			reporter;	/* did we send the last report? */
	char			unsolicit_count;
	char			loaded;
	unsigned char		gsquery;	/* check source marks? */
	unsigned char		crcount;	/* change reports left to send */
	struct rcu_head		rcu;		/* deferred free */
};
/* V3 exponential field decoding */

/* Low @nb bits of @value; identity when nb >= 32 (avoids UB shift). */
#define IGMPV3_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))

/* Decode an IGMPv3 floating-point-coded field: values below @thresh are
 * literal; otherwise @value packs an @nbexp-bit exponent above an
 * @nbmant-bit mantissa and decodes to (mant | 1<<nbmant) << (exp + nbexp).
 * NOTE: a function-like macro — @value is evaluated multiple times, so
 * callers must not pass expressions with side effects. */
#define IGMPV3_EXP(thresh, nbmant, nbexp, value) \
	((value) < (thresh) ? (value) : \
        ((IGMPV3_MASK(value, nbmant) | (1<<(nbmant))) << \
	 (IGMPV3_MASK((value) >> (nbmant), nbexp) + (nbexp))))

/* Querier's Query Interval Code and Max Resp Code use the same encoding. */
#define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
#define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
/* Implemented in net/ipv4/igmp.c. */

/* Fast-path membership check; caller must hold rcu_read_lock(). */
extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto);
extern int igmp_rcv(struct sk_buff *);

/* Per-socket join/leave and source-filter (setsockopt/getsockopt) helpers. */
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
extern void ip_mc_drop_socket(struct sock *sk);
extern int ip_mc_source(int add, int omode, struct sock *sk,
		struct ip_mreq_source *mreqs, int ifindex);
extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex);
extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
		struct ip_msfilter __user *optval, int __user *optlen);
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
		struct group_filter __user *optval, int __user *optlen);
extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif);

/* Per-device multicast state lifecycle. */
extern void ip_mc_init_dev(struct in_device *);
extern void ip_mc_destroy_dev(struct in_device *);
extern void ip_mc_up(struct in_device *);
extern void ip_mc_down(struct in_device *);
extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);

/* Per-device group refcounting. */
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);

/* Validate an IGMP packet; may trim @skb into *@skb_trimmed. */
int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
bonding: Improve IGMP join processing
In active-backup mode, the current bonding code duplicates IGMP
traffic to all slaves, so that switches are up to date in case of a
failover from an active to a backup interface. If bonding then fails
back to the original active interface, it is likely that the "active
slave" switch's IGMP forwarding for the port will be out of date until
some event occurs to refresh the switch (e.g., a membership query).
This patch alters the behavior of bonding to no longer flood
IGMP to all ports, and to issue IGMP JOINs to the newly active port at
the time of a failover. This insures that switches are kept up to date
for all cases.
"GOELLESCH Niels" <niels.goellesch@eurocontrol.int> originally
reported this problem, and included a patch. His original patch was
modified by Jay Vosburgh to additionally remove the existing IGMP flood
behavior, use RCU, streamline code paths, fix trailing white space, and
adjust for style.
Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-03-01 08:03:37 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif
|