ipmr: cleanups

Various code style cleanups

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Eric Dumazet
Date: 2010-10-01 16:15:29 +00:00
Committer: David S. Miller
parent a8c9486b81
commit a8cb16dd9c
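
The patch repeats a handful of mechanical kernel-style fixes: block comments gain a leading "*" on continuation lines (or collapse to a single line), if/else arms get braces on both sides, assignments move out of if conditions, and large initializers are reflowed one member per line. As a minimal compilable sketch of the assignment-and-braces pattern, here is a hypothetical lookup() standing in for pointer-returning helpers such as __in_dev_get_rtnl(); the code below is illustrative only and not part of the patch:

#include <stdio.h>

/* Hypothetical helper: any pointer-returning lookup shows the same pattern. */
static int *lookup(int *p)
{
	return p;
}

int main(void)
{
	int x = 42;
	int *in_dev;

	/* Old style, the shape this patch removes:
	 *
	 *	if ((in_dev = lookup(&x)) == NULL) {
	 *		return 1;
	 *	} else
	 *		printf("%d\n", *in_dev);
	 *
	 * New style: the assignment is split out of the condition, the
	 * NULL comparison becomes a plain boolean test, and both branches
	 * of the if/else carry braces.
	 */
	in_dev = lookup(&x);
	if (!in_dev) {
		return 1;
	} else {
		printf("%d\n", *in_dev);
	}
	return 0;
}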

diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c

@@ -98,7 +98,7 @@ struct ipmr_result {
 };

 /* Big lock, protecting vif table, mrt cache and mroute socket state.
-   Note that the changes are semaphored via rtnl_lock.
+ * Note that the changes are semaphored via rtnl_lock.
  */

 static DEFINE_RWLOCK(mrt_lock);
@@ -113,11 +113,11 @@ static DEFINE_RWLOCK(mrt_lock);
 static DEFINE_SPINLOCK(mfc_unres_lock);

 /* We return to original Alan's scheme. Hash table of resolved
-   entries is changed only in process context and protected
-   with weak lock mrt_lock. Queue of unresolved entries is protected
-   with strong spinlock mfc_unres_lock.
-
-   In this case data path is free of exclusive locks at all.
+ * entries is changed only in process context and protected
+ * with weak lock mrt_lock. Queue of unresolved entries is protected
+ * with strong spinlock mfc_unres_lock.
+ *
+ * In this case data path is free of exclusive locks at all.
  */

 static struct kmem_cache *mrt_cachep __read_mostly;
@@ -396,9 +396,9 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
 		set_fs(KERNEL_DS);
 		err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
 		set_fs(oldfs);
-	} else
+	} else {
 		err = -EOPNOTSUPP;
+	}
 	dev = NULL;

 	if (err == 0 &&
@@ -495,7 +495,8 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
 	dev->iflink = 0;

 	rcu_read_lock();
-	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
+	in_dev = __in_dev_get_rcu(dev);
+	if (!in_dev) {
 		rcu_read_unlock();
 		goto failure;
 	}
@@ -554,6 +555,7 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
 	if (vifi + 1 == mrt->maxvif) {
 		int tmp;
+
 		for (tmp = vifi - 1; tmp >= 0; tmp--) {
 			if (VIF_EXISTS(mrt, tmp))
 				break;
@@ -565,7 +567,8 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
 	dev_set_allmulti(dev, -1);

-	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
+	in_dev = __in_dev_get_rtnl(dev);
+	if (in_dev) {
 		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
 		ip_rt_multicast_event(in_dev);
 	}
@@ -590,7 +593,7 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
 }

 /* Destroy an unresolved cache entry, killing queued skbs
-   and reporting error to netlink readers.
+ * and reporting error to netlink readers.
  */

 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
@@ -612,9 +615,10 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
 			memset(&e->msg, 0, sizeof(e->msg));

 			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
-		} else
+		} else {
 			kfree_skb(skb);
+		}
 	}

 	ipmr_cache_free(c);
 }
@@ -735,9 +739,9 @@ static int vif_add(struct net *net, struct mr_table *mrt,
 			dev_put(dev);
 			return -EADDRNOTAVAIL;
 		}
-	} else
+	} else {
 		dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
-
+	}
 	if (!dev)
 		return -EADDRNOTAVAIL;
 	err = dev_set_allmulti(dev, 1);
@@ -750,16 +754,16 @@ static int vif_add(struct net *net, struct mr_table *mrt,
 		return -EINVAL;
 	}

-	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
+	in_dev = __in_dev_get_rtnl(dev);
+	if (!in_dev) {
 		dev_put(dev);
 		return -EADDRNOTAVAIL;
 	}
 	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
 	ip_rt_multicast_event(in_dev);

-	/*
-	 *	Fill in the VIF structures
-	 */
+	/* Fill in the VIF structures */
+
 	v->rate_limit = vifc->vifc_rate_limit;
 	v->local = vifc->vifc_lcl_addr.s_addr;
 	v->remote = vifc->vifc_rmt_addr.s_addr;
@@ -836,17 +840,15 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 	struct sk_buff *skb;
 	struct nlmsgerr *e;

-	/*
-	 *	Play the pending entries through our router
-	 */
+	/* Play the pending entries through our router */
+
 	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
 		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

 			if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
-				nlh->nlmsg_len = (skb_tail_pointer(skb) -
-						  (u8 *)nlh);
+				nlh->nlmsg_len = skb_tail_pointer(skb) -
+						 (u8 *)nlh;
 			} else {
 				nlh->nlmsg_type = NLMSG_ERROR;
 				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
@@ -857,10 +859,11 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 			}

 			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
-		} else
+		} else {
 			ip_mr_forward(net, mrt, skb, c, 0);
+		}
 	}
 }

 /*
  * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
@@ -892,9 +895,9 @@ static int ipmr_cache_report(struct mr_table *mrt,
 #ifdef CONFIG_IP_PIMSM
 	if (assert == IGMPMSG_WHOLEPKT) {
 		/* Ugly, but we have no choice with this interface.
-		   Duplicate old header, fix ihl, length etc.
-		   And all this only to mangle msg->im_msgtype and
-		   to set msg->im_mbz to "mbz" :-)
+		 * Duplicate old header, fix ihl, length etc.
+		 * And all this only to mangle msg->im_msgtype and
+		 * to set msg->im_mbz to "mbz" :-)
 		 */
 		skb_push(skb, sizeof(struct iphdr));
 		skb_reset_network_header(skb);
@@ -911,9 +914,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 #endif
 	{

-	/*
-	 *	Copy the IP header
-	 */
+	/* Copy the IP header */

 	skb->network_header = skb->tail;
 	skb_put(skb, ihl);
@@ -923,9 +924,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 	msg->im_vif = vifi;
 	skb_dst_set(skb, dst_clone(skb_dst(pkt)));

-	/*
-	 *	Add our header
-	 */
+	/* Add our header */

 	igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
 	igmp->type =
@@ -943,9 +942,8 @@ static int ipmr_cache_report(struct mr_table *mrt,
 		return -EINVAL;
 	}

-	/*
-	 *	Deliver to mrouted
-	 */
+	/* Deliver to mrouted */
+
 	ret = sock_queue_rcv_skb(mroute_sk, skb);
 	rcu_read_unlock();
 	if (ret < 0) {
@@ -979,9 +977,7 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
 	}

 	if (!found) {
-		/*
-		 *	Create a new entry if allowable
-		 */
+		/* Create a new entry if allowable */
+
 		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
 		    (c = ipmr_cache_alloc_unres()) == NULL) {
@@ -991,16 +987,14 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
 			return -ENOBUFS;
 		}

-		/*
-		 *	Fill in the new cache entry
-		 */
+		/* Fill in the new cache entry */
+
 		c->mfc_parent = -1;
 		c->mfc_origin = iph->saddr;
 		c->mfc_mcastgrp = iph->daddr;

-		/*
-		 *	Reflect first query at mrouted.
-		 */
+		/* Reflect first query at mrouted. */
+
 		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
 		if (err < 0) {
 			/* If the report failed throw the cache entry
@@ -1020,9 +1014,8 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
 			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
 	}

-	/*
-	 *	See if we can append the packet
-	 */
+	/* See if we can append the packet */
+
 	if (c->mfc_un.unres.unresolved.qlen > 3) {
 		kfree_skb(skb);
 		err = -ENOBUFS;
@@ -1140,18 +1133,16 @@ static void mroute_clean_tables(struct mr_table *mrt)
 	LIST_HEAD(list);
 	struct mfc_cache *c, *next;

-	/*
-	 *	Shut down all active vif entries
-	 */
+	/* Shut down all active vif entries */
+
 	for (i = 0; i < mrt->maxvif; i++) {
 		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
 			vif_delete(mrt, i, 0, &list);
 	}
 	unregister_netdevice_many(&list);

-	/*
-	 *	Wipe the cache
-	 */
+	/* Wipe the cache */
+
 	for (i = 0; i < MFC_LINES; i++) {
 		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
 			if (c->mfc_flags & MFC_STATIC)
@@ -1544,21 +1535,33 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 #endif

 	if (vif->flags & VIFF_TUNNEL) {
-		struct flowi fl = { .oif = vif->link,
-				    .nl_u = { .ip4_u =
-					      { .daddr = vif->remote,
-						.saddr = vif->local,
-						.tos = RT_TOS(iph->tos) } },
-				    .proto = IPPROTO_IPIP };
+		struct flowi fl = {
+			.oif = vif->link,
+			.nl_u = {
+				.ip4_u = {
+					.daddr = vif->remote,
+					.saddr = vif->local,
+					.tos = RT_TOS(iph->tos)
+				}
+			},
+			.proto = IPPROTO_IPIP
+		};
+
 		if (ip_route_output_key(net, &rt, &fl))
 			goto out_free;
 		encap = sizeof(struct iphdr);
 	} else {
-		struct flowi fl = { .oif = vif->link,
-				    .nl_u = { .ip4_u =
-					      { .daddr = iph->daddr,
-						.tos = RT_TOS(iph->tos) } },
-				    .proto = IPPROTO_IPIP };
+		struct flowi fl = {
+			.oif = vif->link,
+			.nl_u = {
+				.ip4_u = {
+					.daddr = iph->daddr,
+					.tos = RT_TOS(iph->tos)
+				}
+			},
+			.proto = IPPROTO_IPIP
+		};
+
 		if (ip_route_output_key(net, &rt, &fl))
 			goto out_free;
 	}
@@ -1567,8 +1570,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,

 	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
 		/* Do not fragment multicasts. Alas, IPv4 does not
-		   allow to send ICMP, so that packets will disappear
-		   to blackhole.
+		 * allow to send ICMP, so that packets will disappear
+		 * to blackhole.
 		 */

 		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
@@ -1591,7 +1594,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 	ip_decrease_ttl(ip_hdr(skb));

 	/* FIXME: forward and output firewalls used to be called here.
-	 * What do we do with netfilter? -- RR */
+	 * What do we do with netfilter? -- RR
+	 */
 	if (vif->flags & VIFF_TUNNEL) {
 		ip_encap(skb, vif->local, vif->remote);
 		/* FIXME: extra output firewall step used to be here. --RR */
@@ -1652,15 +1656,15 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,

 	if (skb_rtable(skb)->fl.iif == 0) {
 		/* It is our own packet, looped back.
-		   Very complicated situation...
-
-		   The best workaround until routing daemons will be
-		   fixed is not to redistribute packet, if it was
-		   send through wrong interface. It means, that
-		   multicast applications WILL NOT work for
-		   (S,G), which have default multicast route pointing
-		   to wrong oif. In any case, it is not a good
-		   idea to use multicasting applications on router.
+		 * Very complicated situation...
+		 *
+		 * The best workaround until routing daemons will be
+		 * fixed is not to redistribute packet, if it was
+		 * send through wrong interface. It means, that
+		 * multicast applications WILL NOT work for
+		 * (S,G), which have default multicast route pointing
+		 * to wrong oif. In any case, it is not a good
+		 * idea to use multicasting applications on router.
 		 */
 		goto dont_forward;
 	}
@@ -1670,9 +1674,9 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,

 	if (true_vifi >= 0 && mrt->mroute_do_assert &&
 	    /* pimsm uses asserts, when switching from RPT to SPT,
-	       so that we cannot check that packet arrived on an oif.
-	       It is bad, but otherwise we would need to move pretty
-	       large chunk of pimd to kernel. Ough... --ANK
+	     * so that we cannot check that packet arrived on an oif.
+	     * It is bad, but otherwise we would need to move pretty
+	     * large chunk of pimd to kernel. Ough... --ANK
 	     */
 	    (mrt->mroute_do_pim ||
 	     cache->mfc_un.res.ttls[true_vifi] < 255) &&
@@ -1690,10 +1694,12 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 	/*
 	 *	Forward the frame
 	 */
-	for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
+	for (ct = cache->mfc_un.res.maxvif - 1;
+	     ct >= cache->mfc_un.res.minvif; ct--) {
 		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
 			if (psend != -1) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
 				if (skb2)
 					ipmr_queue_xmit(net, mrt, skb2, cache,
 							psend);
@@ -1704,6 +1710,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 	if (psend != -1) {
 		if (local) {
 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
 			if (skb2)
 				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
 		} else {
@@ -1733,7 +1740,7 @@ int ip_mr_input(struct sk_buff *skb)
 	int err;

 	/* Packet is looped back after forward, it should not be
-	   forwarded second time, but still can be delivered locally.
+	 * forwarded second time, but still can be delivered locally.
 	 */
 	if (IPCB(skb)->flags & IPSKB_FORWARDED)
 		goto dont_forward;
@@ -1822,10 +1829,10 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,

 	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
 	/*
-	   Check that:
-	   a. packet is really destinted to a multicast group
-	   b. packet is not a NULL-REGISTER
-	   c. packet is not truncated
+	 * Check that:
+	 * a. packet is really sent to a multicast group
+	 * b. packet is not a NULL-REGISTER
+	 * c. packet is not truncated
 	 */
 	if (!ipv4_is_multicast(encap->daddr) ||
 	    encap->tot_len == 0 ||
@@ -1971,7 +1978,7 @@ int ipmr_get_route(struct net *net,
 		struct sk_buff *skb2;
 		struct iphdr *iph;
 		struct net_device *dev;
-		int vif;
+		int vif = -1;

 		if (nowait) {
 			rcu_read_unlock();
@@ -1980,7 +1987,9 @@ int ipmr_get_route(struct net *net,

 		dev = skb->dev;
 		read_lock(&mrt_lock);
-		if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
+		if (dev)
+			vif = ipmr_find_vif(mrt, dev);
+		if (vif < 0) {
 			read_unlock(&mrt_lock);
 			rcu_read_unlock();
 			return -ENODEV;
@@ -2098,7 +2107,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)

 #ifdef CONFIG_PROC_FS
 /*
- *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
+ *	The /proc interfaces to multicast routing :
+ *	/proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
 struct ipmr_vif_iter {
 	struct seq_net_private p;