/*
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
|
|
|
|
|
|
|
|
#ifndef _IP6_FIB_H
|
|
|
|
#define _IP6_FIB_H
|
|
|
|
|
|
|
|
#include <linux/ipv6_route.h>
|
|
|
|
#include <linux/rtnetlink.h>
|
|
|
|
#include <linux/spinlock.h>
|
2017-08-03 18:28:16 +07:00
|
|
|
#include <linux/notifier.h>
|
2006-08-22 14:01:08 +07:00
|
|
|
#include <net/dst.h>
|
|
|
|
#include <net/flow.h>
|
|
|
|
#include <net/netlink.h>
|
2010-12-01 03:27:11 +07:00
|
|
|
#include <net/inetpeer.h>
|
2017-08-03 18:28:16 +07:00
|
|
|
#include <net/fib_notifier.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2009-07-31 08:52:15 +07:00
|
|
|
/* Size of the fib6 table hash: 256 buckets when policy routing can create
 * additional tables, a single bucket otherwise (only the main/local tables
 * exist, so hashing buys nothing).
 */
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
#define FIB6_TABLE_HASHSZ 256
#else
#define FIB6_TABLE_HASHSZ 1
#endif
|
|
|
|
|
2017-10-07 02:06:01 +07:00
|
|
|
/* Compile-time debug verbosity for the IPv6 routing tree code.
 * RT6_TRACE() only emits (via pr_debug) when RT6_DEBUG >= 3; at the
 * default level of 2 it compiles away to a harmless empty statement.
 */
#define RT6_DEBUG 2

#if RT6_DEBUG >= 3
#define RT6_TRACE(x...) pr_debug(x)
#else
#define RT6_TRACE(x...) do { ; } while (0)
#endif
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
struct rt6_info;
|
|
|
|
|
2009-11-03 10:26:03 +07:00
|
|
|
struct fib6_config {
|
2006-08-22 14:01:08 +07:00
|
|
|
u32 fc_table;
|
|
|
|
u32 fc_metric;
|
|
|
|
int fc_dst_len;
|
|
|
|
int fc_src_len;
|
|
|
|
int fc_ifindex;
|
|
|
|
u32 fc_flags;
|
|
|
|
u32 fc_protocol;
|
2017-02-03 03:37:08 +07:00
|
|
|
u16 fc_type; /* only 8 bits are used */
|
|
|
|
u16 fc_delete_all_nh : 1,
|
|
|
|
__unused : 15;
|
2006-08-22 14:01:08 +07:00
|
|
|
|
|
|
|
struct in6_addr fc_dst;
|
|
|
|
struct in6_addr fc_src;
|
2011-04-14 04:10:57 +07:00
|
|
|
struct in6_addr fc_prefsrc;
|
2006-08-22 14:01:08 +07:00
|
|
|
struct in6_addr fc_gateway;
|
|
|
|
|
|
|
|
unsigned long fc_expires;
|
|
|
|
struct nlattr *fc_mx;
|
|
|
|
int fc_mx_len;
|
2012-10-22 10:42:09 +07:00
|
|
|
int fc_mp_len;
|
|
|
|
struct nlattr *fc_mp;
|
2006-08-22 14:01:08 +07:00
|
|
|
|
|
|
|
struct nl_info fc_nlinfo;
|
2015-07-21 15:43:48 +07:00
|
|
|
struct nlattr *fc_encap;
|
|
|
|
u16 fc_encap_type;
|
2006-08-22 14:01:08 +07:00
|
|
|
};
|
|
|
|
|
2009-11-03 10:26:03 +07:00
|
|
|
struct fib6_node {
|
ipv6: replace rwlock with rcu and spinlock in fib6_table
With all the preparation work before, we are now ready to replace rwlock
with rcu and spinlock in fib6_table.
That means now all fib6_node in fib6_table are protected by rcu. And
when freeing fib6_node, call_rcu() is used to wait for the rcu grace
period before releasing the memory.
When accessing fib6_node, corresponding rcu APIs need to be used.
And all previous sessions protected by the write lock will now be
protected by the spin lock per table.
All previous sessions protected by read lock will now be protected by
rcu_read_lock().
A couple of things to note here:
1. As part of the work of replacing rwlock with rcu, the linked list of
fn->leaf now has to be rcu protected as well. So both fn->leaf and
rt->dst.rt6_next are now __rcu tagged and corresponding rcu APIs are
used when manipulating them.
2. For fn->rr_ptr, first of all, it also needs to be rcu protected now
and is tagged with __rcu and rcu APIs are used in corresponding places.
Secondly, fn->rr_ptr is changed in rt6_select() which is a reader
thread. This makes the issue a bit complicated. We think a valid
solution for it is to let rt6_select() grab the tb6_lock if it decides
to change it. As it is not in the normal operation and only happens when
there is no valid neighbor cache for the route, we think the performance
impact should be low.
3. fib6_walk_continue() has to be called with tb6_lock held even in the
route dumping related functions, e.g. inet6_dump_fib(),
fib6_tables_dump() and ipv6_route_seq_ops. It is because
fib6_walk_continue() makes modifications to the walker structure, and so
are fib6_repair_tree() and fib6_del_route(). In order to do proper
syncing between them, we need to let fib6_walk_continue() hold the lock.
We may be able to do further improvement on the way we do the tree walk
to get rid of the need for holding the spin lock. But not for now.
4. When fib6_del_route() removes a route from the tree, we no longer
mark rt->dst.rt6_next to NULL to make simultaneous reader be able to
further traverse the list with rcu. However, rt->dst.rt6_next is only
valid within this same rcu period. No one should access it later.
5. All the operation of atomic_inc(rt->rt6i_ref) is changed to be
performed before we publish this route (either by linking it to fn->leaf
or insert it in the list pointed by fn->leaf) just to be safe because as
soon as we publish the route, some read thread will be able to access it.
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-07 02:06:10 +07:00
|
|
|
struct fib6_node __rcu *parent;
|
|
|
|
struct fib6_node __rcu *left;
|
|
|
|
struct fib6_node __rcu *right;
|
2006-12-14 07:38:29 +07:00
|
|
|
#ifdef CONFIG_IPV6_SUBTREES
|
ipv6: replace rwlock with rcu and spinlock in fib6_table
With all the preparation work before, we are now ready to replace rwlock
with rcu and spinlock in fib6_table.
That means now all fib6_node in fib6_table are protected by rcu. And
when freeing fib6_node, call_rcu() is used to wait for the rcu grace
period before releasing the memory.
When accessing fib6_node, corresponding rcu APIs need to be used.
And all previous sessions protected by the write lock will now be
protected by the spin lock per table.
All previous sessions protected by read lock will now be protected by
rcu_read_lock().
A couple of things to note here:
1. As part of the work of replacing rwlock with rcu, the linked list of
fn->leaf now has to be rcu protected as well. So both fn->leaf and
rt->dst.rt6_next are now __rcu tagged and corresponding rcu APIs are
used when manipulating them.
2. For fn->rr_ptr, first of all, it also needs to be rcu protected now
and is tagged with __rcu and rcu APIs are used in corresponding places.
Secondly, fn->rr_ptr is changed in rt6_select() which is a reader
thread. This makes the issue a bit complicated. We think a valid
solution for it is to let rt6_select() grab the tb6_lock if it decides
to change it. As it is not in the normal operation and only happens when
there is no valid neighbor cache for the route, we think the performance
impact should be low.
3. fib6_walk_continue() has to be called with tb6_lock held even in the
route dumping related functions, e.g. inet6_dump_fib(),
fib6_tables_dump() and ipv6_route_seq_ops. It is because
fib6_walk_continue() makes modifications to the walker structure, and so
are fib6_repair_tree() and fib6_del_route(). In order to do proper
syncing between them, we need to let fib6_walk_continue() hold the lock.
We may be able to do further improvement on the way we do the tree walk
to get rid of the need for holding the spin lock. But not for now.
4. When fib6_del_route() removes a route from the tree, we no longer
mark rt->dst.rt6_next to NULL to make simultaneous reader be able to
further traverse the list with rcu. However, rt->dst.rt6_next is only
valid within this same rcu period. No one should access it later.
5. All the operation of atomic_inc(rt->rt6i_ref) is changed to be
performed before we publish this route (either by linking it to fn->leaf
or insert it in the list pointed by fn->leaf) just to be safe because as
soon as we publish the route, some read thread will be able to access it.
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-07 02:06:10 +07:00
|
|
|
struct fib6_node __rcu *subtree;
|
2006-12-14 07:38:29 +07:00
|
|
|
#endif
|
ipv6: replace rwlock with rcu and spinlock in fib6_table
With all the preparation work before, we are now ready to replace rwlock
with rcu and spinlock in fib6_table.
That means now all fib6_node in fib6_table are protected by rcu. And
when freeing fib6_node, call_rcu() is used to wait for the rcu grace
period before releasing the memory.
When accessing fib6_node, corresponding rcu APIs need to be used.
And all previous sessions protected by the write lock will now be
protected by the spin lock per table.
All previous sessions protected by read lock will now be protected by
rcu_read_lock().
A couple of things to note here:
1. As part of the work of replacing rwlock with rcu, the linked list of
fn->leaf now has to be rcu protected as well. So both fn->leaf and
rt->dst.rt6_next are now __rcu tagged and corresponding rcu APIs are
used when manipulating them.
2. For fn->rr_ptr, first of all, it also needs to be rcu protected now
and is tagged with __rcu and rcu APIs are used in corresponding places.
Secondly, fn->rr_ptr is changed in rt6_select() which is a reader
thread. This makes the issue a bit complicated. We think a valid
solution for it is to let rt6_select() grab the tb6_lock if it decides
to change it. As it is not in the normal operation and only happens when
there is no valid neighbor cache for the route, we think the performance
impact should be low.
3. fib6_walk_continue() has to be called with tb6_lock held even in the
route dumping related functions, e.g. inet6_dump_fib(),
fib6_tables_dump() and ipv6_route_seq_ops. It is because
fib6_walk_continue() makes modifications to the walker structure, and so
are fib6_repair_tree() and fib6_del_route(). In order to do proper
syncing between them, we need to let fib6_walk_continue() hold the lock.
We may be able to do further improvement on the way we do the tree walk
to get rid of the need for holding the spin lock. But not for now.
4. When fib6_del_route() removes a route from the tree, we no longer
mark rt->dst.rt6_next to NULL to make simultaneous reader be able to
further traverse the list with rcu. However, rt->dst.rt6_next is only
valid within this same rcu period. No one should access it later.
5. All the operation of atomic_inc(rt->rt6i_ref) is changed to be
performed before we publish this route (either by linking it to fn->leaf
or insert it in the list pointed by fn->leaf) just to be safe because as
soon as we publish the route, some read thread will be able to access it.
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-07 02:06:10 +07:00
|
|
|
struct rt6_info __rcu *leaf;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
__u16 fn_bit; /* bit key */
|
|
|
|
__u16 fn_flags;
|
2014-10-07 00:58:35 +07:00
|
|
|
int fn_sernum;
|
ipv6: replace rwlock with rcu and spinlock in fib6_table
With all the preparation work before, we are now ready to replace rwlock
with rcu and spinlock in fib6_table.
That means now all fib6_node in fib6_table are protected by rcu. And
when freeing fib6_node, call_rcu() is used to wait for the rcu grace
period before releasing the memory.
When accessing fib6_node, corresponding rcu APIs need to be used.
And all previous sessions protected by the write lock will now be
protected by the spin lock per table.
All previous sessions protected by read lock will now be protected by
rcu_read_lock().
A couple of things to note here:
1. As part of the work of replacing rwlock with rcu, the linked list of
fn->leaf now has to be rcu protected as well. So both fn->leaf and
rt->dst.rt6_next are now __rcu tagged and corresponding rcu APIs are
used when manipulating them.
2. For fn->rr_ptr, first of all, it also needs to be rcu protected now
and is tagged with __rcu and rcu APIs are used in corresponding places.
Secondly, fn->rr_ptr is changed in rt6_select() which is a reader
thread. This makes the issue a bit complicated. We think a valid
solution for it is to let rt6_select() grab the tb6_lock if it decides
to change it. As it is not in the normal operation and only happens when
there is no valid neighbor cache for the route, we think the performance
impact should be low.
3. fib6_walk_continue() has to be called with tb6_lock held even in the
route dumping related functions, e.g. inet6_dump_fib(),
fib6_tables_dump() and ipv6_route_seq_ops. It is because
fib6_walk_continue() makes modifications to the walker structure, and so
are fib6_repair_tree() and fib6_del_route(). In order to do proper
syncing between them, we need to let fib6_walk_continue() hold the lock.
We may be able to do further improvement on the way we do the tree walk
to get rid of the need for holding the spin lock. But not for now.
4. When fib6_del_route() removes a route from the tree, we no longer
mark rt->dst.rt6_next to NULL to make simultaneous reader be able to
further traverse the list with rcu. However, rt->dst.rt6_next is only
valid within this same rcu period. No one should access it later.
5. All the operation of atomic_inc(rt->rt6i_ref) is changed to be
performed before we publish this route (either by linking it to fn->leaf
or insert it in the list pointed by fn->leaf) just to be safe because as
soon as we publish the route, some read thread will be able to access it.
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-07 02:06:10 +07:00
|
|
|
struct rt6_info __rcu *rr_ptr;
|
2017-08-21 23:47:10 +07:00
|
|
|
struct rcu_head rcu;
|
2005-04-17 05:20:36 +07:00
|
|
|
};
|
|
|
|
|
2017-10-07 02:06:01 +07:00
|
|
|
/* Arguments threaded through the fib6 garbage-collection walk. */
struct fib6_gc_args {
	int			timeout;	/* gc timeout in jiffies; 0 = expire-only pass */
	int			more;		/* count of entries still pending expiry */
};
|
|
|
|
|
2006-08-24 07:22:24 +07:00
|
|
|
/* Accessor for a node's source-address subtree. Without
 * CONFIG_IPV6_SUBTREES there is no subtree member at all, so this
 * collapses to NULL. The rcu_dereference_protected(..., 1) form is used
 * because callers hold tb6_lock (or otherwise guarantee exclusion).
 */
#ifndef CONFIG_IPV6_SUBTREES
#define FIB6_SUBTREE(fn)	NULL
#else
#define FIB6_SUBTREE(fn)	(rcu_dereference_protected((fn)->subtree, 1))
#endif
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2015-01-06 05:57:44 +07:00
|
|
|
struct mx6_config {
|
|
|
|
const u32 *mx;
|
|
|
|
DECLARE_BITMAP(mx_valid, RTAX_MAX);
|
|
|
|
};
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* routing information
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2009-11-03 10:26:03 +07:00
|
|
|
struct rt6key {
|
2005-04-17 05:20:36 +07:00
|
|
|
struct in6_addr addr;
|
|
|
|
int plen;
|
|
|
|
};
|
|
|
|
|
2006-08-05 13:20:06 +07:00
|
|
|
struct fib6_table;
|
|
|
|
|
2017-10-07 02:05:57 +07:00
|
|
|
struct rt6_exception_bucket {
|
|
|
|
struct hlist_head chain;
|
|
|
|
int depth;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct rt6_exception {
|
|
|
|
struct hlist_node hlist;
|
|
|
|
struct rt6_info *rt6i;
|
|
|
|
unsigned long stamp;
|
|
|
|
struct rcu_head rcu;
|
|
|
|
};
|
|
|
|
|
|
|
|
#define FIB6_EXCEPTION_BUCKET_SIZE_SHIFT 10
|
|
|
|
#define FIB6_EXCEPTION_BUCKET_SIZE (1 << FIB6_EXCEPTION_BUCKET_SIZE_SHIFT)
|
|
|
|
#define FIB6_MAX_DEPTH 5
|
|
|
|
|
2009-11-03 10:26:03 +07:00
|
|
|
struct rt6_info {
|
2010-06-11 13:31:35 +07:00
|
|
|
struct dst_entry dst;
|
2017-11-29 03:40:15 +07:00
|
|
|
struct rt6_info __rcu *rt6_next;
|
2017-11-29 03:40:40 +07:00
|
|
|
struct rt6_info *from;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2010-04-01 05:24:22 +07:00
|
|
|
/*
|
|
|
|
* Tail elements of dst_entry (__refcnt etc.)
|
|
|
|
* and these elements (rarely used in hot path) are in
|
|
|
|
* the same cache line.
|
|
|
|
*/
|
|
|
|
struct fib6_table *rt6i_table;
|
2017-08-26 05:03:10 +07:00
|
|
|
struct fib6_node __rcu *rt6i_node;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
struct in6_addr rt6i_gateway;
|
|
|
|
|
2012-10-22 10:42:09 +07:00
|
|
|
/* Multipath routes:
|
|
|
|
* siblings is a list of rt6_info that have the the same metric/weight,
|
|
|
|
* destination, but not the same gateway. nsiblings is just a cache
|
|
|
|
* to speed up lookup.
|
|
|
|
*/
|
|
|
|
struct list_head rt6i_siblings;
|
|
|
|
unsigned int rt6i_nsiblings;
|
2018-01-09 21:40:25 +07:00
|
|
|
atomic_t rt6i_nh_upper_bound;
|
2012-10-22 10:42:09 +07:00
|
|
|
|
2010-04-01 05:24:22 +07:00
|
|
|
atomic_t rt6i_ref;
|
2007-09-06 17:31:25 +07:00
|
|
|
|
2017-08-15 14:09:49 +07:00
|
|
|
unsigned int rt6i_nh_flags;
|
|
|
|
|
2010-04-01 05:24:22 +07:00
|
|
|
/* These are in a separate cache line. */
|
|
|
|
struct rt6key rt6i_dst ____cacheline_aligned_in_smp;
|
|
|
|
u32 rt6i_flags;
|
|
|
|
struct rt6key rt6i_src;
|
2011-04-14 04:10:57 +07:00
|
|
|
struct rt6key rt6i_prefsrc;
|
2007-11-14 12:33:32 +07:00
|
|
|
|
2015-05-23 10:56:04 +07:00
|
|
|
struct list_head rt6i_uncached;
|
|
|
|
struct uncached_list *rt6i_uncached_list;
|
|
|
|
|
2010-04-01 05:24:22 +07:00
|
|
|
struct inet6_dev *rt6i_idev;
|
2015-05-23 10:56:06 +07:00
|
|
|
struct rt6_info * __percpu *rt6i_pcpu;
|
2017-10-07 02:05:57 +07:00
|
|
|
struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
|
2007-11-14 12:33:32 +07:00
|
|
|
|
2014-09-28 05:46:06 +07:00
|
|
|
u32 rt6i_metric;
|
ipv6: Stop rt6_info from using inet_peer's metrics
inet_peer is indexed by the dst address alone. However, the fib6 tree
could have multiple routing entries (rt6_info) for the same dst. For
example,
1. A /128 dst via multiple gateways.
2. A RTF_CACHE route cloned from a /128 route.
In the above cases, all of them will share the same metrics and
step on each other.
This patch will steer away from inet_peer's metrics and use
dst_cow_metrics_generic() for everything.
Change Highlights:
1. Remove rt6_cow_metrics() which currently acquires metrics from
inet_peer for DST_HOST route (i.e. /128 route).
2. Add rt6i_pmtu to take care of the pmtu update to avoid creating a
full size metrics just to override the RTAX_MTU.
3. After (2), the RTF_CACHE route can also share the metrics with its
dst.from route, by:
dst_init_metrics(&cache_rt->dst, dst_metrics_ptr(cache_rt->dst.from), true);
4. Stop creating RTF_CACHE route by cloning another RTF_CACHE route. Instead,
directly clone from rt->dst.
[ Currently, cloning from another RTF_CACHE is only possible during
rt6_do_redirect(). Also, the old clone is removed from the tree
immediately after the new clone is added. ]
In case of cloning from an older redirect RTF_CACHE, it should work as
before.
In case of cloning from an older pmtu RTF_CACHE, this patch will forget
the pmtu and re-learn it (if there is any) from the redirected route.
The _rt6i_peer and DST_METRICS_FORCE_OVERWRITE will be removed
in the next cleanup patch.
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Reviewed-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-04-29 03:03:06 +07:00
|
|
|
u32 rt6i_pmtu;
|
2010-04-01 05:24:22 +07:00
|
|
|
/* more non-fragment space at head required */
|
2018-01-09 21:40:28 +07:00
|
|
|
int rt6i_nh_weight;
|
2010-04-01 05:24:22 +07:00
|
|
|
unsigned short rt6i_nfheader_len;
|
|
|
|
u8 rt6i_protocol;
|
2017-10-07 02:05:57 +07:00
|
|
|
u8 exception_bucket_flushed:1,
|
2018-01-07 17:45:11 +07:00
|
|
|
should_flush:1,
|
|
|
|
unused:6;
|
2005-04-17 05:20:36 +07:00
|
|
|
};
|
|
|
|
|
ipv6: replace rwlock with rcu and spinlock in fib6_table
With all the preparation work before, we are now ready to replace rwlock
with rcu and spinlock in fib6_table.
That means now all fib6_node in fib6_table are protected by rcu. And
when freeing fib6_node, call_rcu() is used to wait for the rcu grace
period before releasing the memory.
When accessing fib6_node, corresponding rcu APIs need to be used.
And all previous sessions protected by the write lock will now be
protected by the spin lock per table.
All previous sessions protected by read lock will now be protected by
rcu_read_lock().
A couple of things to note here:
1. As part of the work of replacing rwlock with rcu, the linked list of
fn->leaf now has to be rcu protected as well. So both fn->leaf and
rt->dst.rt6_next are now __rcu tagged and corresponding rcu APIs are
used when manipulating them.
2. For fn->rr_ptr, first of all, it also needs to be rcu protected now
and is tagged with __rcu and rcu APIs are used in corresponding places.
Secondly, fn->rr_ptr is changed in rt6_select() which is a reader
thread. This makes the issue a bit complicated. We think a valid
solution for it is to let rt6_select() grab the tb6_lock if it decides
to change it. As it is not in the normal operation and only happens when
there is no valid neighbor cache for the route, we think the performance
impact should be low.
3. fib6_walk_continue() has to be called with tb6_lock held even in the
route dumping related functions, e.g. inet6_dump_fib(),
fib6_tables_dump() and ipv6_route_seq_ops. It is because
fib6_walk_continue() makes modifications to the walker structure, and so
are fib6_repair_tree() and fib6_del_route(). In order to do proper
syncing between them, we need to let fib6_walk_continue() hold the lock.
We may be able to do further improvement on the way we do the tree walk
to get rid of the need for holding the spin lock. But not for now.
4. When fib6_del_route() removes a route from the tree, we no longer
mark rt->dst.rt6_next to NULL to make simultaneous reader be able to
further traverse the list with rcu. However, rt->dst.rt6_next is only
valid within this same rcu period. No one should access it later.
5. All the operation of atomic_inc(rt->rt6i_ref) is changed to be
performed before we publish this route (either by linking it to fn->leaf
or insert it in the list pointed by fn->leaf) just to be safe because as
soon as we publish the route, some read thread will be able to access it.
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-07 02:06:10 +07:00
|
|
|
/* Iterate over every route hanging off fib6_node @fn; caller must be in
 * an rcu_read_lock() section. Expects a local 'struct rt6_info *rt'.
 */
#define for_each_fib6_node_rt_rcu(fn)					\
	for (rt = rcu_dereference((fn)->leaf); rt;			\
	     rt = rcu_dereference(rt->rt6_next))

/* Iterate over the routes of fib6 walker @w; caller holds tb6_lock, so
 * the protected (lockdep-silenced) dereference form is used.
 */
#define for_each_fib6_walker_rt(w)					\
	for (rt = (w)->leaf; rt;					\
	     rt = rcu_dereference_protected(rt->rt6_next, 1))
|
ipv6: replace rwlock with rcu and spinlock in fib6_table
With all the preparation work before, we are now ready to replace rwlock
with rcu and spinlock in fib6_table.
That means now all fib6_node in fib6_table are protected by rcu. And
when freeing fib6_node, call_rcu() is used to wait for the rcu grace
period before releasing the memory.
When accessing fib6_node, corresponding rcu APIs need to be used.
And all previous sessions protected by the write lock will now be
protected by the spin lock per table.
All previous sessions protected by read lock will now be protected by
rcu_read_lock().
A couple of things to note here:
1. As part of the work of replacing rwlock with rcu, the linked list of
fn->leaf now has to be rcu protected as well. So both fn->leaf and
rt->dst.rt6_next are now __rcu tagged and corresponding rcu APIs are
used when manipulating them.
2. For fn->rr_ptr, first of all, it also needs to be rcu protected now
and is tagged with __rcu and rcu APIs are used in corresponding places.
Secondly, fn->rr_ptr is changed in rt6_select() which is a reader
thread. This makes the issue a bit complicated. We think a valid
solution for it is to let rt6_select() grab the tb6_lock if it decides
to change it. As it is not in the normal operation and only happens when
there is no valid neighbor cache for the route, we think the performance
impact should be low.
3. fib6_walk_continue() has to be called with tb6_lock held even in the
route dumping related functions, e.g. inet6_dump_fib(),
fib6_tables_dump() and ipv6_route_seq_ops. It is because
fib6_walk_continue() makes modifications to the walker structure, and so
are fib6_repair_tree() and fib6_del_route(). In order to do proper
syncing between them, we need to let fib6_walk_continue() hold the lock.
We may be able to do further improvement on the way we do the tree walk
to get rid of the need for holding the spin lock. But not for now.
4. When fib6_del_route() removes a route from the tree, we no longer
mark rt->dst.rt6_next to NULL to make simultaneous reader be able to
further traverse the list with rcu. However, rt->dst.rt6_next is only
valid within this same rcu period. No one should access it later.
5. All the operation of atomic_inc(rt->rt6i_ref) is changed to be
performed before we publish this route (either by linking it to fn->leaf
or insert it in the list pointed by fn->leaf) just to be safe because as
soon as we publish the route, some read thread will be able to access it.
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-07 02:06:10 +07:00
|
|
|
|
2006-10-13 14:17:25 +07:00
|
|
|
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
|
|
|
|
{
|
|
|
|
return ((struct rt6_info *)dst)->rt6i_idev;
|
|
|
|
}
|
|
|
|
|
2012-04-06 07:13:10 +07:00
|
|
|
static inline void rt6_clean_expires(struct rt6_info *rt)
|
|
|
|
{
|
|
|
|
rt->rt6i_flags &= ~RTF_EXPIRES;
|
2013-10-24 15:14:27 +07:00
|
|
|
rt->dst.expires = 0;
|
2012-04-06 07:13:10 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Arm an absolute expiry time (jiffies) on a route.
 *
 * The timestamp is written before RTF_EXPIRES is set; note there is no
 * memory barrier here, so concurrent readers get only best-effort
 * ordering (see the dst->expires/dst->from race history for context).
 */
static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
{
	rt->dst.expires = expires;

	rt->rt6i_flags |= RTF_EXPIRES;
}
|
|
|
|
|
ipv6: fix race condition regarding dst->expires and dst->from.
Eric Dumazet wrote:
| Some strange crashes happen in rt6_check_expired(), with access
| to random addresses.
|
| At first glance, it looks like the RTF_EXPIRES and
| stuff added in commit 1716a96101c49186b
| (ipv6: fix problem with expired dst cache)
| are racy : same dst could be manipulated at the same time
| on different cpus.
|
| At some point, our stack believes rt->dst.from contains a dst pointer,
| while its really a jiffie value (as rt->dst.expires shares the same area
| of memory)
|
| rt6_update_expires() should be fixed, or am I missing something ?
|
| CC Neil because of https://bugzilla.redhat.com/show_bug.cgi?id=892060
Because we do not have any locks for dst_entry, we cannot change
essential structure in the entry; e.g., we cannot change reference
to other entity.
To fix this issue, split the 'from' and 'expires' fields in dst_entry
out of the union. Once 'from' is assigned in the constructor,
keep the reference until the very last stage of the life time of
the object.
Of course, it is unsafe to change 'from', so make rt6_set_from simple
just for fresh entries.
Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
Reported-by: Neil Horman <nhorman@tuxdriver.com>
CC: Gao Feng <gaofeng@cn.fujitsu.com>
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reported-by: Steinar H. Gunderson <sesse@google.com>
Reviewed-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-02-20 07:29:08 +07:00
|
|
|
/* Refresh the expiry of @rt0 to @timeout (relative, in jiffies).
 *
 * If @rt0 itself does not carry RTF_EXPIRES, walk the ->from chain to
 * find an ancestor route that does and inherit its current dst.expires
 * first, then let dst_set_expires() apply @timeout and mark @rt0 as
 * expiring.
 */
static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
	struct rt6_info *rt;

	/* find the first route in the ->from chain that already expires */
	for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); rt = rt->from);

	if (rt && rt != rt0)
		rt0->dst.expires = rt->dst.expires;

	dst_set_expires(&rt0->dst, timeout);
	rt0->rt6i_flags |= RTF_EXPIRES;
}
|
2015-05-23 10:56:01 +07:00
|
|
|
|
2017-08-21 23:47:10 +07:00
|
|
|
/* Function to safely get fn->sernum for passed in rt
 * and store result in passed in cookie.
 * Return true if we can get cookie safely
 * Return false if not
 *
 * The fib6_node backing @rt may be freed concurrently, so it is read
 * under rcu_read_lock() via the __rcu rt6i_node pointer.
 */
static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
				       u32 *cookie)
{
	struct fib6_node *fn;
	bool status = false;

	rcu_read_lock();
	fn = rcu_dereference(rt->rt6i_node);

	if (fn) {
		*cookie = fn->fn_sernum;
		/* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
		smp_rmb();
		status = true;
	}

	rcu_read_unlock();
	return status;
}
|
|
|
|
|
2015-05-23 10:56:01 +07:00
|
|
|
/* Return the fn_sernum-based validity cookie for @rt.
 *
 * Per-cpu copies (RTF_PCPU) and routes sitting on the uncached list are
 * not in the fib tree themselves; redirect to the parent route they
 * were derived from (rt->from) before reading the cookie.  Returns 0
 * when no fib6_node is attached.
 */
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
	u32 cookie = 0;

	if (rt->rt6i_flags & RTF_PCPU ||
	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
		rt = rt->from;

	rt6_get_cookie_safe(rt, &cookie);

	return cookie;
}
|
2012-04-06 07:13:10 +07:00
|
|
|
|
2012-10-29 07:13:19 +07:00
|
|
|
/* Drop a caller's reference on the dst embedded in an rt6_info. */
static inline void ip6_rt_put(struct rt6_info *rt)
{
	/* dst_release() accepts a NULL parameter.
	 * We rely on dst being first structure in struct rt6_info
	 */
	BUILD_BUG_ON(offsetof(struct rt6_info, dst) != 0);
	dst_release(&rt->dst);
}
|
|
|
|
|
2017-08-03 18:28:25 +07:00
|
|
|
void rt6_free_pcpu(struct rt6_info *non_pcpu_rt);
|
|
|
|
|
|
|
|
/* Take an additional fib-side reference (rt6i_ref) on a route entry. */
static inline void rt6_hold(struct rt6_info *rt)
{
	atomic_inc(&rt->rt6i_ref);
}
|
|
|
|
|
|
|
|
/* Drop a fib-side reference; on the final put, free the per-cpu route
 * copies and release the embedded dst.  The dst_dev_put() before the
 * last dst_release() drops device bindings — keep this ordering.
 */
static inline void rt6_release(struct rt6_info *rt)
{
	if (atomic_dec_and_test(&rt->rt6i_ref)) {
		rt6_free_pcpu(rt);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}
|
|
|
|
|
2014-10-07 00:58:34 +07:00
|
|
|
/* Traversal states for the iterative fib6 tree walker.
 * (State semantics per fib6_walk_continue() — confirm against net/ipv6/ip6_fib.c.)
 */
enum fib6_walk_state {
#ifdef CONFIG_IPV6_SUBTREES
	FWS_S,	/* descend into source-routing subtree */
#endif
	FWS_L,	/* visit left child next */
	FWS_R,	/* visit right child next */
	FWS_C,	/* process current node */
	FWS_U	/* move back up to the parent */
};
|
|
|
|
|
|
|
|
/* State of an in-progress fib6 tree traversal (used by dump/clean paths). */
struct fib6_walker {
	struct list_head lh;		/* linkage into the walker list */
	struct fib6_node *root, *node;	/* tree root and current position */
	struct rt6_info *leaf;		/* current route under the node */
	enum fib6_walk_state state;
	unsigned int skip;		/* entries to skip before calling func */
	unsigned int count;		/* entries processed so far */
	int (*func)(struct fib6_walker *);	/* per-node callback */
	void *args;			/* opaque data for func */
};
|
|
|
|
|
|
|
|
/* Per-namespace IPv6 FIB statistics counters. */
struct rt6_statistics {
	__u32		fib_nodes;		/* all fib6 nodes */
	__u32		fib_route_nodes;	/* intermediate nodes */
	__u32		fib_rt_entries;		/* rt entries in fib table */
	__u32		fib_rt_cache;		/* cached rt entries in exception table */
	__u32		fib_discarded_routes;	/* total number of routes deleted */

	/* The following stats are not protected by any lock */
	atomic_t	fib_rt_alloc;		/* total number of routes alloced */
	atomic_t	fib_rt_uncache;		/* rt entries in uncached list */
};
|
|
|
|
|
|
|
|
#define RTN_TL_ROOT 0x0001
|
|
|
|
#define RTN_ROOT 0x0002 /* tree root node */
|
|
|
|
#define RTN_RTINFO 0x0004 /* node with valid routing info */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* priority levels (or metrics)
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
2006-08-05 13:20:06 +07:00
|
|
|
/* One IPv6 routing table: a tree of fib6_nodes rooted at tb6_root,
 * hashed into the per-netns table list by tb6_id.  Readers traverse
 * under RCU; tb6_lock serializes writers.
 */
struct fib6_table {
	struct hlist_node	tb6_hlist;	/* per-netns hash linkage */
	u32			tb6_id;		/* table id (RT_TABLE_*) */
	spinlock_t		tb6_lock;	/* protects writers; readers use RCU */
	struct fib6_node	tb6_root;	/* root node of the tree */
	struct inet_peer_base	tb6_peers;
	unsigned int		flags;
	unsigned int		fib_seq;	/* change seq, see fib6_tables_seq_read() */
#define RT6_TABLE_HAS_DFLT_ROUTER	BIT(0)
};
|
|
|
|
|
|
|
|
#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
|
|
|
|
#define RT6_TABLE_MAIN RT_TABLE_MAIN
|
|
|
|
#define RT6_TABLE_DFLT RT6_TABLE_MAIN
|
|
|
|
#define RT6_TABLE_INFO RT6_TABLE_MAIN
|
|
|
|
#define RT6_TABLE_PREFIX RT6_TABLE_MAIN
|
|
|
|
|
|
|
|
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
|
|
|
|
#define FIB6_TABLE_MIN 1
|
|
|
|
#define FIB6_TABLE_MAX RT_TABLE_MAX
|
2006-08-04 17:39:02 +07:00
|
|
|
#define RT6_TABLE_LOCAL RT_TABLE_LOCAL
|
2006-08-05 13:20:06 +07:00
|
|
|
#else
|
|
|
|
#define FIB6_TABLE_MIN RT_TABLE_MAIN
|
|
|
|
#define FIB6_TABLE_MAX FIB6_TABLE_MIN
|
2006-08-04 17:39:02 +07:00
|
|
|
#define RT6_TABLE_LOCAL RT6_TABLE_MAIN
|
2006-08-05 13:20:06 +07:00
|
|
|
#endif
|
|
|
|
|
2008-03-05 04:48:30 +07:00
|
|
|
/* Per-table lookup callback invoked by fib6_rule_lookup(). */
typedef struct rt6_info *(*pol_lookup_t)(struct net *,
					 struct fib6_table *,
					 struct flowi6 *,
					 const struct sk_buff *, int);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2017-08-03 18:28:17 +07:00
|
|
|
/* Payload handed to FIB notifier chains for IPv6 route events. */
struct fib6_entry_notifier_info {
	struct fib_notifier_info info; /* must be first */
	struct rt6_info *rt;
};
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* exported functions
|
|
|
|
*/
|
|
|
|
|
2013-09-22 00:22:42 +07:00
|
|
|
struct fib6_table *fib6_get_table(struct net *net, u32 id);
|
|
|
|
struct fib6_table *fib6_new_table(struct net *net, u32 id);
|
|
|
|
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
|
2018-03-02 23:32:17 +07:00
|
|
|
const struct sk_buff *skb,
|
2013-09-22 00:22:42 +07:00
|
|
|
int flags, pol_lookup_t lookup);
|
2006-08-05 13:20:06 +07:00
|
|
|
|
2013-09-22 00:22:42 +07:00
|
|
|
struct fib6_node *fib6_lookup(struct fib6_node *root,
|
|
|
|
const struct in6_addr *daddr,
|
|
|
|
const struct in6_addr *saddr);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-09-22 00:22:42 +07:00
|
|
|
struct fib6_node *fib6_locate(struct fib6_node *root,
|
|
|
|
const struct in6_addr *daddr, int dst_len,
|
2017-10-07 02:06:02 +07:00
|
|
|
const struct in6_addr *saddr, int src_len,
|
|
|
|
bool exact_match);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-09-22 00:22:42 +07:00
|
|
|
void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
|
2013-12-27 15:32:38 +07:00
|
|
|
void *arg);
|
2006-08-05 13:20:06 +07:00
|
|
|
|
2015-01-06 05:57:44 +07:00
|
|
|
int fib6_add(struct fib6_node *root, struct rt6_info *rt,
|
2017-05-21 23:12:04 +07:00
|
|
|
struct nl_info *info, struct mx6_config *mxc,
|
|
|
|
struct netlink_ext_ack *extack);
|
2013-09-22 00:22:42 +07:00
|
|
|
int fib6_del(struct rt6_info *rt, struct nl_info *info);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2015-09-14 00:18:33 +07:00
|
|
|
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
|
|
|
|
unsigned int flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-09-22 00:22:42 +07:00
|
|
|
void fib6_run_gc(unsigned long expires, struct net *net, bool force);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-09-22 00:22:42 +07:00
|
|
|
void fib6_gc_cleanup(void);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-09-22 00:22:42 +07:00
|
|
|
int fib6_init(void);
|
2006-08-04 17:39:02 +07:00
|
|
|
|
2013-09-21 21:55:59 +07:00
|
|
|
int ipv6_route_open(struct inode *inode, struct file *file);
|
|
|
|
|
2017-08-03 18:28:16 +07:00
|
|
|
int call_fib6_notifier(struct notifier_block *nb, struct net *net,
|
|
|
|
enum fib_event_type event_type,
|
|
|
|
struct fib_notifier_info *info);
|
|
|
|
int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
|
|
|
|
struct fib_notifier_info *info);
|
|
|
|
|
|
|
|
int __net_init fib6_notifier_init(struct net *net);
|
|
|
|
void __net_exit fib6_notifier_exit(struct net *net);
|
|
|
|
|
2017-08-03 18:28:19 +07:00
|
|
|
unsigned int fib6_tables_seq_read(struct net *net);
|
|
|
|
int fib6_tables_dump(struct net *net, struct notifier_block *nb);
|
|
|
|
|
2017-10-07 02:05:56 +07:00
|
|
|
void fib6_update_sernum(struct rt6_info *rt);
|
2018-01-07 17:45:13 +07:00
|
|
|
void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt);
|
2017-10-07 02:05:56 +07:00
|
|
|
|
2007-12-08 15:14:54 +07:00
|
|
|
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
|
2013-09-22 00:22:42 +07:00
|
|
|
int fib6_rules_init(void);
|
|
|
|
void fib6_rules_cleanup(void);
|
2017-08-03 18:28:15 +07:00
|
|
|
bool fib6_rule_default(const struct fib_rule *rule);
|
2017-08-03 18:28:18 +07:00
|
|
|
int fib6_rules_dump(struct net *net, struct notifier_block *nb);
|
|
|
|
unsigned int fib6_rules_seq_read(struct net *net);
|
2018-03-01 10:43:22 +07:00
|
|
|
|
|
|
|
static inline bool fib6_rules_early_flow_dissect(struct net *net,
|
|
|
|
struct sk_buff *skb,
|
|
|
|
struct flowi6 *fl6,
|
|
|
|
struct flow_keys *flkeys)
|
|
|
|
{
|
|
|
|
unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
|
|
|
|
|
|
|
|
if (!net->ipv6.fib6_rules_require_fldissect)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
skb_flow_dissect_flow_keys(skb, flkeys, flag);
|
|
|
|
fl6->fl6_sport = flkeys->ports.src;
|
|
|
|
fl6->fl6_dport = flkeys->ports.dst;
|
|
|
|
fl6->flowi6_proto = flkeys->basic.ip_proto;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2007-12-08 15:14:54 +07:00
|
|
|
#else
|
|
|
|
/* No policy-rule support compiled in: initialization trivially succeeds. */
static inline int fib6_rules_init(void)
{
	return 0;
}
|
|
|
|
/* No policy-rule support compiled in: nothing to tear down. */
static inline void fib6_rules_cleanup(void)
{
}
|
2017-08-03 18:28:15 +07:00
|
|
|
/* Without CONFIG_IPV6_MULTIPLE_TABLES every rule counts as default. */
static inline bool fib6_rule_default(const struct fib_rule *rule)
{
	return true;
}
|
2017-08-03 18:28:18 +07:00
|
|
|
/* No policy rules exist to dump in the single-table build. */
static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb)
{
	return 0;
}
|
|
|
|
/* Rule sequence counter is constant when policy rules are disabled. */
static inline unsigned int fib6_rules_seq_read(struct net *net)
{
	return 0;
}
|
2018-03-01 10:43:22 +07:00
|
|
|
/* With policy rules disabled, early dissection is never required. */
static inline bool fib6_rules_early_flow_dissect(struct net *net,
						 struct sk_buff *skb,
						 struct flowi6 *fl6,
						 struct flow_keys *flkeys)
{
	return false;
}
|
2007-12-08 15:14:54 +07:00
|
|
|
#endif
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif
|