2019-05-19 19:08:55 +07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2012-05-17 02:58:40 +07:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2007-09-12 16:50:50 +07:00
|
|
|
#include <linux/workqueue.h>
|
|
|
|
#include <linux/rtnetlink.h>
|
|
|
|
#include <linux/cache.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/delay.h>
|
2007-09-27 12:04:26 +07:00
|
|
|
#include <linux/sched.h>
|
2008-04-15 14:35:23 +07:00
|
|
|
#include <linux/idr.h>
|
2009-07-10 16:51:33 +07:00
|
|
|
#include <linux/rculist.h>
|
2009-07-10 16:51:35 +07:00
|
|
|
#include <linux/nsproxy.h>
|
2013-04-12 07:50:06 +07:00
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/proc_ns.h>
|
2011-05-05 07:51:50 +07:00
|
|
|
#include <linux/file.h>
|
2011-07-15 22:47:34 +07:00
|
|
|
#include <linux/export.h>
|
2012-06-14 16:31:10 +07:00
|
|
|
#include <linux/user_namespace.h>
|
2015-01-15 21:11:15 +07:00
|
|
|
#include <linux/net_namespace.h>
|
2017-02-06 16:57:33 +07:00
|
|
|
#include <linux/sched/task.h>
|
2018-07-21 04:56:53 +07:00
|
|
|
#include <linux/uidgid.h>
|
2017-02-06 16:57:33 +07:00
|
|
|
|
2015-01-15 21:11:15 +07:00
|
|
|
#include <net/sock.h>
|
|
|
|
#include <net/netlink.h>
|
2007-09-12 16:50:50 +07:00
|
|
|
#include <net/net_namespace.h>
|
2008-04-15 14:36:08 +07:00
|
|
|
#include <net/netns/generic.h>
|
2007-09-12 16:50:50 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Our network namespace constructor/destructor lists
|
|
|
|
*/
|
|
|
|
|
|
|
|
static LIST_HEAD(pernet_list);
|
|
|
|
static struct list_head *first_device = &pernet_list;
|
|
|
|
|
|
|
|
LIST_HEAD(net_namespace_list);
|
2008-10-08 16:35:06 +07:00
|
|
|
EXPORT_SYMBOL_GPL(net_namespace_list);
|
2007-09-12 16:50:50 +07:00
|
|
|
|
net: Introduce net_rwsem to protect net_namespace_list
rtnl_lock() is used everywhere, and contention is very high.
When someone wants to iterate over alive net namespaces,
he/she has no a possibility to do that without exclusive lock.
But the exclusive rtnl_lock() in such places is overkill,
and it just increases the contention. Yes, there is already
for_each_net_rcu() in kernel, but it requires rcu_read_lock(),
and this can't be sleepable. Also, sometimes it may be need
really prevent net_namespace_list growth, so for_each_net_rcu()
is not fit there.
This patch introduces new rw_semaphore, which will be used
instead of rtnl_mutex to protect net_namespace_list. It is
sleepable and allows not-exclusive iterations over net
namespaces list. It allows to stop using rtnl_lock()
in several places (what is made in next patches) and makes
less the time, we keep rtnl_mutex. Here we just add new lock,
while the explanation of we can remove rtnl_lock() there are
in next patches.
Fine grained locks generally are better, then one big lock,
so let's do that with net_namespace_list, while the situation
allows that.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-03-29 23:20:32 +07:00
|
|
|
/* Protects net_namespace_list. Nests inside rtnl_lock() */
|
|
|
|
DECLARE_RWSEM(net_rwsem);
|
|
|
|
EXPORT_SYMBOL_GPL(net_rwsem);
|
|
|
|
|
2019-06-27 03:02:33 +07:00
|
|
|
#ifdef CONFIG_KEYS
|
|
|
|
static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
|
|
|
|
#endif
|
|
|
|
|
2012-07-18 16:06:07 +07:00
|
|
|
struct net init_net = {
|
2018-01-12 22:28:31 +07:00
|
|
|
.count = REFCOUNT_INIT(1),
|
2017-04-28 04:40:23 +07:00
|
|
|
.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
|
2019-06-27 03:02:33 +07:00
|
|
|
#ifdef CONFIG_KEYS
|
|
|
|
.key_domain = &init_net_key_domain,
|
|
|
|
#endif
|
2012-07-18 16:06:07 +07:00
|
|
|
};
|
2008-01-23 13:05:33 +07:00
|
|
|
EXPORT_SYMBOL(init_net);
|
2007-09-12 16:50:50 +07:00
|
|
|
|
2016-08-11 04:36:00 +07:00
|
|
|
static bool init_net_initialized;
|
net: Introduce net_sem for protection of pernet_list
Currently, the mutex is mostly used to protect pernet operations
list. It orders setup_net() and cleanup_net() with parallel
{un,}register_pernet_operations() calls, so ->exit{,batch} methods
of the same pernet operations are executed for a dying net, as
were used to call ->init methods, even after the net namespace
is unlinked from net_namespace_list in cleanup_net().
But there are several problems with scalability. The first one
is that more than one net can't be created or destroyed
at the same moment on the node. For big machines with many cpus
running many containers it's very sensitive.
The second one is that it's need to synchronize_rcu() after net
is removed from net_namespace_list():
Destroy net_ns:
cleanup_net()
mutex_lock(&net_mutex)
list_del_rcu(&net->list)
synchronize_rcu() <--- Sleep there for ages
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list)
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list)
mutex_unlock(&net_mutex)
This primitive is not fast, especially on the systems with many processors
and/or when preemptible RCU is enabled in config. So, all the time, while
cleanup_net() is waiting for RCU grace period, creation of new net namespaces
is not possible, the tasks, who makes it, are sleeping on the same mutex:
Create net_ns:
copy_net_ns()
mutex_lock_killable(&net_mutex) <--- Sleep there for ages
I observed 20-30 seconds hangs of "unshare -n" on ordinary 8-cpu laptop
with preemptible RCU enabled after CRIU tests round is finished.
The solution is to convert net_mutex to the rw_semaphore and add fine grain
locks to really small number of pernet_operations, what really need them.
Then, pernet_operations::init/::exit methods, modifying the net-related data,
will require down_read() locking only, while down_write() will be used
for changing pernet_list (i.e., when modules are being loaded and unloaded).
This gives signify performance increase, after all patch set is applied,
like you may see here:
%for i in {1..10000}; do unshare -n bash -c exit; done
*before*
real 1m40,377s
user 0m9,672s
sys 0m19,928s
*after*
real 0m17,007s
user 0m5,311s
sys 0m11,779
(5.8 times faster)
This patch starts replacing net_mutex to net_sem. It adds rw_semaphore,
describes the variables it protects, and makes to use, where appropriate.
net_mutex is still present, and next patches will kick it out step-by-step.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Andrei Vagin <avagin@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-02-13 16:26:23 +07:00
|
|
|
/*
|
2018-03-27 22:02:23 +07:00
|
|
|
* pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
|
net: Introduce net_sem for protection of pernet_list
Currently, the mutex is mostly used to protect pernet operations
list. It orders setup_net() and cleanup_net() with parallel
{un,}register_pernet_operations() calls, so ->exit{,batch} methods
of the same pernet operations are executed for a dying net, as
were used to call ->init methods, even after the net namespace
is unlinked from net_namespace_list in cleanup_net().
But there are several problems with scalability. The first one
is that more than one net can't be created or destroyed
at the same moment on the node. For big machines with many cpus
running many containers it's very sensitive.
The second one is that it's need to synchronize_rcu() after net
is removed from net_namespace_list():
Destroy net_ns:
cleanup_net()
mutex_lock(&net_mutex)
list_del_rcu(&net->list)
synchronize_rcu() <--- Sleep there for ages
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list)
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list)
mutex_unlock(&net_mutex)
This primitive is not fast, especially on the systems with many processors
and/or when preemptible RCU is enabled in config. So, all the time, while
cleanup_net() is waiting for RCU grace period, creation of new net namespaces
is not possible, the tasks, who makes it, are sleeping on the same mutex:
Create net_ns:
copy_net_ns()
mutex_lock_killable(&net_mutex) <--- Sleep there for ages
I observed 20-30 seconds hangs of "unshare -n" on ordinary 8-cpu laptop
with preemptible RCU enabled after CRIU tests round is finished.
The solution is to convert net_mutex to the rw_semaphore and add fine grain
locks to really small number of pernet_operations, what really need them.
Then, pernet_operations::init/::exit methods, modifying the net-related data,
will require down_read() locking only, while down_write() will be used
for changing pernet_list (i.e., when modules are being loaded and unloaded).
This gives signify performance increase, after all patch set is applied,
like you may see here:
%for i in {1..10000}; do unshare -n bash -c exit; done
*before*
real 1m40,377s
user 0m9,672s
sys 0m19,928s
*after*
real 0m17,007s
user 0m5,311s
sys 0m11,779
(5.8 times faster)
This patch starts replacing net_mutex to net_sem. It adds rw_semaphore,
describes the variables it protects, and makes to use, where appropriate.
net_mutex is still present, and next patches will kick it out step-by-step.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Andrei Vagin <avagin@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-02-13 16:26:23 +07:00
|
|
|
* init_net_initialized and first_device pointer.
|
2018-03-27 22:02:32 +07:00
|
|
|
* This is internal net namespace object. Please, don't use it
|
|
|
|
* outside.
|
net: Introduce net_sem for protection of pernet_list
Currently, the mutex is mostly used to protect pernet operations
list. It orders setup_net() and cleanup_net() with parallel
{un,}register_pernet_operations() calls, so ->exit{,batch} methods
of the same pernet operations are executed for a dying net, as
were used to call ->init methods, even after the net namespace
is unlinked from net_namespace_list in cleanup_net().
But there are several problems with scalability. The first one
is that more than one net can't be created or destroyed
at the same moment on the node. For big machines with many cpus
running many containers it's very sensitive.
The second one is that it's need to synchronize_rcu() after net
is removed from net_namespace_list():
Destroy net_ns:
cleanup_net()
mutex_lock(&net_mutex)
list_del_rcu(&net->list)
synchronize_rcu() <--- Sleep there for ages
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list)
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list)
mutex_unlock(&net_mutex)
This primitive is not fast, especially on the systems with many processors
and/or when preemptible RCU is enabled in config. So, all the time, while
cleanup_net() is waiting for RCU grace period, creation of new net namespaces
is not possible, the tasks, who makes it, are sleeping on the same mutex:
Create net_ns:
copy_net_ns()
mutex_lock_killable(&net_mutex) <--- Sleep there for ages
I observed 20-30 seconds hangs of "unshare -n" on ordinary 8-cpu laptop
with preemptible RCU enabled after CRIU tests round is finished.
The solution is to convert net_mutex to the rw_semaphore and add fine grain
locks to really small number of pernet_operations, what really need them.
Then, pernet_operations::init/::exit methods, modifying the net-related data,
will require down_read() locking only, while down_write() will be used
for changing pernet_list (i.e., when modules are being loaded and unloaded).
This gives signify performance increase, after all patch set is applied,
like you may see here:
%for i in {1..10000}; do unshare -n bash -c exit; done
*before*
real 1m40,377s
user 0m9,672s
sys 0m19,928s
*after*
real 0m17,007s
user 0m5,311s
sys 0m11,779
(5.8 times faster)
This patch starts replacing net_mutex to net_sem. It adds rw_semaphore,
describes the variables it protects, and makes to use, where appropriate.
net_mutex is still present, and next patches will kick it out step-by-step.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Andrei Vagin <avagin@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-02-13 16:26:23 +07:00
|
|
|
*/
|
2018-03-27 22:02:23 +07:00
|
|
|
DECLARE_RWSEM(pernet_ops_rwsem);
|
2018-03-30 23:38:37 +07:00
|
|
|
EXPORT_SYMBOL_GPL(pernet_ops_rwsem);
|
2016-08-11 04:36:00 +07:00
|
|
|
|
2016-12-02 08:21:32 +07:00
|
|
|
#define MIN_PERNET_OPS_ID \
|
|
|
|
((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
|
|
|
|
|
2008-04-15 14:36:08 +07:00
|
|
|
#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
|
|
|
|
|
2012-01-26 07:41:38 +07:00
|
|
|
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
|
|
|
|
|
bpf: Add netns cookie and enable it for bpf cgroup hooks
In Cilium we're mainly using BPF cgroup hooks today in order to implement
kube-proxy free Kubernetes service translation for ClusterIP, NodePort (*),
ExternalIP, and LoadBalancer as well as HostPort mapping [0] for all traffic
between Cilium managed nodes. While this works in its current shape and avoids
packet-level NAT for inter Cilium managed node traffic, there is one major
limitation we're facing today, that is, lack of netns awareness.
In Kubernetes, the concept of Pods (which hold one or multiple containers)
has been built around network namespaces, so while we can use the global scope
of attaching to root BPF cgroup hooks also to our advantage (e.g. for exposing
NodePort ports on loopback addresses), we also have the need to differentiate
between initial network namespaces and non-initial one. For example, ExternalIP
services mandate that non-local service IPs are not to be translated from the
host (initial) network namespace as one example. Right now, we have an ugly
work-around in place where non-local service IPs for ExternalIP services are
not xlated from connect() and friends BPF hooks but instead via less efficient
packet-level NAT on the veth tc ingress hook for Pod traffic.
On top of determining whether we're in initial or non-initial network namespace
we also have a need for a socket-cookie like mechanism for network namespaces
scope. Socket cookies have the nice property that they can be combined as part
of the key structure e.g. for BPF LRU maps without having to worry that the
cookie could be recycled. We are planning to use this for our sessionAffinity
implementation for services. Therefore, add a new bpf_get_netns_cookie() helper
which would resolve both use cases at once: bpf_get_netns_cookie(NULL) would
provide the cookie for the initial network namespace while passing the context
instead of NULL would provide the cookie from the application's network namespace.
We're using a hole, so no size increase; the assignment happens only once.
Therefore this allows for a comparison on initial namespace as well as regular
cookie usage as we have today with socket cookies. We could later on enable
this helper for other program types as well as we would see need.
(*) Both externalTrafficPolicy={Local|Cluster} types
[0] https://github.com/cilium/cilium/blob/master/bpf/bpf_sock.c
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/c47d2346982693a9cf9da0e12690453aded4c788.1585323121.git.daniel@iogearbox.net
2020-03-27 22:58:52 +07:00
|
|
|
static atomic64_t cookie_gen;
|
|
|
|
|
|
|
|
/* Return the unique cookie of @net, lazily allocating one on first use.
 * Cookies are drawn from the global, monotonically increasing cookie_gen
 * counter, so a value is never zero and never recycled for another netns.
 */
u64 net_gen_cookie(struct net *net)
{
	while (1) {
		/* Fast path: a cookie, once set, never changes. */
		u64 res = atomic64_read(&net->net_cookie);

		if (res)
			return res;
		/* Slow path: grab a fresh id and try to install it.
		 * If a concurrent caller wins the cmpxchg race, the next
		 * loop iteration reads and returns the winner's value.
		 */
		res = atomic64_inc_return(&cookie_gen);
		atomic64_cmpxchg(&net->net_cookie, 0, res);
	}
}
|
|
|
|
|
2012-01-26 07:41:38 +07:00
|
|
|
static struct net_generic *net_alloc_generic(void)
|
|
|
|
{
|
|
|
|
struct net_generic *ng;
|
2016-12-02 08:21:32 +07:00
|
|
|
unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
|
2012-01-26 07:41:38 +07:00
|
|
|
|
|
|
|
ng = kzalloc(generic_size, GFP_KERNEL);
|
|
|
|
if (ng)
|
2016-12-02 08:12:58 +07:00
|
|
|
ng->s.len = max_gen_ptrs;
|
2012-01-26 07:41:38 +07:00
|
|
|
|
|
|
|
return ng;
|
|
|
|
}
|
|
|
|
|
netns: make struct pernet_operations::id unsigned int
Make struct pernet_operations::id unsigned.
There are 2 reasons to do so:
1)
This field is really an index into an zero based array and
thus is unsigned entity. Using negative value is out-of-bound
access by definition.
2)
On x86_64 unsigned 32-bit data which are mixed with pointers
via array indexing or offsets added or subtracted to pointers
are preffered to signed 32-bit data.
"int" being used as an array index needs to be sign-extended
to 64-bit before being used.
void f(long *p, int i)
{
g(p[i]);
}
roughly translates to
movsx rsi, esi
mov rdi, [rsi+...]
call g
MOVSX is 3 byte instruction which isn't necessary if the variable is
unsigned because x86_64 is zero extending by default.
Now, there is net_generic() function which, you guessed it right, uses
"int" as an array index:
static inline void *net_generic(const struct net *net, int id)
{
...
ptr = ng->ptr[id - 1];
...
}
And this function is used a lot, so those sign extensions add up.
Patch snipes ~1730 bytes on allyesconfig kernel (without all junk
messing with code generation):
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
Unfortunately some functions actually grow bigger.
This is a semmingly random artefact of code generation with register
allocator being used differently. gcc decides that some variable
needs to live in new r8+ registers and every access now requires REX
prefix. Or it is shifted into r12, so [r12+0] addressing mode has to be
used which is longer than [r8]
However, overall balance is in negative direction:
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
function old new delta
nfsd4_lock 3886 3959 +73
tipc_link_build_proto_msg 1096 1140 +44
mac80211_hwsim_new_radio 2776 2808 +32
tipc_mon_rcv 1032 1058 +26
svcauth_gss_legacy_init 1413 1429 +16
tipc_bcbase_select_primary 379 392 +13
nfsd4_exchange_id 1247 1260 +13
nfsd4_setclientid_confirm 782 793 +11
...
put_client_renew_locked 494 480 -14
ip_set_sockfn_get 730 716 -14
geneve_sock_add 829 813 -16
nfsd4_sequence_done 721 703 -18
nlmclnt_lookup_host 708 686 -22
nfsd4_lockt 1085 1063 -22
nfs_get_client 1077 1050 -27
tcf_bpf_init 1106 1076 -30
nfsd4_encode_fattr 5997 5930 -67
Total: Before=154856051, After=154854321, chg -0.00%
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-11-17 08:58:21 +07:00
|
|
|
/* Store @data into slot @id of @net's generic pointer array, growing the
 * array (via copy + RCU replace) when the current one is too small.
 * Must be called with pernet_ops_rwsem held (enforced by the
 * rcu_dereference_protected() below). Returns 0 or -ENOMEM.
 */
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	/* Slots below MIN_PERNET_OPS_ID overlap the net_generic header. */
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		/* Existing array is big enough; publish in place. */
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	/* Publish the new array, then free the old one after a grace period. */
	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
|
|
|
|
|
2009-11-30 05:25:28 +07:00
|
|
|
static int ops_init(const struct pernet_operations *ops, struct net *net)
|
|
|
|
{
|
2012-04-16 11:43:15 +07:00
|
|
|
int err = -ENOMEM;
|
|
|
|
void *data = NULL;
|
|
|
|
|
2009-11-30 05:25:28 +07:00
|
|
|
if (ops->id && ops->size) {
|
2012-04-16 11:43:15 +07:00
|
|
|
data = kzalloc(ops->size, GFP_KERNEL);
|
2009-11-30 05:25:28 +07:00
|
|
|
if (!data)
|
2012-04-16 11:43:15 +07:00
|
|
|
goto out;
|
2009-11-30 05:25:28 +07:00
|
|
|
|
|
|
|
err = net_assign_generic(net, *ops->id, data);
|
2012-04-16 11:43:15 +07:00
|
|
|
if (err)
|
|
|
|
goto cleanup;
|
2009-11-30 05:25:28 +07:00
|
|
|
}
|
2012-04-16 11:43:15 +07:00
|
|
|
err = 0;
|
2009-11-30 05:25:28 +07:00
|
|
|
if (ops->init)
|
2012-04-16 11:43:15 +07:00
|
|
|
err = ops->init(net);
|
|
|
|
if (!err)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
kfree(data);
|
|
|
|
|
|
|
|
out:
|
|
|
|
return err;
|
2009-11-30 05:25:28 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void ops_free(const struct pernet_operations *ops, struct net *net)
|
|
|
|
{
|
|
|
|
if (ops->id && ops->size) {
|
netns: make struct pernet_operations::id unsigned int
Make struct pernet_operations::id unsigned.
There are 2 reasons to do so:
1)
This field is really an index into an zero based array and
thus is unsigned entity. Using negative value is out-of-bound
access by definition.
2)
On x86_64 unsigned 32-bit data which are mixed with pointers
via array indexing or offsets added or subtracted to pointers
are preffered to signed 32-bit data.
"int" being used as an array index needs to be sign-extended
to 64-bit before being used.
void f(long *p, int i)
{
g(p[i]);
}
roughly translates to
movsx rsi, esi
mov rdi, [rsi+...]
call g
MOVSX is 3 byte instruction which isn't necessary if the variable is
unsigned because x86_64 is zero extending by default.
Now, there is net_generic() function which, you guessed it right, uses
"int" as an array index:
static inline void *net_generic(const struct net *net, int id)
{
...
ptr = ng->ptr[id - 1];
...
}
And this function is used a lot, so those sign extensions add up.
Patch snipes ~1730 bytes on allyesconfig kernel (without all junk
messing with code generation):
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
Unfortunately some functions actually grow bigger.
This is a semmingly random artefact of code generation with register
allocator being used differently. gcc decides that some variable
needs to live in new r8+ registers and every access now requires REX
prefix. Or it is shifted into r12, so [r12+0] addressing mode has to be
used which is longer than [r8]
However, overall balance is in negative direction:
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
function old new delta
nfsd4_lock 3886 3959 +73
tipc_link_build_proto_msg 1096 1140 +44
mac80211_hwsim_new_radio 2776 2808 +32
tipc_mon_rcv 1032 1058 +26
svcauth_gss_legacy_init 1413 1429 +16
tipc_bcbase_select_primary 379 392 +13
nfsd4_exchange_id 1247 1260 +13
nfsd4_setclientid_confirm 782 793 +11
...
put_client_renew_locked 494 480 -14
ip_set_sockfn_get 730 716 -14
geneve_sock_add 829 813 -16
nfsd4_sequence_done 721 703 -18
nlmclnt_lookup_host 708 686 -22
nfsd4_lockt 1085 1063 -22
nfs_get_client 1077 1050 -27
tcf_bpf_init 1106 1076 -30
nfsd4_encode_fattr 5997 5930 -67
Total: Before=154856051, After=154854321, chg -0.00%
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-11-17 08:58:21 +07:00
|
|
|
kfree(net_generic(net, *ops->id));
|
2009-11-30 05:25:28 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-19 01:08:59 +07:00
|
|
|
static void ops_pre_exit_list(const struct pernet_operations *ops,
|
|
|
|
struct list_head *net_exit_list)
|
|
|
|
{
|
|
|
|
struct net *net;
|
|
|
|
|
|
|
|
if (ops->pre_exit) {
|
|
|
|
list_for_each_entry(net, net_exit_list, exit_list)
|
|
|
|
ops->pre_exit(net);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-12-03 09:29:03 +07:00
|
|
|
static void ops_exit_list(const struct pernet_operations *ops,
|
|
|
|
struct list_head *net_exit_list)
|
|
|
|
{
|
|
|
|
struct net *net;
|
|
|
|
if (ops->exit) {
|
|
|
|
list_for_each_entry(net, net_exit_list, exit_list)
|
|
|
|
ops->exit(net);
|
|
|
|
}
|
|
|
|
if (ops->exit_batch)
|
|
|
|
ops->exit_batch(net_exit_list);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ops_free_list(const struct pernet_operations *ops,
|
|
|
|
struct list_head *net_exit_list)
|
|
|
|
{
|
|
|
|
struct net *net;
|
|
|
|
if (ops->size && ops->id) {
|
|
|
|
list_for_each_entry(net, net_exit_list, exit_list)
|
|
|
|
ops_free(ops, net);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-07 16:02:51 +07:00
|
|
|
/* should be called with nsid_lock held */
|
2015-01-15 21:11:15 +07:00
|
|
|
static int alloc_netid(struct net *net, struct net *peer, int reqid)
|
|
|
|
{
|
2015-05-07 16:02:50 +07:00
|
|
|
int min = 0, max = 0;
|
2015-01-15 21:11:15 +07:00
|
|
|
|
|
|
|
if (reqid >= 0) {
|
|
|
|
min = reqid;
|
|
|
|
max = reqid + 1;
|
|
|
|
}
|
|
|
|
|
2015-05-07 16:02:51 +07:00
|
|
|
return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
|
2015-01-15 21:11:15 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* This function is used by idr_for_each(). If net is equal to peer, the
|
|
|
|
* function returns the id so that idr_for_each() stops. Because we cannot
|
|
|
|
* return the id 0 (idr_for_each() will not stop), we return the magic value
|
|
|
|
* NET_ID_ZERO (-1) for it.
|
|
|
|
*/
|
|
|
|
#define NET_ID_ZERO -1
|
|
|
|
static int net_eq_idr(int id, void *net, void *peer)
|
|
|
|
{
|
|
|
|
if (net_eq(net, peer))
|
|
|
|
return id ? : NET_ID_ZERO;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-01-14 04:39:22 +07:00
|
|
|
/* Must be called from RCU-critical section or with nsid_lock held */
|
2020-01-14 04:39:20 +07:00
|
|
|
static int __peernet2id(const struct net *net, struct net *peer)
|
2015-01-15 21:11:15 +07:00
|
|
|
{
|
|
|
|
int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
|
2015-05-07 16:02:50 +07:00
|
|
|
|
2015-01-15 21:11:15 +07:00
|
|
|
/* Magic value for id 0. */
|
|
|
|
if (id == NET_ID_ZERO)
|
|
|
|
return 0;
|
|
|
|
if (id > 0)
|
|
|
|
return id;
|
|
|
|
|
2015-05-07 16:02:47 +07:00
|
|
|
return NETNSA_NSID_NOT_ASSIGNED;
|
2015-01-15 21:11:15 +07:00
|
|
|
}
|
|
|
|
|
2019-10-09 16:19:10 +07:00
|
|
|
static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
|
netns: fix GFP flags in rtnl_net_notifyid()
In rtnl_net_notifyid(), we certainly can't pass a null GFP flag to
rtnl_notify(). A GFP_KERNEL flag would be fine in most circumstances,
but there are a few paths calling rtnl_net_notifyid() from atomic
context or from RCU critical sections. The later also precludes the use
of gfp_any() as it wouldn't detect the RCU case. Also, the nlmsg_new()
call is wrong too, as it uses GFP_KERNEL unconditionally.
Therefore, we need to pass the GFP flags as parameter and propagate it
through function calls until the proper flags can be determined.
In most cases, GFP_KERNEL is fine. The exceptions are:
* openvswitch: ovs_vport_cmd_get() and ovs_vport_cmd_dump()
indirectly call rtnl_net_notifyid() from RCU critical section,
* rtnetlink: rtmsg_ifinfo_build_skb() already receives GFP flags as
parameter.
Also, in ovs_vport_cmd_build_info(), let's change the GFP flags used
by nlmsg_new(). The function is allowed to sleep, so better make the
flags consistent with the ones used in the following
ovs_vport_cmd_fill_info() call.
Found by code inspection.
Fixes: 9a9634545c70 ("netns: notify netns id events")
Signed-off-by: Guillaume Nault <gnault@redhat.com>
Acked-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-23 23:39:04 +07:00
|
|
|
struct nlmsghdr *nlh, gfp_t gfp);
|
2015-01-15 21:11:15 +07:00
|
|
|
/* This function returns the id of a peer netns. If no id is assigned, one will
|
|
|
|
* be allocated and returned.
|
|
|
|
*/
|
netns: fix GFP flags in rtnl_net_notifyid()
In rtnl_net_notifyid(), we certainly can't pass a null GFP flag to
rtnl_notify(). A GFP_KERNEL flag would be fine in most circumstances,
but there are a few paths calling rtnl_net_notifyid() from atomic
context or from RCU critical sections. The later also precludes the use
of gfp_any() as it wouldn't detect the RCU case. Also, the nlmsg_new()
call is wrong too, as it uses GFP_KERNEL unconditionally.
Therefore, we need to pass the GFP flags as parameter and propagate it
through function calls until the proper flags can be determined.
In most cases, GFP_KERNEL is fine. The exceptions are:
* openvswitch: ovs_vport_cmd_get() and ovs_vport_cmd_dump()
indirectly call rtnl_net_notifyid() from RCU critical section,
* rtnetlink: rtmsg_ifinfo_build_skb() already receives GFP flags as
parameter.
Also, in ovs_vport_cmd_build_info(), let's change the GFP flags used
by nlmsg_new(). The function is allowed to sleep, so better make the
flags consistent with the ones used in the following
ovs_vport_cmd_fill_info() call.
Found by code inspection.
Fixes: 9a9634545c70 ("netns: notify netns id events")
Signed-off-by: Guillaume Nault <gnault@redhat.com>
Acked-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-23 23:39:04 +07:00
|
|
|
/* Return @peer's nsid inside @net, allocating a fresh id (and sending an
 * RTM_NEWNSID notification with @gfp) when none exists yet. Returns
 * NETNSA_NSID_NOT_ASSIGNED if @net or @peer is already dying or the
 * allocation fails.
 */
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
	int id;

	/* @net itself is dead; don't touch its idr. */
	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	spin_lock(&net->nsid_lock);
	id = __peernet2id(net, peer);
	if (id >= 0) {
		/* Already assigned; nothing to allocate or notify. */
		spin_unlock(&net->nsid_lock);
		return id;
	}

	/* When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive, and this guarantees
	 * we never hash a peer back to net->netns_ids, after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (!maybe_get_net(peer)) {
		spin_unlock(&net->nsid_lock);
		return NETNSA_NSID_NOT_ASSIGNED;
	}

	/* reqid == -1: any free id will do. */
	id = alloc_netid(net, peer, -1);
	spin_unlock(&net->nsid_lock);

	put_net(peer);
	if (id < 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	/* Notification is sent outside nsid_lock. */
	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);

	return id;
}
|
2017-11-03 02:04:36 +07:00
|
|
|
EXPORT_SYMBOL_GPL(peernet2id_alloc);
|
2015-01-15 21:11:15 +07:00
|
|
|
|
2015-05-07 16:02:51 +07:00
|
|
|
/* This function returns, if assigned, the id of a peer netns. */
|
2020-01-17 03:16:46 +07:00
|
|
|
/* Look up, without allocating, the nsid of @peer inside @net.
 * Returns NETNSA_NSID_NOT_ASSIGNED when no id has been assigned.
 */
int peernet2id(const struct net *net, struct net *peer)
{
	int nsid;

	/* RCU protects the idr walk in __peernet2id(). */
	rcu_read_lock();
	nsid = __peernet2id(net, peer);
	rcu_read_unlock();

	return nsid;
}
|
2016-09-02 11:53:44 +07:00
|
|
|
EXPORT_SYMBOL(peernet2id);
|
2015-05-07 16:02:51 +07:00
|
|
|
|
2015-05-07 16:02:53 +07:00
|
|
|
/* This function returns true if the peer netns has an id assigned into the
|
|
|
|
* current netns.
|
|
|
|
*/
|
2020-01-17 03:16:46 +07:00
|
|
|
/* Return true when @peer already has an nsid assigned inside @net. */
bool peernet_has_id(const struct net *net, struct net *peer)
{
	int id = peernet2id(net, peer);

	return id >= 0;
}
|
|
|
|
|
2020-01-17 03:16:46 +07:00
|
|
|
struct net *get_net_ns_by_id(const struct net *net, int id)
|
2015-01-15 21:11:15 +07:00
|
|
|
{
|
|
|
|
struct net *peer;
|
|
|
|
|
|
|
|
if (id < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
peer = idr_find(&net->netns_ids, id);
|
|
|
|
if (peer)
|
net: Fix double free and memory corruption in get_net_ns_by_id()
(I can trivially verify that that idr_remove in cleanup_net happens
after the network namespace count has dropped to zero --EWB)
Function get_net_ns_by_id() does not check for net::count
after it has found a peer in netns_ids idr.
It may dereference a peer, after its count has already been
finaly decremented. This leads to double free and memory
corruption:
put_net(peer) rtnl_lock()
atomic_dec_and_test(&peer->count) [count=0] ...
__put_net(peer) get_net_ns_by_id(net, id)
spin_lock(&cleanup_list_lock)
list_add(&net->cleanup_list, &cleanup_list)
spin_unlock(&cleanup_list_lock)
queue_work() peer = idr_find(&net->netns_ids, id)
| get_net(peer) [count=1]
| ...
| (use after final put)
v ...
cleanup_net() ...
spin_lock(&cleanup_list_lock) ...
list_replace_init(&cleanup_list, ..) ...
spin_unlock(&cleanup_list_lock) ...
... ...
... put_net(peer)
... atomic_dec_and_test(&peer->count) [count=0]
... spin_lock(&cleanup_list_lock)
... list_add(&net->cleanup_list, &cleanup_list)
... spin_unlock(&cleanup_list_lock)
... queue_work()
... rtnl_unlock()
rtnl_lock() ...
for_each_net(tmp) { ...
id = __peernet2id(tmp, peer) ...
spin_lock_irq(&tmp->nsid_lock) ...
idr_remove(&tmp->netns_ids, id) ...
... ...
net_drop_ns() ...
net_free(peer) ...
} ...
|
v
cleanup_net()
...
(Second free of peer)
Also, put_net() on the right cpu may reorder with left's cpu
list_replace_init(&cleanup_list, ..), and then cleanup_list
will be corrupted.
Since cleanup_net() is executed in worker thread, while
put_net(peer) can happen everywhere, there should be
enough time for concurrent get_net_ns_by_id() to pick
the peer up, and the race does not seem to be unlikely.
The patch fixes the problem in standard way.
(Also, there is possible problem in peernet2id_alloc(), which requires
check for net::count under nsid_lock and maybe_get_net(peer), but
in current stable kernel it's used under rtnl_lock() and it has to be
safe. Openswitch begun to use peernet2id_alloc(), and possibly it should
be fixed too. While this is not in stable kernel yet, so I'll send
a separate message to netdev@ later).
Cc: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Fixes: 0c7aecd4bde4 "netns: add rtnl cmd to add and get peer netns ids"
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-12-20 00:27:56 +07:00
|
|
|
peer = maybe_get_net(peer);
|
2015-01-15 21:11:15 +07:00
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
return peer;
|
|
|
|
}
|
|
|
|
|
2007-09-12 16:50:50 +07:00
|
|
|
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	refcount_set(&net->count, 1);	/* initial user reference */
	refcount_set(&net->passive, 1);	/* keeps the struct net itself alive */
	get_random_bytes(&net->hash_mix, sizeof(u32));
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);

	/* Run every registered pernet init in registration order;
	 * on the first failure, unwind everything that succeeded.
	 */
	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	/* Publish the fully initialized net on the global list. */
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	/* Three reverse passes: pre_exit, then (after an RCU grace
	 * period) exit, then free — mirroring the teardown done by
	 * cleanup_net() for a live namespace.
	 */
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	synchronize_rcu();

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	/* Wait for outstanding RCU callbacks before returning failure. */
	rcu_barrier();
	goto out;
}
|
|
|
|
|
2017-05-24 15:22:22 +07:00
|
|
|
/* Pernet init hook: seed a freshly created namespace with default
 * core sysctl values.
 */
static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}
|
|
|
|
|
|
|
|
/* Pernet operations that apply the default sysctls to every new netns. */
static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};
|
|
|
|
|
|
|
|
static __init int net_defaults_init(void)
|
|
|
|
{
|
|
|
|
if (register_pernet_subsys(&net_defaults_ops))
|
|
|
|
panic("Cannot initialize net default settings");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
core_initcall(net_defaults_init);
|
2007-11-01 14:44:50 +07:00
|
|
|
|
2009-02-24 06:37:35 +07:00
|
|
|
#ifdef CONFIG_NET_NS
|
2016-09-23 23:06:12 +07:00
|
|
|
/* Charge one network namespace against @ns's UCOUNT_NET_NAMESPACES
 * limit for the current euid.  Returns the ucounts on success, NULL
 * on failure (caller maps NULL to -ENOSPC, see copy_net_ns()).
 */
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}
|
|
|
|
|
|
|
|
/* Undo inc_net_namespaces(): release one UCOUNT_NET_NAMESPACES charge. */
static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}
|
|
|
|
|
2018-02-25 01:20:33 +07:00
|
|
|
/* Slab cache backing struct net allocations (net_alloc()/net_free()). */
static struct kmem_cache *net_cachep __ro_after_init;
/* Workqueue for namespace teardown; NOTE(review): presumably the
 * cleanup_net() work is queued here — the queue_work() call is not
 * visible in this chunk, confirm before relying on it.
 */
static struct workqueue_struct *netns_wq;
|
|
|
|
|
2009-02-22 15:07:53 +07:00
|
|
|
/* Allocate a zeroed struct net together with its generic pointer array
 * (and, with CONFIG_KEYS, its key domain tag).  Returns NULL when any
 * allocation fails; partially built state is freed before returning.
 */
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	/* The generic pointer array is allocated separately and attached
	 * to the net via RCU below.
	 */
	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

#ifdef CONFIG_KEYS
	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
	if (!net->key_domain)
		goto out_free_2;
	refcount_set(&net->key_domain->usage, 1);
#endif

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

#ifdef CONFIG_KEYS
out_free_2:
	kmem_cache_free(net_cachep, net);
	net = NULL;	/* fall through: free ng and return NULL via out */
#endif
out_free:
	kfree(ng);
	goto out;
}
|
|
|
|
|
|
|
|
static void net_free(struct net *net)
|
|
|
|
{
|
2014-09-09 22:24:53 +07:00
|
|
|
kfree(rcu_access_pointer(net->gen));
|
2007-11-07 16:30:30 +07:00
|
|
|
kmem_cache_free(net_cachep, net);
|
|
|
|
}
|
|
|
|
|
2011-06-09 08:13:01 +07:00
|
|
|
void net_drop_ns(void *p)
|
|
|
|
{
|
|
|
|
struct net *ns = p;
|
2017-06-30 17:08:08 +07:00
|
|
|
if (ns && refcount_dec_and_test(&ns->passive))
|
2011-06-09 08:13:01 +07:00
|
|
|
net_free(ns);
|
|
|
|
}
|
|
|
|
|
2012-06-14 16:31:10 +07:00
|
|
|
/* Create (or share) a network namespace for a clone/unshare request.
 * Without CLONE_NEWNET the old namespace is reused with an extra
 * reference.  Otherwise a new one is charged against @user_ns's ucount
 * limit, allocated, and initialized via setup_net().  Returns the
 * namespace or an ERR_PTR() on failure.
 */
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	/* Enforce the per-user namespace limit before allocating. */
	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}
	refcount_set(&net->passive, 1);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	/* setup_net() must run with pernet_ops_rwsem held for reading;
	 * down_read_killable() lets a fatal signal abort the wait.
	 */
	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net, user_ns);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
		/* Error labels live inside this branch: reached directly
		 * when setup_net() fails, or jumped to from the earlier
		 * checks above — each entry point skips the undo steps
		 * for resources not yet acquired.
		 */
put_userns:
		key_remove_domain(net->key_domain);
		put_user_ns(user_ns);
		net_drop_ns(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}
|
2009-02-22 15:07:53 +07:00
|
|
|
|
2018-07-21 04:56:53 +07:00
|
|
|
/**
|
|
|
|
* net_ns_get_ownership - get sysfs ownership data for @net
|
|
|
|
* @net: network namespace in question (can be NULL)
|
|
|
|
* @uid: kernel user ID for sysfs objects
|
|
|
|
* @gid: kernel group ID for sysfs objects
|
|
|
|
*
|
|
|
|
* Returns the uid/gid pair of root in the user namespace associated with the
|
|
|
|
* given network namespace.
|
|
|
|
*/
|
|
|
|
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
|
|
|
|
{
|
|
|
|
if (net) {
|
|
|
|
kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
|
|
|
|
kgid_t ns_root_gid = make_kgid(net->user_ns, 0);
|
|
|
|
|
|
|
|
if (uid_valid(ns_root_uid))
|
|
|
|
*uid = ns_root_uid;
|
|
|
|
|
|
|
|
if (gid_valid(ns_root_gid))
|
|
|
|
*gid = ns_root_gid;
|
|
|
|
} else {
|
|
|
|
*uid = GLOBAL_ROOT_UID;
|
|
|
|
*gid = GLOBAL_ROOT_GID;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(net_ns_get_ownership);
|
|
|
|
|
net: Move net:netns_ids destruction out of rtnl_lock() and document locking scheme
Currently, we unhash a dying net from netns_ids lists
under rtnl_lock(). It's a leftover from the time when
net::netns_ids was introduced. There was no net::nsid_lock,
and rtnl_lock() was mostly need to order modification
of alive nets nsid idr, i.e. for:
for_each_net(tmp) {
...
id = __peernet2id(tmp, net);
idr_remove(&tmp->netns_ids, id);
...
}
Since we have net::nsid_lock, the modifications are
protected by this local lock, and now we may introduce
better scheme of netns_ids destruction.
Let's look at the functions peernet2id_alloc() and
get_net_ns_by_id(). Previous commits taught these
functions to work well with dying net acquired from
rtnl unlocked lists. And they are the only functions
which can hash a net to netns_ids or obtain from there.
And as easy to check, other netns_ids operating functions
works with id, not with net pointers. So, we do not
need rtnl_lock to synchronize cleanup_net() with all them.
The another property, which is used in the patch,
is that net is unhashed from net_namespace_list
in the only place and by the only process. So,
we avoid excess rcu_read_lock() or rtnl_lock(),
when we'are iterating over the list in unhash_nsid().
All the above makes possible to keep rtnl_lock() locked
only for net->list deletion, and completely avoid it
for netns_ids unhashing and destruction. As these two
doings may take long time (e.g., memory allocation
to send skb), the patch should positively act on
the scalability and signify decrease the time, which
rtnl_lock() is held in cleanup_net().
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-19 23:14:53 +07:00
|
|
|
/* Remove every nsid mapping to the dying @net from the netns_ids idr
 * of each alive namespace up to and including @last, emitting an
 * RTM_DELNSID notification for each removal, then destroy @net's own
 * nsid idr.
 */
static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from cleanup_net() work,
	 * and this work is the only process, that may delete
	 * a net from net_namespace_list. So, when the below
	 * is executing, the list may only grow. Thus, we do not
	 * use for_each_net_rcu() or net_rwsem.
	 */
	for_each_net(tmp) {
		int id;

		/* nsid_lock orders against peernet2id_alloc() hashing. */
		spin_lock(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock(&tmp->nsid_lock);
		/* Notify outside the spinlock; GFP_KERNEL allocation is
		 * allowed here since we run in process (work) context.
		 */
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
					  GFP_KERNEL);
		/* NOTE(review): @last bounds the walk — presumably it was
		 * the list tail when cleanup snapshotted the dying nets,
		 * so later additions need no unhashing; confirm against
		 * cleanup_net().
		 */
		if (tmp == last)
			break;
	}
	spin_lock(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock(&net->nsid_lock);
}
|
|
|
|
|
2018-02-19 16:58:45 +07:00
|
|
|
/* Lock-free list of dead namespaces awaiting reaping; drained wholesale
 * by cleanup_net() via llist_del_all().
 */
static LLIST_HEAD(cleanup_list);
|
2009-11-30 05:25:27 +07:00
|
|
|
|
2007-11-01 14:44:50 +07:00
|
|
|
/*
 * cleanup_net - tear down a batch of dead network namespaces.
 *
 * Runs from the netns workqueue (queued by __put_net() via
 * net_cleanup_work).  Processes every net that was pushed onto the
 * lock-free @cleanup_list since the last run, so one work execution
 * may destroy many namespaces.
 *
 * Locking order established here: pernet_ops_rwsem (read) is held for
 * the whole teardown so no pernet_operations can register/unregister
 * concurrently; net_rwsem (write) is held only for the short unlink
 * from net_namespace_list.
 */
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct llist_node *net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache last net. After we unlock rtnl, no one new net
	 * added to net_namespace_list can assign nsid pointer
	 * to a net from net_kill_list (see peernet2id_alloc()).
	 * So, we skip them in unhash_nsid().
	 *
	 * Note, that unhash_nsid() does not delete nsid links
	 * between net_kill_list's nets, as they've already
	 * deleted from net_namespace_list. But, this would be
	 * useless anyway, as netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	/* Drop each dying net's nsid links, then collect it for the
	 * pernet exit/free passes below.
	 */
	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/* Run all of the network namespace pre_exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 * Also the pre_exit() and exit() methods need this barrier.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		/* Release accounting/keyring/user-ns references before
		 * freeing the struct net itself.
		 */
		dec_net_namespaces(net->ucounts);
		key_remove_domain(net->key_domain);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
|
2017-05-30 16:38:12 +07:00
|
|
|
|
|
|
|
/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	/* cleanup_net() holds pernet_ops_rwsem for read across the whole
	 * teardown, so taking it for write here blocks until any cleanup
	 * in flight has finished; we then release it immediately.
	 */
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);
|
|
|
|
|
2009-11-30 05:25:27 +07:00
|
|
|
static DECLARE_WORK(net_cleanup_work, cleanup_net);

/*
 * __put_net - release the final reference to a network namespace.
 *
 * Teardown is deferred to the cleanup worker rather than done here.
 * llist_add() returns true only when @cleanup_list was previously
 * empty, so the work item is queued once per batch of dying nets;
 * cleanup_net() drains the whole list in one pass.
 */
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
|
|
|
|
|
2011-05-12 10:51:13 +07:00
|
|
|
struct net *get_net_ns_by_fd(int fd)
|
|
|
|
{
|
|
|
|
struct file *file;
|
2014-11-01 13:32:53 +07:00
|
|
|
struct ns_common *ns;
|
2011-05-12 10:51:13 +07:00
|
|
|
struct net *net;
|
|
|
|
|
|
|
|
file = proc_ns_fget(fd);
|
2011-06-05 07:37:35 +07:00
|
|
|
if (IS_ERR(file))
|
|
|
|
return ERR_CAST(file);
|
2011-05-12 10:51:13 +07:00
|
|
|
|
2014-11-01 14:13:17 +07:00
|
|
|
ns = get_proc_ns(file_inode(file));
|
2014-11-01 13:32:53 +07:00
|
|
|
if (ns->ops == &netns_operations)
|
|
|
|
net = get_net(container_of(ns, struct net, ns));
|
2011-06-05 07:37:35 +07:00
|
|
|
else
|
|
|
|
net = ERR_PTR(-EINVAL);
|
2011-05-12 10:51:13 +07:00
|
|
|
|
2011-06-05 07:37:35 +07:00
|
|
|
fput(file);
|
2011-05-12 10:51:13 +07:00
|
|
|
return net;
|
|
|
|
}
|
|
|
|
|
2007-11-01 14:44:50 +07:00
|
|
|
#else
|
2011-05-12 10:51:13 +07:00
|
|
|
struct net *get_net_ns_by_fd(int fd)
|
|
|
|
{
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
2007-11-01 14:44:50 +07:00
|
|
|
#endif
|
2015-01-12 21:34:05 +07:00
|
|
|
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
|
2007-11-01 14:44:50 +07:00
|
|
|
|
2009-07-10 16:51:35 +07:00
|
|
|
/*
 * get_net_ns_by_pid - find the network namespace of a task by pid.
 * @pid: pid of the target task, interpreted in the caller's pid namespace
 *       (find_task_by_vpid()).
 *
 * Returns a referenced struct net, or ERR_PTR(-ESRCH) when no such task
 * exists or its nsproxy has already been dropped (task is exiting).
 */
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		/* task_lock() keeps tsk->nsproxy stable while we take a
		 * reference on its net_ns; nsproxy may legitimately be
		 * NULL for an exiting task, hence the check.
		 */
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
|
|
|
|
|
2011-06-16 00:21:48 +07:00
|
|
|
/* Per-netns init: allocate the nsfs inode number that identifies this
 * namespace (e.g. via /proc/<pid>/ns/net) and, when namespaces are
 * enabled, hook up the netns ns_common operations.
 */
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

/* Per-netns exit: release the inode number taken in net_ns_net_init(). */
static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

/* Pernet operations pairing the inum alloc/free above. */
static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};
|
|
|
|
|
2016-09-01 05:17:49 +07:00
|
|
|
/* Netlink attribute policy for RTM_NEWNSID / RTM_GETNSID requests. */
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },	/* nsid to set/query */
	[NETNSA_PID]		= { .type = NLA_U32 },	/* peer by task pid */
	[NETNSA_FD]		= { .type = NLA_U32 },	/* peer by nsfs fd */
	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },	/* answer relative to this netns */
};
|
|
|
|
|
2017-04-16 23:48:24 +07:00
|
|
|
/*
 * rtnl_net_newid - RTM_NEWNSID handler: assign an id to a peer netns.
 *
 * The peer may be referenced by NETNSA_PID or NETNSA_FD; NETNSA_NSID
 * carries the requested id (a negative value asks alloc_netid() to pick
 * one).  Returns 0 and sends an RTM_NEWNSID notification on success,
 * -EEXIST when the peer already has an id or the requested id is taken,
 * or a negative errno from parsing / peer lookup.
 */
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
				     NETNSA_MAX, rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	/* Resolve the peer netns; remember which attribute named it so
	 * error replies can point at the offending one.
	 */
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	/* nsid_lock serializes the lookup-then-insert into net->netns_ids. */
	spin_lock(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock(&net->nsid_lock);
	if (err >= 0) {
		/* err holds the allocated id on success; notify listeners
		 * outside the spinlock (GFP_KERNEL allocation).
		 */
		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
				  nlh, GFP_KERNEL);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		/* An explicit id that could not be inserted means it is
		 * already in use; report -EEXIST rather than -ENOSPC.
		 */
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}
|
|
|
|
|
|
|
|
static int rtnl_net_get_size(void)
|
|
|
|
{
|
|
|
|
return NLMSG_ALIGN(sizeof(struct rtgenmsg))
|
|
|
|
+ nla_total_size(sizeof(s32)) /* NETNSA_NSID */
|
2018-11-26 21:42:06 +07:00
|
|
|
+ nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
|
2015-01-15 21:11:15 +07:00
|
|
|
;
|
|
|
|
}
|
|
|
|
|
2018-11-26 21:42:03 +07:00
|
|
|
/* Parameters for rtnl_net_fill(): everything needed to build one
 * RTM_NEWNSID message, shared between unicast replies and dumps.
 */
struct net_fill_args {
	u32 portid;	/* netlink port id stamped into the message header */
	u32 seq;	/* netlink sequence number to echo back */
	int flags;	/* NLM_F_* header flags */
	int cmd;	/* message type, e.g. RTM_NEWNSID */
	int nsid;	/* value for the NETNSA_NSID attribute */
	bool add_ref;	/* if set, also emit NETNSA_CURRENT_NSID */
	int ref_nsid;	/* value for NETNSA_CURRENT_NSID when add_ref */
};
|
|
|
|
|
|
|
|
/*
 * rtnl_net_fill - append one RTM_NEWNSID message described by @args
 * to @skb.  Returns 0 on success or -EMSGSIZE when the skb tailroom is
 * exhausted, in which case any partial message is cancelled.
 */
static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
			args->flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
		goto nla_put_failure;

	/* Optional: report the peer's id in the requester's netns too. */
	if (args->add_ref &&
	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	/* Roll back the half-built message so the skb stays consistent. */
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
|
|
|
|
|
2019-01-19 01:46:17 +07:00
|
|
|
/*
 * rtnl_net_valid_getid_req - parse and validate an RTM_GETNSID request.
 *
 * Legacy (non-strict) sockets get plain deprecated parsing for
 * backwards compatibility.  Strict-checking sockets additionally get
 * strict parsing plus an explicit whitelist of attributes that make
 * sense in a getid request; anything else is rejected with -EINVAL.
 * On success @tb is filled with the parsed attributes.
 */
static int rtnl_net_valid_getid_req(struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    struct nlattr **tb,
				    struct netlink_ext_ack *extack)
{
	int i, err;

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
					      tb, NETNSA_MAX, rtnl_net_policy,
					      extack);

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case NETNSA_PID:
		case NETNSA_FD:
		case NETNSA_NSID:
		case NETNSA_TARGET_NSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
			return -EINVAL;
		}
	}

	return 0;
}
|
|
|
|
|
2017-04-16 23:48:24 +07:00
|
|
|
/*
 * rtnl_net_getid - RTM_GETNSID handler: report the nsid of a peer netns.
 *
 * The peer can be referenced by pid, fd, or an nsid valid in the
 * requester's netns.  With NETNSA_TARGET_NSID the answer is translated
 * into that target namespace instead, and the id in the requester's
 * own netns is reported alongside as NETNSA_CURRENT_NSID.
 * Replies with a unicast RTM_NEWNSID message; returns negative errno
 * on failure.
 */
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net_fill_args fillargs = {
		.portid = NETLINK_CB(skb).portid,
		.seq = nlh->nlmsg_seq,
		.cmd = RTM_NEWNSID,
	};
	struct net *peer, *target = net;
	struct nlattr *nla;
	struct sk_buff *msg;
	int err;

	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;
	/* Resolve the peer; keep the attribute used so errors can flag it. */
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else if (tb[NETNSA_NSID]) {
		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
		if (!peer)
			peer = ERR_PTR(-ENOENT);
		nla = tb[NETNSA_NSID];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	if (tb[NETNSA_TARGET_NSID]) {
		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);

		/* Capability-checked lookup: target holds a reference that
		 * must be dropped on every exit path (fillargs.add_ref
		 * doubles as the "target is referenced" flag).
		 */
		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
		if (IS_ERR(target)) {
			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
			NL_SET_ERR_MSG(extack,
				       "Target netns reference is invalid");
			err = PTR_ERR(target);
			goto out;
		}
		fillargs.add_ref = true;
		fillargs.ref_nsid = peernet2id(net, peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	fillargs.nsid = peernet2id(target, peer);
	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	if (fillargs.add_ref)
		put_net(target);
	put_net(peer);
	return err;
}
|
|
|
|
|
2015-04-07 16:51:54 +07:00
|
|
|
/* Dump state shared between rtnl_net_dumpid() and the idr_for_each()
 * callback rtnl_net_dumpid_one().
 */
struct rtnl_net_dump_cb {
	struct net *tgt_net;		/* namespace whose netns_ids idr is walked */
	struct net *ref_net;		/* requester's ns, saved when a target ns was selected */
	struct sk_buff *skb;		/* netlink dump skb being filled */
	struct net_fill_args fillargs;	/* per-entry RTM_NEWNSID fill arguments */
	int idx;			/* current index in the idr walk */
	int s_idx;			/* index to resume from (cb->args[0]) */
};
|
|
|
|
|
2020-01-14 04:39:22 +07:00
|
|
|
/* Runs in RCU-critical section. */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	/* Skip entries already emitted by a previous dump round. */
	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	net_cb->fillargs.nsid = id;
	/* When dumping on behalf of another namespace, also report the id
	 * the peer has in the requester's (reference) namespace.
	 */
	if (net_cb->fillargs.add_ref)
		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
	if (ret < 0)
		return ret;	/* skb full: stop; idx not advanced, so we resume here */

cont:
	net_cb->idx++;
	return 0;
}
|
|
|
|
|
2018-11-26 21:42:04 +07:00
|
|
|
/* Strictly validate an RTM_GETNSID dump request.
 *
 * Only NETNSA_TARGET_NSID is accepted; any other attribute rejects the
 * dump. On success, if a target was given, net_cb->tgt_net holds a new
 * reference (signalled via net_cb->fillargs.add_ref) that the caller
 * must drop with put_net().
 */
static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
				   struct rtnl_net_dump_cb *net_cb,
				   struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NETNSA_MAX + 1];
	int err, i;

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err < 0)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		if (i == NETNSA_TARGET_NSID) {
			struct net *net;

			/* Capability-checked lookup; returns a referenced net. */
			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
			if (IS_ERR(net)) {
				NL_SET_BAD_ATTR(extack, tb[i]);
				NL_SET_ERR_MSG(extack,
					       "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			net_cb->fillargs.add_ref = true;
			net_cb->ref_net = net_cb->tgt_net;
			net_cb->tgt_net = net;
		} else {
			NL_SET_BAD_ATTR(extack, tb[i]);
			NL_SET_ERR_MSG(extack,
				       "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}
|
|
|
|
|
2015-04-07 16:51:54 +07:00
|
|
|
/* RTM_GETNSID dump handler: walk the target namespace's netns_ids idr
 * and emit one RTM_NEWNSID message per registered peer.
 */
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_net_dump_cb net_cb = {
		.tgt_net = sock_net(skb->sk),
		.skb = skb,
		.fillargs = {
			.portid = NETLINK_CB(cb->skb).portid,
			.seq = cb->nlh->nlmsg_seq,
			.flags = NLM_F_MULTI,
			.cmd = RTM_NEWNSID,
		},
		.idx = 0,
		.s_idx = cb->args[0],	/* resume point from previous round */
	};
	int err = 0;

	if (cb->strict_check) {
		/* May replace net_cb.tgt_net with a referenced target ns
		 * (fillargs.add_ref set), released below.
		 */
		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
		if (err < 0)
			goto end;
	}

	/* rtnl_net_dumpid_one() runs in this RCU-critical section. */
	rcu_read_lock();
	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	rcu_read_unlock();

	cb->args[0] = net_cb.idx;	/* save resume point */
end:
	if (net_cb.fillargs.add_ref)
		put_net(net_cb.tgt_net);
	return err < 0 ? err : skb->len;
}
|
|
|
|
|
2019-10-09 16:19:10 +07:00
|
|
|
/* Broadcast a netns id event (@cmd, typically RTM_NEWNSID/RTM_DELNSID)
 * for id @id to the RTNLGRP_NSID group of @net.
 *
 * @gfp is propagated to the allocation and notification because some
 * callers run in atomic context or inside RCU read-side sections, where
 * GFP_KERNEL would be invalid.
 */
static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp)
{
	struct net_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,	/* echo request seq if triggered by one */
		.cmd = cmd,
		.nsid = id,
	};
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), gfp);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
	return;

err_out:
	nlmsg_free(msg);
out:
	/* Record the failure on the group so listeners notice the loss. */
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}
|
|
|
|
|
2007-09-12 16:50:50 +07:00
|
|
|
/* Boot-time initialization of the network namespace machinery: set up
 * the initial namespace (init_net), its generic-pointer array, and the
 * RTM_NEWNSID/RTM_GETNSID rtnetlink handlers. Failures here are fatal.
 */
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);
	net_gen_cookie(&init_net);

	/* setup_net() requires the pernet ops list to be stable. */
	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}
|
|
|
|
|
|
|
|
pure_initcall(net_ns_init);
|
|
|
|
|
2007-11-13 18:23:21 +07:00
|
|
|
#ifdef CONFIG_NET_NS
|
2009-11-30 05:25:28 +07:00
|
|
|
/* CONFIG_NET_NS variant: add @ops to @list and run its init for every
 * existing namespace. On any failure, tear down what was initialized
 * and leave the list unchanged.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_pre_exit_list(ops, &net_exit_list);
	/* Let outstanding RCU readers finish before running exit methods. */
	synchronize_rcu();
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}
|
|
|
|
|
2009-11-30 05:25:28 +07:00
|
|
|
/* CONFIG_NET_NS variant: remove @ops from its list and run its
 * pre_exit/exit/free methods for every existing namespace.
 */
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	/* See comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_pre_exit_list(ops, &net_exit_list);
	/* Let outstanding RCU readers finish before running exit methods. */
	synchronize_rcu();
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}
|
|
|
|
|
2007-11-13 18:23:21 +07:00
|
|
|
#else
|
|
|
|
|
2009-11-30 05:25:28 +07:00
|
|
|
/* !CONFIG_NET_NS variant: only init_net exists. Before init_net is set
 * up, just queue @ops on the list (setup_net() will run it later);
 * afterwards, initialize it for init_net immediately.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}
|
|
|
|
|
2009-11-30 05:25:28 +07:00
|
|
|
/* !CONFIG_NET_NS variant: if init_net was never set up, @ops was only
 * queued, so just unlink it; otherwise run the full exit sequence for
 * init_net.
 */
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_pre_exit_list(ops, &net_exit_list);
		/* Wait for RCU readers before running exit methods. */
		synchronize_rcu();
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}
|
2009-11-30 05:25:28 +07:00
|
|
|
|
|
|
|
#endif /* CONFIG_NET_NS */
|
2007-11-13 18:23:21 +07:00
|
|
|
|
2008-04-15 14:35:23 +07:00
|
|
|
static DEFINE_IDA(net_generic_ids);
|
|
|
|
|
2009-11-30 05:25:28 +07:00
|
|
|
/* Common registration path: allocate a net-generic id if @ops requests
 * one (ops->id), then register. On failure the id is released again
 * after an rcu_barrier() so no RCU callback can still use it.
 */
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		/* Track the largest id so net_generic arrays are sized right. */
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		/* Flush pending RCU callbacks before recycling the id. */
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}
|
|
|
|
|
|
|
|
/* Common unregistration path: tear down @ops, wait for pending RCU
 * callbacks, then release its net-generic id (if any) for reuse.
 */
static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	/* Flush pending RCU callbacks before recycling the id. */
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}
|
|
|
|
|
2007-09-12 16:50:50 +07:00
|
|
|
/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace. Allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	/* Serializes against namespace setup/teardown and other (un)registrations. */
	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
|
|
|
|
|
|
|
|
/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	/* Serializes against namespace setup/teardown and other (un)registrations. */
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
|
|
|
|
|
|
|
|
/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace. Allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	/* First device registered marks where the device sub-list begins. */
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
|
|
|
|
|
|
|
|
/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	/* Keep first_device valid if we are removing the current head. */
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
|
2010-03-08 09:14:23 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_NET_NS
|
2014-11-01 11:37:32 +07:00
|
|
|
/* proc_ns_operations.get: return a referenced ns_common for @task's
 * network namespace, or NULL if the task has no nsproxy (i.e. it is
 * exiting). task_lock() stabilizes task->nsproxy during the lookup.
 */
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}
|
|
|
|
|
|
|
|
/* Map an embedded ns_common back to its containing struct net. */
static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}
|
|
|
|
|
2014-11-01 11:37:32 +07:00
|
|
|
/* proc_ns_operations.put: drop the reference taken by netns_get(). */
static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}
|
|
|
|
|
2020-05-05 21:04:30 +07:00
|
|
|
/* proc_ns_operations.install (setns path): switch the nsset's nsproxy
 * to network namespace @ns. Requires CAP_SYS_ADMIN over both the
 * target namespace's user ns and the caller's credential user ns.
 */
static int netns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/* Drop the old reference, then take one on the new namespace. */
	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}
|
|
|
|
|
2016-09-06 14:47:13 +07:00
|
|
|
/* proc_ns_operations.owner: user namespace that owns this net ns. */
static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}
|
|
|
|
|
2010-03-08 09:14:23 +07:00
|
|
|
/* /proc/<pid>/ns/net operations backing nsfs for network namespaces. */
const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
|
|
|
|
#endif
|