2019-05-19 13:08:55 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2012-05-16 19:58:40 +00:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
|
2007-09-12 11:50:50 +02:00
|
|
|
#include <linux/workqueue.h>
|
|
|
|
|
#include <linux/rtnetlink.h>
|
|
|
|
|
#include <linux/cache.h>
|
|
|
|
|
#include <linux/slab.h>
|
|
|
|
|
#include <linux/list.h>
|
|
|
|
|
#include <linux/delay.h>
|
2007-09-26 22:04:26 -07:00
|
|
|
#include <linux/sched.h>
|
2008-04-15 00:35:23 -07:00
|
|
|
#include <linux/idr.h>
|
2009-07-10 09:51:33 +00:00
|
|
|
#include <linux/rculist.h>
|
2009-07-10 09:51:35 +00:00
|
|
|
#include <linux/nsproxy.h>
|
2013-04-12 01:50:06 +01:00
|
|
|
#include <linux/fs.h>
|
|
|
|
|
#include <linux/proc_ns.h>
|
2011-05-04 17:51:50 -07:00
|
|
|
#include <linux/file.h>
|
2011-07-15 11:47:34 -04:00
|
|
|
#include <linux/export.h>
|
2012-06-14 02:31:10 -07:00
|
|
|
#include <linux/user_namespace.h>
|
2015-01-15 15:11:15 +01:00
|
|
|
#include <linux/net_namespace.h>
|
2017-02-06 10:57:33 +01:00
|
|
|
#include <linux/sched/task.h>
|
2018-07-20 21:56:53 +00:00
|
|
|
#include <linux/uidgid.h>
|
2022-05-15 18:16:54 -04:00
|
|
|
#include <linux/proc_fs.h>
|
2025-09-12 13:52:44 +02:00
|
|
|
#include <linux/nstree.h>
|
2017-02-06 10:57:33 +01:00
|
|
|
|
2025-06-30 09:35:38 +00:00
|
|
|
#include <net/aligned_data.h>
|
2015-01-15 15:11:15 +01:00
|
|
|
#include <net/sock.h>
|
|
|
|
|
#include <net/netlink.h>
|
2007-09-12 11:50:50 +02:00
|
|
|
#include <net/net_namespace.h>
|
2008-04-15 00:36:08 -07:00
|
|
|
#include <net/netns/generic.h>
|
2007-09-12 11:50:50 +02:00
|
|
|
|
|
|
|
|
/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
/* NOTE(review): appears to mark where "device" pernet ops begin inside
 * pernet_list — confirm against register_pernet_device().
 */
static struct list_head *first_device = &pernet_list;

/* All live network namespaces, linked via net->list. */
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

#ifdef CONFIG_KEYS
/* Key domain used by the initial network namespace; starts with one ref. */
static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
#endif

/* The initial network namespace, statically allocated. */
struct net init_net;
EXPORT_SYMBOL(init_net);

/* NOTE(review): presumably set once init_net has been fully set up
 * (protected by pernet_ops_rwsem, see comment below) — confirm at the
 * site that assigns it.
 */
static bool init_net_initialized;
|
net: Introduce net_sem for protection of pernet_list
Currently, the mutex is mostly used to protect pernet operations
list. It orders setup_net() and cleanup_net() with parallel
{un,}register_pernet_operations() calls, so ->exit{,batch} methods
of the same pernet operations are executed for a dying net, as
were used to call ->init methods, even after the net namespace
is unlinked from net_namespace_list in cleanup_net().
But there are several problems with scalability. The first one
is that more than one net can't be created or destroyed
at the same moment on the node. For big machines with many cpus
running many containers it's very sensitive.
The second one is that it's need to synchronize_rcu() after net
is removed from net_namespace_list():
Destroy net_ns:
cleanup_net()
mutex_lock(&net_mutex)
list_del_rcu(&net->list)
synchronize_rcu() <--- Sleep there for ages
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list)
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list)
mutex_unlock(&net_mutex)
This primitive is not fast, especially on the systems with many processors
and/or when preemptible RCU is enabled in config. So, all the time, while
cleanup_net() is waiting for RCU grace period, creation of new net namespaces
is not possible, the tasks, who makes it, are sleeping on the same mutex:
Create net_ns:
copy_net_ns()
mutex_lock_killable(&net_mutex) <--- Sleep there for ages
I observed 20-30 seconds hangs of "unshare -n" on ordinary 8-cpu laptop
with preemptible RCU enabled after CRIU tests round is finished.
The solution is to convert net_mutex to the rw_semaphore and add fine grain
locks to really small number of pernet_operations, what really need them.
Then, pernet_operations::init/::exit methods, modifying the net-related data,
will require down_read() locking only, while down_write() will be used
for changing pernet_list (i.e., when modules are being loaded and unloaded).
This gives signify performance increase, after all patch set is applied,
like you may see here:
%for i in {1..10000}; do unshare -n bash -c exit; done
*before*
real 1m40,377s
user 0m9,672s
sys 0m19,928s
*after*
real 0m17,007s
user 0m5,311s
sys 0m11,779
(5.8 times faster)
This patch starts replacing net_mutex to net_sem. It adds rw_semaphore,
describes the variables it protects, and makes to use, where appropriate.
net_mutex is still present, and next patches will kick it out step-by-step.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Andrei Vagin <avagin@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-02-13 12:26:23 +03:00
|
|
|
/*
 * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
 * init_net_initialized and first_device pointer.
 * This is internal net namespace object. Please, don't use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);

/* Number of void * slots occupied by struct net_generic's own header;
 * pernet ids below this would overlap the header, hence the minimum.
 */
#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

/* Size (in slots) used for newly allocated net->gen arrays; read with
 * READ_ONCE() in net_alloc_generic().
 */
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
|
|
|
|
|
|
|
|
|
|
static struct net_generic *net_alloc_generic(void)
|
|
|
|
|
{
|
2024-05-02 10:20:06 -03:00
|
|
|
unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
|
|
|
|
|
unsigned int generic_size;
|
2012-01-26 00:41:38 +00:00
|
|
|
struct net_generic *ng;
|
2024-05-02 10:20:06 -03:00
|
|
|
|
|
|
|
|
generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);
|
2012-01-26 00:41:38 +00:00
|
|
|
|
|
|
|
|
ng = kzalloc(generic_size, GFP_KERNEL);
|
|
|
|
|
if (ng)
|
2024-05-02 10:20:06 -03:00
|
|
|
ng->s.len = gen_ptrs;
|
2012-01-26 00:41:38 +00:00
|
|
|
|
|
|
|
|
return ng;
|
|
|
|
|
}
|
|
|
|
|
|
netns: make struct pernet_operations::id unsigned int
Make struct pernet_operations::id unsigned.
There are 2 reasons to do so:
1)
This field is really an index into an zero based array and
thus is unsigned entity. Using negative value is out-of-bound
access by definition.
2)
On x86_64 unsigned 32-bit data which are mixed with pointers
via array indexing or offsets added or subtracted to pointers
are preffered to signed 32-bit data.
"int" being used as an array index needs to be sign-extended
to 64-bit before being used.
void f(long *p, int i)
{
g(p[i]);
}
roughly translates to
movsx rsi, esi
mov rdi, [rsi+...]
call g
MOVSX is 3 byte instruction which isn't necessary if the variable is
unsigned because x86_64 is zero extending by default.
Now, there is net_generic() function which, you guessed it right, uses
"int" as an array index:
static inline void *net_generic(const struct net *net, int id)
{
...
ptr = ng->ptr[id - 1];
...
}
And this function is used a lot, so those sign extensions add up.
Patch snipes ~1730 bytes on allyesconfig kernel (without all junk
messing with code generation):
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
Unfortunately some functions actually grow bigger.
This is a semmingly random artefact of code generation with register
allocator being used differently. gcc decides that some variable
needs to live in new r8+ registers and every access now requires REX
prefix. Or it is shifted into r12, so [r12+0] addressing mode has to be
used which is longer than [r8]
However, overall balance is in negative direction:
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
function old new delta
nfsd4_lock 3886 3959 +73
tipc_link_build_proto_msg 1096 1140 +44
mac80211_hwsim_new_radio 2776 2808 +32
tipc_mon_rcv 1032 1058 +26
svcauth_gss_legacy_init 1413 1429 +16
tipc_bcbase_select_primary 379 392 +13
nfsd4_exchange_id 1247 1260 +13
nfsd4_setclientid_confirm 782 793 +11
...
put_client_renew_locked 494 480 -14
ip_set_sockfn_get 730 716 -14
geneve_sock_add 829 813 -16
nfsd4_sequence_done 721 703 -18
nlmclnt_lookup_host 708 686 -22
nfsd4_lockt 1085 1063 -22
nfs_get_client 1077 1050 -27
tcf_bpf_init 1106 1076 -30
nfsd4_encode_fattr 5997 5930 -67
Total: Before=154856051, After=154854321, chg -0.00%
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-11-17 04:58:21 +03:00
|
|
|
/* Install @data as net_generic(net)->ptr[id]. If @id is beyond the
 * current length of net->gen, a larger copy is allocated, populated and
 * RCU-swapped in, with the old array freed after a grace period.
 * Caller must hold pernet_ops_rwsem. Returns 0 or -ENOMEM.
 */
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	/* ids below MIN_PERNET_OPS_ID would overlap the header. */
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		/* Slot already exists in the current array. */
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	/* Publish the new array before scheduling the old one for free. */
	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
|
|
|
|
|
|
2009-11-29 22:25:28 +00:00
|
|
|
/* Run one pernet_operations' init path for @net: if ops->id is set,
 * kzalloc ops->size bytes of per-net data and publish it via
 * net_assign_generic(), then call ops->init(). On ->init() failure the
 * just-published generic slot is cleared and the data freed.
 * Caller holds pernet_ops_rwsem. Returns 0 or a negative errno.
 */
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	struct net_generic *ng;
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

	/* ->init() failed: unpublish the slot so nothing keeps a stale
	 * pointer to @data, which is freed below.
	 */
	if (ops->id) {
		ng = rcu_dereference_protected(net->gen,
					       lockdep_is_held(&pernet_ops_rwsem));
		ng->ptr[*ops->id] = NULL;
	}

cleanup:
	kfree(data);

out:
	return err;
}
|
|
|
|
|
|
2019-06-18 11:08:59 -07:00
|
|
|
static void ops_pre_exit_list(const struct pernet_operations *ops,
|
|
|
|
|
struct list_head *net_exit_list)
|
|
|
|
|
{
|
|
|
|
|
struct net *net;
|
|
|
|
|
|
|
|
|
|
if (ops->pre_exit) {
|
|
|
|
|
list_for_each_entry(net, net_exit_list, exit_list)
|
|
|
|
|
ops->pre_exit(net);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-04-11 13:52:32 -07:00
|
|
|
/* Run ->exit_rtnl() for every net on @net_exit_list against each ops in
 * @ops_list, walking backwards from @ops (exclusive). Devices to remove
 * are collected on a local list and unregistered in one batch, all under
 * a single rtnl_lock(); each net's own per-netns rtnl is taken inside it.
 */
static void ops_exit_rtnl_list(const struct list_head *ops_list,
			       const struct pernet_operations *ops,
			       struct list_head *net_exit_list)
{
	const struct pernet_operations *saved_ops = ops;
	LIST_HEAD(dev_kill_list);
	struct net *net;

	rtnl_lock();

	list_for_each_entry(net, net_exit_list, exit_list) {
		__rtnl_net_lock(net);

		/* Restart the backwards ops walk for each net. */
		ops = saved_ops;
		list_for_each_entry_continue_reverse(ops, ops_list, list) {
			if (ops->exit_rtnl)
				ops->exit_rtnl(net, &dev_kill_list);
		}

		__rtnl_net_unlock(net);
	}

	/* Batch all collected device removals under one RTNL section. */
	unregister_netdevice_many(&dev_kill_list);

	rtnl_unlock();
}
|
|
|
|
|
|
2009-12-03 02:29:03 +00:00
|
|
|
static void ops_exit_list(const struct pernet_operations *ops,
|
|
|
|
|
struct list_head *net_exit_list)
|
|
|
|
|
{
|
|
|
|
|
if (ops->exit) {
|
2025-04-11 13:52:32 -07:00
|
|
|
struct net *net;
|
|
|
|
|
|
2022-01-18 03:43:40 -08:00
|
|
|
list_for_each_entry(net, net_exit_list, exit_list) {
|
2009-12-03 02:29:03 +00:00
|
|
|
ops->exit(net);
|
2022-01-18 03:43:40 -08:00
|
|
|
cond_resched();
|
|
|
|
|
}
|
2009-12-03 02:29:03 +00:00
|
|
|
}
|
2025-04-11 13:52:32 -07:00
|
|
|
|
2009-12-03 02:29:03 +00:00
|
|
|
if (ops->exit_batch)
|
|
|
|
|
ops->exit_batch(net_exit_list);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void ops_free_list(const struct pernet_operations *ops,
|
|
|
|
|
struct list_head *net_exit_list)
|
|
|
|
|
{
|
|
|
|
|
struct net *net;
|
2024-07-31 13:07:17 -07:00
|
|
|
|
|
|
|
|
if (ops->id) {
|
2009-12-03 02:29:03 +00:00
|
|
|
list_for_each_entry(net, net_exit_list, exit_list)
|
2021-08-17 23:23:00 +08:00
|
|
|
kfree(net_generic(net, *ops->id));
|
2009-12-03 02:29:03 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-04-11 13:52:30 -07:00
|
|
|
/* Tear down the nets on @net_exit_list against the pernet_operations in
 * @ops_list, walking backwards from @ops (exclusive) — i.e. in reverse
 * registration order. Phases, in order: pre_exit for all ops, an RCU
 * grace period, exit_rtnl (batched, only if some ops needs it), exit /
 * exit_batch, then freeing of per-net data. A NULL @ops undoes the
 * whole list.
 */
static void ops_undo_list(const struct list_head *ops_list,
			  const struct pernet_operations *ops,
			  struct list_head *net_exit_list,
			  bool expedite_rcu)
{
	const struct pernet_operations *saved_ops;
	bool hold_rtnl = false;

	if (!ops)
		/* Start past the end so the reverse walk covers everything. */
		ops = list_entry(ops_list, typeof(*ops), list);

	saved_ops = ops;

	list_for_each_entry_continue_reverse(ops, ops_list, list) {
		/* Remember whether any ops will need the RTNL phase. */
		hold_rtnl |= !!ops->exit_rtnl;
		ops_pre_exit_list(ops, net_exit_list);
	}

	/* Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so the
	 * rcu_barrier() after ops_undo_list() isn't sufficient alone.
	 * Also the pre_exit() and exit() methods need this barrier.
	 */
	if (expedite_rcu)
		synchronize_rcu_expedited();
	else
		synchronize_rcu();

	if (hold_rtnl)
		ops_exit_rtnl_list(ops_list, saved_ops, net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, ops_list, list)
		ops_exit_list(ops, net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, ops_list, list)
		ops_free_list(ops, net_exit_list);
}
|
|
|
|
|
|
2025-04-11 13:52:31 -07:00
|
|
|
static void ops_undo_single(struct pernet_operations *ops,
|
|
|
|
|
struct list_head *net_exit_list)
|
|
|
|
|
{
|
|
|
|
|
LIST_HEAD(ops_list);
|
|
|
|
|
|
|
|
|
|
list_add(&ops->list, &ops_list);
|
2025-04-17 17:32:32 -07:00
|
|
|
ops_undo_list(&ops_list, NULL, net_exit_list, false);
|
2025-04-11 13:52:31 -07:00
|
|
|
list_del(&ops->list);
|
|
|
|
|
}
|
|
|
|
|
|
2015-05-07 11:02:51 +02:00
|
|
|
/* should be called with nsid_lock held */
|
2015-01-15 15:11:15 +01:00
|
|
|
static int alloc_netid(struct net *net, struct net *peer, int reqid)
|
|
|
|
|
{
|
2015-05-07 11:02:50 +02:00
|
|
|
int min = 0, max = 0;
|
2015-01-15 15:11:15 +01:00
|
|
|
|
|
|
|
|
if (reqid >= 0) {
|
|
|
|
|
min = reqid;
|
|
|
|
|
max = reqid + 1;
|
|
|
|
|
}
|
|
|
|
|
|
2015-05-07 11:02:51 +02:00
|
|
|
return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
|
2015-01-15 15:11:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* idr_for_each() callback: stop the walk (by returning non-zero) once
 * @peer is found, yielding its id. Since returning 0 would not stop the
 * walk, id 0 is encoded as the magic value NET_ID_ZERO (-1).
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (!net_eq(net, peer))
		return 0;

	return id ? id : NET_ID_ZERO;
}
|
|
|
|
|
|
2020-01-13 22:39:22 +01:00
|
|
|
/* Must be called from RCU-critical section or with nsid_lock held */
|
2020-01-13 22:39:20 +01:00
|
|
|
static int __peernet2id(const struct net *net, struct net *peer)
|
2015-01-15 15:11:15 +01:00
|
|
|
{
|
|
|
|
|
int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
|
2015-05-07 11:02:50 +02:00
|
|
|
|
2015-01-15 15:11:15 +01:00
|
|
|
/* Magic value for id 0. */
|
|
|
|
|
if (id == NET_ID_ZERO)
|
|
|
|
|
return 0;
|
|
|
|
|
if (id > 0)
|
|
|
|
|
return id;
|
|
|
|
|
|
2015-05-07 11:02:47 +02:00
|
|
|
return NETNSA_NSID_NOT_ASSIGNED;
|
2015-01-15 15:11:15 +01:00
|
|
|
}
|
|
|
|
|
|
2019-10-09 11:19:10 +02:00
|
|
|
/* Forward declaration: notification is sent after the id is allocated. */
static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp);

/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
	int id;

	/* @net itself may already be dying; don't assign new ids then. */
	if (!check_net(net))
		return NETNSA_NSID_NOT_ASSIGNED;

	spin_lock(&net->nsid_lock);
	id = __peernet2id(net, peer);
	if (id >= 0) {
		spin_unlock(&net->nsid_lock);
		return id;
	}

	/* When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive, and this guarantees
	 * we never hash a peer back to net->netns_ids, after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (!maybe_get_net(peer)) {
		spin_unlock(&net->nsid_lock);
		return NETNSA_NSID_NOT_ASSIGNED;
	}

	/* -1 lets alloc_netid() pick any free id. */
	id = alloc_netid(net, peer, -1);
	spin_unlock(&net->nsid_lock);

	put_net(peer);
	if (id < 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	/* Notify userspace outside nsid_lock; @gfp chosen by the caller. */
	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);

	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);
|
2015-01-15 15:11:15 +01:00
|
|
|
|
2015-05-07 11:02:51 +02:00
|
|
|
/* This function returns, if assigned, the id of a peer netns;
 * NETNSA_NSID_NOT_ASSIGNED otherwise. Never allocates an id.
 */
int peernet2id(const struct net *net, struct net *peer)
{
	int id;

	/* RCU protects the netns_ids walk inside __peernet2id(). */
	rcu_read_lock();
	id = __peernet2id(net, peer);
	rcu_read_unlock();

	return id;
}
EXPORT_SYMBOL(peernet2id);
|
2015-05-07 11:02:51 +02:00
|
|
|
|
2015-05-07 11:02:53 +02:00
|
|
|
/* This function returns true is the peer netns has an id assigned into the
|
|
|
|
|
* current netns.
|
|
|
|
|
*/
|
2020-01-16 21:16:46 +01:00
|
|
|
bool peernet_has_id(const struct net *net, struct net *peer)
|
2015-05-07 11:02:53 +02:00
|
|
|
{
|
|
|
|
|
return peernet2id(net, peer) >= 0;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-16 21:16:46 +01:00
|
|
|
/* Look up the peer netns registered under nsid @id in @net and return it
 * with an elevated refcount, or NULL. maybe_get_net() (not get_net()) is
 * required here: under RCU the found peer may have already dropped its
 * last reference and be queued for cleanup_net(), and taking a plain
 * reference would lead to a double free / use-after-free.
 */
struct net *get_net_ns_by_id(const struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);
|
2015-01-15 15:11:15 +01:00
|
|
|
|
2024-07-31 13:07:21 -07:00
|
|
|
static __net_init void preinit_net_sysctl(struct net *net)
|
|
|
|
|
{
|
|
|
|
|
net->core.sysctl_somaxconn = SOMAXCONN;
|
|
|
|
|
/* Limits per socket sk_omem_alloc usage.
|
|
|
|
|
* TCP zerocopy regular usage needs 128 KB.
|
|
|
|
|
*/
|
|
|
|
|
net->core.sysctl_optmem_max = 128 * 1024;
|
|
|
|
|
net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
|
2024-10-06 07:26:09 +09:00
|
|
|
net->core.sysctl_tstamp_allow_data = 1;
|
2024-07-31 13:07:21 -07:00
|
|
|
}
|
|
|
|
|
|
2023-02-08 18:21:23 +00:00
|
|
|
/* init code that must occur even if setup_net() is not called. */
|
2025-09-12 13:52:34 +02:00
|
|
|
static __net_init int preinit_net(struct net *net, struct user_namespace *user_ns)
|
2023-02-08 18:21:23 +00:00
|
|
|
{
|
2025-09-12 13:52:34 +02:00
|
|
|
int ret;
|
|
|
|
|
|
2025-09-22 14:42:36 +02:00
|
|
|
ret = ns_common_init(net);
|
2025-09-12 13:52:34 +02:00
|
|
|
if (ret)
|
|
|
|
|
return ret;
|
|
|
|
|
|
2024-07-31 13:07:18 -07:00
|
|
|
refcount_set(&net->passive, 1);
|
2025-06-18 10:24:22 -04:00
|
|
|
ref_tracker_dir_init(&net->refcnt_tracker, 128, "net_refcnt");
|
|
|
|
|
ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net_notrefcnt");
|
2024-07-31 13:07:20 -07:00
|
|
|
|
|
|
|
|
get_random_bytes(&net->hash_mix, sizeof(u32));
|
|
|
|
|
net->dev_base_seq = 1;
|
|
|
|
|
net->user_ns = user_ns;
|
|
|
|
|
|
|
|
|
|
idr_init(&net->netns_ids);
|
|
|
|
|
spin_lock_init(&net->nsid_lock);
|
|
|
|
|
mutex_init(&net->ipv4.ra_mutex);
|
2024-10-04 15:10:29 -07:00
|
|
|
|
|
|
|
|
#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
|
|
|
|
|
mutex_init(&net->rtnl_mutex);
|
|
|
|
|
lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL);
|
|
|
|
|
#endif
|
|
|
|
|
|
2025-03-20 19:22:38 +01:00
|
|
|
INIT_LIST_HEAD(&net->ptype_all);
|
|
|
|
|
INIT_LIST_HEAD(&net->ptype_specific);
|
2024-07-31 13:07:21 -07:00
|
|
|
preinit_net_sysctl(net);
|
2025-09-12 13:52:34 +02:00
|
|
|
return 0;
|
2023-02-08 18:21:23 +00:00
|
|
|
}
|
|
|
|
|
|
2007-09-12 11:50:50 +02:00
|
|
|
/*
|
|
|
|
|
* setup_net runs the initializers for the network namespace object.
|
|
|
|
|
*/
|
2024-07-31 13:07:20 -07:00
|
|
|
static __net_init int setup_net(struct net *net)
|
2007-09-12 11:50:50 +02:00
|
|
|
{
|
2018-03-27 18:02:23 +03:00
|
|
|
/* Must be called with pernet_ops_rwsem held */
|
2025-04-11 13:52:30 -07:00
|
|
|
const struct pernet_operations *ops;
|
2009-12-03 02:29:03 +00:00
|
|
|
LIST_HEAD(net_exit_list);
|
2024-02-06 14:42:57 +00:00
|
|
|
int error = 0;
|
2007-09-12 11:50:50 +02:00
|
|
|
|
2025-09-12 13:52:44 +02:00
|
|
|
net->net_cookie = ns_tree_gen_id(&net->ns);
|
2009-02-22 00:07:53 -08:00
|
|
|
|
2007-09-18 13:20:41 -07:00
|
|
|
list_for_each_entry(ops, &pernet_list, list) {
|
2009-11-29 22:25:28 +00:00
|
|
|
error = ops_init(ops, net);
|
|
|
|
|
if (error < 0)
|
|
|
|
|
goto out_undo;
|
2007-09-12 11:50:50 +02:00
|
|
|
}
|
net: Introduce net_rwsem to protect net_namespace_list
rtnl_lock() is used everywhere, and contention is very high.
When someone wants to iterate over alive net namespaces,
he/she has no a possibility to do that without exclusive lock.
But the exclusive rtnl_lock() in such places is overkill,
and it just increases the contention. Yes, there is already
for_each_net_rcu() in kernel, but it requires rcu_read_lock(),
and this can't be sleepable. Also, sometimes it may be need
really prevent net_namespace_list growth, so for_each_net_rcu()
is not fit there.
This patch introduces new rw_semaphore, which will be used
instead of rtnl_mutex to protect net_namespace_list. It is
sleepable and allows not-exclusive iterations over net
namespaces list. It allows to stop using rtnl_lock()
in several places (what is made in next patches) and makes
less the time, we keep rtnl_mutex. Here we just add new lock,
while the explanation of we can remove rtnl_lock() there are
in next patches.
Fine grained locks generally are better, then one big lock,
so let's do that with net_namespace_list, while the situation
allows that.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-03-29 19:20:32 +03:00
|
|
|
down_write(&net_rwsem);
|
2018-02-13 12:26:02 +03:00
|
|
|
list_add_tail_rcu(&net->list, &net_namespace_list);
|
net: Introduce net_rwsem to protect net_namespace_list
rtnl_lock() is used everywhere, and contention is very high.
When someone wants to iterate over alive net namespaces,
he/she has no a possibility to do that without exclusive lock.
But the exclusive rtnl_lock() in such places is overkill,
and it just increases the contention. Yes, there is already
for_each_net_rcu() in kernel, but it requires rcu_read_lock(),
and this can't be sleepable. Also, sometimes it may be need
really prevent net_namespace_list growth, so for_each_net_rcu()
is not fit there.
This patch introduces new rw_semaphore, which will be used
instead of rtnl_mutex to protect net_namespace_list. It is
sleepable and allows not-exclusive iterations over net
namespaces list. It allows to stop using rtnl_lock()
in several places (what is made in next patches) and makes
less the time, we keep rtnl_mutex. Here we just add new lock,
while the explanation of we can remove rtnl_lock() there are
in next patches.
Fine grained locks generally are better, then one big lock,
so let's do that with net_namespace_list, while the situation
allows that.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-03-29 19:20:32 +03:00
|
|
|
up_write(&net_rwsem);
|
2025-09-12 13:52:44 +02:00
|
|
|
ns_tree_add_raw(net);
|
2007-09-12 11:50:50 +02:00
|
|
|
out:
|
|
|
|
|
return error;
|
2007-09-18 13:20:41 -07:00
|
|
|
|
2007-09-12 11:50:50 +02:00
|
|
|
out_undo:
|
|
|
|
|
/* Walk through the list backwards calling the exit functions
|
|
|
|
|
* for the pernet modules whose init functions did not fail.
|
|
|
|
|
*/
|
2009-12-03 02:29:03 +00:00
|
|
|
list_add(&net->exit_list, &net_exit_list);
|
2025-04-17 17:32:32 -07:00
|
|
|
ops_undo_list(&pernet_list, ops, &net_exit_list, false);
|
2007-10-30 15:38:57 -07:00
|
|
|
rcu_barrier();
|
2007-09-12 11:50:50 +02:00
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
2009-02-23 15:37:35 -08:00
|
|
|
#ifdef CONFIG_NET_NS
|
2016-09-23 18:06:12 +02:00
|
|
|
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
|
|
|
|
|
{
|
|
|
|
|
return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void dec_net_namespaces(struct ucounts *ucounts)
|
|
|
|
|
{
|
|
|
|
|
dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
|
|
|
|
|
}
|
|
|
|
|
|
2018-02-24 21:20:33 +03:00
|
|
|
static struct kmem_cache *net_cachep __ro_after_init;
|
2009-02-23 15:37:35 -08:00
|
|
|
static struct workqueue_struct *netns_wq;
|
|
|
|
|
|
2009-02-22 00:07:53 -08:00
|
|
|
static struct net *net_alloc(void)
|
2007-11-07 01:30:30 -08:00
|
|
|
{
|
2009-02-22 00:07:53 -08:00
|
|
|
struct net *net = NULL;
|
|
|
|
|
struct net_generic *ng;
|
|
|
|
|
|
|
|
|
|
ng = net_alloc_generic();
|
|
|
|
|
if (!ng)
|
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
|
|
net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
|
2007-11-07 01:30:30 -08:00
|
|
|
if (!net)
|
2009-02-22 00:07:53 -08:00
|
|
|
goto out_free;
|
2007-11-07 01:30:30 -08:00
|
|
|
|
2019-06-26 21:02:33 +01:00
|
|
|
#ifdef CONFIG_KEYS
|
|
|
|
|
net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
|
|
|
|
|
if (!net->key_domain)
|
|
|
|
|
goto out_free_2;
|
|
|
|
|
refcount_set(&net->key_domain->usage, 1);
|
|
|
|
|
#endif
|
|
|
|
|
|
2009-02-22 00:07:53 -08:00
|
|
|
rcu_assign_pointer(net->gen, ng);
|
|
|
|
|
out:
|
|
|
|
|
return net;
|
|
|
|
|
|
2019-06-26 21:02:33 +01:00
|
|
|
#ifdef CONFIG_KEYS
|
|
|
|
|
out_free_2:
|
|
|
|
|
kmem_cache_free(net_cachep, net);
|
|
|
|
|
net = NULL;
|
|
|
|
|
#endif
|
2009-02-22 00:07:53 -08:00
|
|
|
out_free:
|
|
|
|
|
kfree(ng);
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
2024-12-04 12:54:55 +00:00
|
|
|
static LLIST_HEAD(defer_free_list);
|
|
|
|
|
|
|
|
|
|
static void net_complete_free(void)
|
|
|
|
|
{
|
|
|
|
|
struct llist_node *kill_list;
|
|
|
|
|
struct net *net, *next;
|
|
|
|
|
|
|
|
|
|
/* Get the list of namespaces to free from last round. */
|
|
|
|
|
kill_list = llist_del_all(&defer_free_list);
|
|
|
|
|
|
|
|
|
|
llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
|
|
|
|
|
kmem_cache_free(net_cachep, net);
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
2025-02-17 11:11:27 -08:00
|
|
|
void net_passive_dec(struct net *net)
|
2009-02-22 00:07:53 -08:00
|
|
|
{
|
2021-08-17 23:23:00 +08:00
|
|
|
if (refcount_dec_and_test(&net->passive)) {
|
|
|
|
|
kfree(rcu_access_pointer(net->gen));
|
2022-10-20 23:20:18 +00:00
|
|
|
|
|
|
|
|
/* There should not be any trackers left there. */
|
|
|
|
|
ref_tracker_dir_exit(&net->notrefcnt_tracker);
|
|
|
|
|
|
2024-12-04 12:54:55 +00:00
|
|
|
/* Wait for an extra rcu_barrier() before final free. */
|
|
|
|
|
llist_add(&net->defer_free_list, &defer_free_list);
|
2021-08-17 23:23:00 +08:00
|
|
|
}
|
2007-11-07 01:30:30 -08:00
|
|
|
}
|
|
|
|
|
|
2011-06-08 21:13:01 -04:00
|
|
|
/* void *-typed wrapper around net_passive_dec(); tolerates a NULL pointer. */
void net_drop_ns(void *p)
{
	struct net *net = p;

	if (!net)
		return;

	net_passive_dec(net);
}
|
|
|
|
|
|
2025-09-01 15:09:51 +02:00
|
|
|
/*
 * Create a new network namespace for clone()/unshare(), or share @old_net
 * when CLONE_NEWNET is not requested.  Returns the namespace or ERR_PTR().
 */
struct net *copy_net_ns(u64 flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int ret;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	/* Charge the namespace against the user's ucount limit first. */
	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		ret = -ENOMEM;
		goto dec_ucounts;
	}

	ret = preinit_net(net, user_ns);
	if (ret < 0)
		goto dec_ucounts;
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	ret = down_read_killable(&pernet_ops_rwsem);
	if (ret < 0)
		goto put_userns;

	ret = setup_net(net);

	up_read(&pernet_ops_rwsem);

	if (ret < 0) {
		/* Error unwind; put_userns is also entered directly when the
		 * killable down_read() above is interrupted.
		 */
put_userns:
		ns_common_free(net);
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(user_ns);
		net_passive_dec(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(ret);
	}
	return net;
}
|
2009-02-22 00:07:53 -08:00
|
|
|
|
2018-07-20 21:56:53 +00:00
|
|
|
/**
|
|
|
|
|
* net_ns_get_ownership - get sysfs ownership data for @net
|
|
|
|
|
* @net: network namespace in question (can be NULL)
|
|
|
|
|
* @uid: kernel user ID for sysfs objects
|
|
|
|
|
* @gid: kernel group ID for sysfs objects
|
|
|
|
|
*
|
|
|
|
|
* Returns the uid/gid pair of root in the user namespace associated with the
|
|
|
|
|
* given network namespace.
|
|
|
|
|
*/
|
|
|
|
|
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
|
|
|
|
|
{
|
|
|
|
|
if (net) {
|
|
|
|
|
kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
|
|
|
|
|
kgid_t ns_root_gid = make_kgid(net->user_ns, 0);
|
|
|
|
|
|
|
|
|
|
if (uid_valid(ns_root_uid))
|
|
|
|
|
*uid = ns_root_uid;
|
|
|
|
|
|
|
|
|
|
if (gid_valid(ns_root_gid))
|
|
|
|
|
*gid = ns_root_gid;
|
|
|
|
|
} else {
|
|
|
|
|
*uid = GLOBAL_ROOT_UID;
|
|
|
|
|
*gid = GLOBAL_ROOT_GID;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL_GPL(net_ns_get_ownership);
|
|
|
|
|
|
net: Move net:netns_ids destruction out of rtnl_lock() and document locking scheme
Currently, we unhash a dying net from netns_ids lists
under rtnl_lock(). It's a leftover from the time when
net::netns_ids was introduced. There was no net::nsid_lock,
and rtnl_lock() was mostly need to order modification
of alive nets nsid idr, i.e. for:
for_each_net(tmp) {
...
id = __peernet2id(tmp, net);
idr_remove(&tmp->netns_ids, id);
...
}
Since we have net::nsid_lock, the modifications are
protected by this local lock, and now we may introduce
better scheme of netns_ids destruction.
Let's look at the functions peernet2id_alloc() and
get_net_ns_by_id(). Previous commits taught these
functions to work well with dying net acquired from
rtnl unlocked lists. And they are the only functions
which can hash a net to netns_ids or obtain from there.
And as easy to check, other netns_ids operating functions
works with id, not with net pointers. So, we do not
need rtnl_lock to synchronize cleanup_net() with all them.
The another property, which is used in the patch,
is that net is unhashed from net_namespace_list
in the only place and by the only process. So,
we avoid excess rcu_read_lock() or rtnl_lock(),
when we'are iterating over the list in unhash_nsid().
All the above makes possible to keep rtnl_lock() locked
only for net->list deletion, and completely avoid it
for netns_ids unhashing and destruction. As these two
doings may take long time (e.g., memory allocation
to send skb), the patch should positively act on
the scalability and signify decrease the time, which
rtnl_lock() is held in cleanup_net().
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-19 19:14:53 +03:00
|
|
|
static void unhash_nsid(struct net *net, struct net *last)
|
|
|
|
|
{
|
|
|
|
|
struct net *tmp;
|
|
|
|
|
/* This function is only called from cleanup_net() work,
|
|
|
|
|
* and this work is the only process, that may delete
|
|
|
|
|
* a net from net_namespace_list. So, when the below
|
|
|
|
|
* is executing, the list may only grow. Thus, we do not
|
net: Introduce net_rwsem to protect net_namespace_list
rtnl_lock() is used everywhere, and contention is very high.
When someone wants to iterate over alive net namespaces,
he/she has no a possibility to do that without exclusive lock.
But the exclusive rtnl_lock() in such places is overkill,
and it just increases the contention. Yes, there is already
for_each_net_rcu() in kernel, but it requires rcu_read_lock(),
and this can't be sleepable. Also, sometimes it may be need
really prevent net_namespace_list growth, so for_each_net_rcu()
is not fit there.
This patch introduces new rw_semaphore, which will be used
instead of rtnl_mutex to protect net_namespace_list. It is
sleepable and allows not-exclusive iterations over net
namespaces list. It allows to stop using rtnl_lock()
in several places (what is made in next patches) and makes
less the time, we keep rtnl_mutex. Here we just add new lock,
while the explanation of we can remove rtnl_lock() there are
in next patches.
Fine grained locks generally are better, then one big lock,
so let's do that with net_namespace_list, while the situation
allows that.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-03-29 19:20:32 +03:00
|
|
|
* use for_each_net_rcu() or net_rwsem.
|
net: Move net:netns_ids destruction out of rtnl_lock() and document locking scheme
Currently, we unhash a dying net from netns_ids lists
under rtnl_lock(). It's a leftover from the time when
net::netns_ids was introduced. There was no net::nsid_lock,
and rtnl_lock() was mostly need to order modification
of alive nets nsid idr, i.e. for:
for_each_net(tmp) {
...
id = __peernet2id(tmp, net);
idr_remove(&tmp->netns_ids, id);
...
}
Since we have net::nsid_lock, the modifications are
protected by this local lock, and now we may introduce
better scheme of netns_ids destruction.
Let's look at the functions peernet2id_alloc() and
get_net_ns_by_id(). Previous commits taught these
functions to work well with dying net acquired from
rtnl unlocked lists. And they are the only functions
which can hash a net to netns_ids or obtain from there.
And as easy to check, other netns_ids operating functions
works with id, not with net pointers. So, we do not
need rtnl_lock to synchronize cleanup_net() with all them.
The another property, which is used in the patch,
is that net is unhashed from net_namespace_list
in the only place and by the only process. So,
we avoid excess rcu_read_lock() or rtnl_lock(),
when we'are iterating over the list in unhash_nsid().
All the above makes possible to keep rtnl_lock() locked
only for net->list deletion, and completely avoid it
for netns_ids unhashing and destruction. As these two
doings may take long time (e.g., memory allocation
to send skb), the patch should positively act on
the scalability and signify decrease the time, which
rtnl_lock() is held in cleanup_net().
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-19 19:14:53 +03:00
|
|
|
*/
|
|
|
|
|
for_each_net(tmp) {
|
|
|
|
|
int id;
|
|
|
|
|
|
2025-06-27 16:32:42 +00:00
|
|
|
spin_lock(&tmp->nsid_lock);
|
net: Move net:netns_ids destruction out of rtnl_lock() and document locking scheme
Currently, we unhash a dying net from netns_ids lists
under rtnl_lock(). It's a leftover from the time when
net::netns_ids was introduced. There was no net::nsid_lock,
and rtnl_lock() was mostly need to order modification
of alive nets nsid idr, i.e. for:
for_each_net(tmp) {
...
id = __peernet2id(tmp, net);
idr_remove(&tmp->netns_ids, id);
...
}
Since we have net::nsid_lock, the modifications are
protected by this local lock, and now we may introduce
better scheme of netns_ids destruction.
Let's look at the functions peernet2id_alloc() and
get_net_ns_by_id(). Previous commits taught these
functions to work well with dying net acquired from
rtnl unlocked lists. And they are the only functions
which can hash a net to netns_ids or obtain from there.
And as easy to check, other netns_ids operating functions
works with id, not with net pointers. So, we do not
need rtnl_lock to synchronize cleanup_net() with all them.
The another property, which is used in the patch,
is that net is unhashed from net_namespace_list
in the only place and by the only process. So,
we avoid excess rcu_read_lock() or rtnl_lock(),
when we'are iterating over the list in unhash_nsid().
All the above makes possible to keep rtnl_lock() locked
only for net->list deletion, and completely avoid it
for netns_ids unhashing and destruction. As these two
doings may take long time (e.g., memory allocation
to send skb), the patch should positively act on
the scalability and signify decrease the time, which
rtnl_lock() is held in cleanup_net().
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-19 19:14:53 +03:00
|
|
|
id = __peernet2id(tmp, net);
|
|
|
|
|
if (id >= 0)
|
|
|
|
|
idr_remove(&tmp->netns_ids, id);
|
2025-06-27 16:32:42 +00:00
|
|
|
spin_unlock(&tmp->nsid_lock);
|
net: Move net:netns_ids destruction out of rtnl_lock() and document locking scheme
Currently, we unhash a dying net from netns_ids lists
under rtnl_lock(). It's a leftover from the time when
net::netns_ids was introduced. There was no net::nsid_lock,
and rtnl_lock() was mostly need to order modification
of alive nets nsid idr, i.e. for:
for_each_net(tmp) {
...
id = __peernet2id(tmp, net);
idr_remove(&tmp->netns_ids, id);
...
}
Since we have net::nsid_lock, the modifications are
protected by this local lock, and now we may introduce
better scheme of netns_ids destruction.
Let's look at the functions peernet2id_alloc() and
get_net_ns_by_id(). Previous commits taught these
functions to work well with dying net acquired from
rtnl unlocked lists. And they are the only functions
which can hash a net to netns_ids or obtain from there.
And as easy to check, other netns_ids operating functions
works with id, not with net pointers. So, we do not
need rtnl_lock to synchronize cleanup_net() with all them.
The another property, which is used in the patch,
is that net is unhashed from net_namespace_list
in the only place and by the only process. So,
we avoid excess rcu_read_lock() or rtnl_lock(),
when we'are iterating over the list in unhash_nsid().
All the above makes possible to keep rtnl_lock() locked
only for net->list deletion, and completely avoid it
for netns_ids unhashing and destruction. As these two
doings may take long time (e.g., memory allocation
to send skb), the patch should positively act on
the scalability and signify decrease the time, which
rtnl_lock() is held in cleanup_net().
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-19 19:14:53 +03:00
|
|
|
if (id >= 0)
|
netns: fix GFP flags in rtnl_net_notifyid()
In rtnl_net_notifyid(), we certainly can't pass a null GFP flag to
rtnl_notify(). A GFP_KERNEL flag would be fine in most circumstances,
but there are a few paths calling rtnl_net_notifyid() from atomic
context or from RCU critical sections. The later also precludes the use
of gfp_any() as it wouldn't detect the RCU case. Also, the nlmsg_new()
call is wrong too, as it uses GFP_KERNEL unconditionally.
Therefore, we need to pass the GFP flags as parameter and propagate it
through function calls until the proper flags can be determined.
In most cases, GFP_KERNEL is fine. The exceptions are:
* openvswitch: ovs_vport_cmd_get() and ovs_vport_cmd_dump()
indirectly call rtnl_net_notifyid() from RCU critical section,
* rtnetlink: rtmsg_ifinfo_build_skb() already receives GFP flags as
parameter.
Also, in ovs_vport_cmd_build_info(), let's change the GFP flags used
by nlmsg_new(). The function is allowed to sleep, so better make the
flags consistent with the ones used in the following
ovs_vport_cmd_fill_info() call.
Found by code inspection.
Fixes: 9a9634545c70 ("netns: notify netns id events")
Signed-off-by: Guillaume Nault <gnault@redhat.com>
Acked-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-23 18:39:04 +02:00
|
|
|
rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
|
|
|
|
|
GFP_KERNEL);
|
net: Move net:netns_ids destruction out of rtnl_lock() and document locking scheme
Currently, we unhash a dying net from netns_ids lists
under rtnl_lock(). It's a leftover from the time when
net::netns_ids was introduced. There was no net::nsid_lock,
and rtnl_lock() was mostly need to order modification
of alive nets nsid idr, i.e. for:
for_each_net(tmp) {
...
id = __peernet2id(tmp, net);
idr_remove(&tmp->netns_ids, id);
...
}
Since we have net::nsid_lock, the modifications are
protected by this local lock, and now we may introduce
better scheme of netns_ids destruction.
Let's look at the functions peernet2id_alloc() and
get_net_ns_by_id(). Previous commits taught these
functions to work well with dying net acquired from
rtnl unlocked lists. And they are the only functions
which can hash a net to netns_ids or obtain from there.
And as easy to check, other netns_ids operating functions
works with id, not with net pointers. So, we do not
need rtnl_lock to synchronize cleanup_net() with all them.
The another property, which is used in the patch,
is that net is unhashed from net_namespace_list
in the only place and by the only process. So,
we avoid excess rcu_read_lock() or rtnl_lock(),
when we'are iterating over the list in unhash_nsid().
All the above makes possible to keep rtnl_lock() locked
only for net->list deletion, and completely avoid it
for netns_ids unhashing and destruction. As these two
doings may take long time (e.g., memory allocation
to send skb), the patch should positively act on
the scalability and signify decrease the time, which
rtnl_lock() is held in cleanup_net().
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-19 19:14:53 +03:00
|
|
|
if (tmp == last)
|
|
|
|
|
break;
|
|
|
|
|
}
|
2025-06-27 16:32:42 +00:00
|
|
|
spin_lock(&net->nsid_lock);
|
net: Move net:netns_ids destruction out of rtnl_lock() and document locking scheme
Currently, we unhash a dying net from netns_ids lists
under rtnl_lock(). It's a leftover from the time when
net::netns_ids was introduced. There was no net::nsid_lock,
and rtnl_lock() was mostly need to order modification
of alive nets nsid idr, i.e. for:
for_each_net(tmp) {
...
id = __peernet2id(tmp, net);
idr_remove(&tmp->netns_ids, id);
...
}
Since we have net::nsid_lock, the modifications are
protected by this local lock, and now we may introduce
better scheme of netns_ids destruction.
Let's look at the functions peernet2id_alloc() and
get_net_ns_by_id(). Previous commits taught these
functions to work well with dying net acquired from
rtnl unlocked lists. And they are the only functions
which can hash a net to netns_ids or obtain from there.
And as easy to check, other netns_ids operating functions
works with id, not with net pointers. So, we do not
need rtnl_lock to synchronize cleanup_net() with all them.
The another property, which is used in the patch,
is that net is unhashed from net_namespace_list
in the only place and by the only process. So,
we avoid excess rcu_read_lock() or rtnl_lock(),
when we'are iterating over the list in unhash_nsid().
All the above makes possible to keep rtnl_lock() locked
only for net->list deletion, and completely avoid it
for netns_ids unhashing and destruction. As these two
doings may take long time (e.g., memory allocation
to send skb), the patch should positively act on
the scalability and signify decrease the time, which
rtnl_lock() is held in cleanup_net().
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-19 19:14:53 +03:00
|
|
|
idr_destroy(&net->netns_ids);
|
2025-06-27 16:32:42 +00:00
|
|
|
spin_unlock(&net->nsid_lock);
|
net: Move net:netns_ids destruction out of rtnl_lock() and document locking scheme
Currently, we unhash a dying net from netns_ids lists
under rtnl_lock(). It's a leftover from the time when
net::netns_ids was introduced. There was no net::nsid_lock,
and rtnl_lock() was mostly need to order modification
of alive nets nsid idr, i.e. for:
for_each_net(tmp) {
...
id = __peernet2id(tmp, net);
idr_remove(&tmp->netns_ids, id);
...
}
Since we have net::nsid_lock, the modifications are
protected by this local lock, and now we may introduce
better scheme of netns_ids destruction.
Let's look at the functions peernet2id_alloc() and
get_net_ns_by_id(). Previous commits taught these
functions to work well with dying net acquired from
rtnl unlocked lists. And they are the only functions
which can hash a net to netns_ids or obtain from there.
And as easy to check, other netns_ids operating functions
works with id, not with net pointers. So, we do not
need rtnl_lock to synchronize cleanup_net() with all them.
The another property, which is used in the patch,
is that net is unhashed from net_namespace_list
in the only place and by the only process. So,
we avoid excess rcu_read_lock() or rtnl_lock(),
when we'are iterating over the list in unhash_nsid().
All the above makes possible to keep rtnl_lock() locked
only for net->list deletion, and completely avoid it
for netns_ids unhashing and destruction. As these two
doings may take long time (e.g., memory allocation
to send skb), the patch should positively act on
the scalability and signify decrease the time, which
rtnl_lock() is held in cleanup_net().
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-19 19:14:53 +03:00
|
|
|
}
|
|
|
|
|
|
2018-02-19 12:58:45 +03:00
|
|
|
static LLIST_HEAD(cleanup_list);
|
2009-11-29 22:25:27 +00:00
|
|
|
|
2025-01-14 20:55:27 +00:00
|
|
|
struct task_struct *cleanup_net_task;
|
|
|
|
|
|
2007-11-01 00:44:50 -07:00
|
|
|
static void cleanup_net(struct work_struct *work)
|
|
|
|
|
{
|
2018-02-19 12:58:45 +03:00
|
|
|
struct llist_node *net_kill_list;
|
2025-04-11 13:52:30 -07:00
|
|
|
struct net *net, *tmp, *last;
|
2009-12-03 02:29:03 +00:00
|
|
|
LIST_HEAD(net_exit_list);
|
2007-11-01 00:44:50 -07:00
|
|
|
|
2025-06-04 09:39:28 +00:00
|
|
|
WRITE_ONCE(cleanup_net_task, current);
|
2025-01-14 20:55:27 +00:00
|
|
|
|
2009-11-29 22:25:27 +00:00
|
|
|
/* Atomically snapshot the list of namespaces to cleanup */
|
2018-02-19 12:58:45 +03:00
|
|
|
net_kill_list = llist_del_all(&cleanup_list);
|
2007-11-01 00:44:50 -07:00
|
|
|
|
2018-03-27 18:02:23 +03:00
|
|
|
down_read(&pernet_ops_rwsem);
|
2007-11-01 00:44:50 -07:00
|
|
|
|
|
|
|
|
/* Don't let anyone else find us. */
|
net: Introduce net_rwsem to protect net_namespace_list
rtnl_lock() is used everywhere, and contention is very high.
When someone wants to iterate over alive net namespaces,
he/she has no a possibility to do that without exclusive lock.
But the exclusive rtnl_lock() in such places is overkill,
and it just increases the contention. Yes, there is already
for_each_net_rcu() in kernel, but it requires rcu_read_lock(),
and this can't be sleepable. Also, sometimes it may be need
really prevent net_namespace_list growth, so for_each_net_rcu()
is not fit there.
This patch introduces new rw_semaphore, which will be used
instead of rtnl_mutex to protect net_namespace_list. It is
sleepable and allows not-exclusive iterations over net
namespaces list. It allows to stop using rtnl_lock()
in several places (what is made in next patches) and makes
less the time, we keep rtnl_mutex. Here we just add new lock,
while the explanation of we can remove rtnl_lock() there are
in next patches.
Fine grained locks generally are better, then one big lock,
so let's do that with net_namespace_list, while the situation
allows that.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-03-29 19:20:32 +03:00
|
|
|
down_write(&net_rwsem);
|
2025-09-12 13:52:44 +02:00
|
|
|
llist_for_each_entry(net, net_kill_list, cleanup_list) {
|
|
|
|
|
ns_tree_remove(net);
|
2009-11-29 22:25:27 +00:00
|
|
|
list_del_rcu(&net->list);
|
2025-09-12 13:52:44 +02:00
|
|
|
}
|
net: Move net:netns_ids destruction out of rtnl_lock() and document locking scheme
Currently, we unhash a dying net from netns_ids lists
under rtnl_lock(). It's a leftover from the time when
net::netns_ids was introduced. There was no net::nsid_lock,
and rtnl_lock() was mostly need to order modification
of alive nets nsid idr, i.e. for:
for_each_net(tmp) {
...
id = __peernet2id(tmp, net);
idr_remove(&tmp->netns_ids, id);
...
}
Since we have net::nsid_lock, the modifications are
protected by this local lock, and now we may introduce
better scheme of netns_ids destruction.
Let's look at the functions peernet2id_alloc() and
get_net_ns_by_id(). Previous commits taught these
functions to work well with dying net acquired from
rtnl unlocked lists. And they are the only functions
which can hash a net to netns_ids or obtain from there.
And as easy to check, other netns_ids operating functions
works with id, not with net pointers. So, we do not
need rtnl_lock to synchronize cleanup_net() with all them.
The another property, which is used in the patch,
is that net is unhashed from net_namespace_list
in the only place and by the only process. So,
we avoid excess rcu_read_lock() or rtnl_lock(),
when we'are iterating over the list in unhash_nsid().
All the above makes possible to keep rtnl_lock() locked
only for net->list deletion, and completely avoid it
for netns_ids unhashing and destruction. As these two
doings may take long time (e.g., memory allocation
to send skb), the patch should positively act on
the scalability and signify decrease the time, which
rtnl_lock() is held in cleanup_net().
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-19 19:14:53 +03:00
|
|
|
/* Cache last net. After we unlock rtnl, no one new net
|
|
|
|
|
* added to net_namespace_list can assign nsid pointer
|
|
|
|
|
* to a net from net_kill_list (see peernet2id_alloc()).
|
|
|
|
|
* So, we skip them in unhash_nsid().
|
|
|
|
|
*
|
|
|
|
|
* Note, that unhash_nsid() does not delete nsid links
|
|
|
|
|
* between net_kill_list's nets, as they've already
|
|
|
|
|
* deleted from net_namespace_list. But, this would be
|
|
|
|
|
* useless anyway, as netns_ids are destroyed there.
|
|
|
|
|
*/
|
|
|
|
|
last = list_last_entry(&net_namespace_list, struct net, list);
|
net: Introduce net_rwsem to protect net_namespace_list
rtnl_lock() is used everywhere, and contention is very high.
When someone wants to iterate over alive net namespaces,
he/she has no a possibility to do that without exclusive lock.
But the exclusive rtnl_lock() in such places is overkill,
and it just increases the contention. Yes, there is already
for_each_net_rcu() in kernel, but it requires rcu_read_lock(),
and this can't be sleepable. Also, sometimes it may be need
really prevent net_namespace_list growth, so for_each_net_rcu()
is not fit there.
This patch introduces new rw_semaphore, which will be used
instead of rtnl_mutex to protect net_namespace_list. It is
sleepable and allows not-exclusive iterations over net
namespaces list. It allows to stop using rtnl_lock()
in several places (what is made in next patches) and makes
less the time, we keep rtnl_mutex. Here we just add new lock,
while the explanation of we can remove rtnl_lock() there are
in next patches.
Fine grained locks generally are better, then one big lock,
so let's do that with net_namespace_list, while the situation
allows that.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-03-29 19:20:32 +03:00
|
|
|
up_write(&net_rwsem);
|
2015-04-03 12:02:36 +02:00
|
|
|
|
2018-02-19 12:58:45 +03:00
|
|
|
llist_for_each_entry(net, net_kill_list, cleanup_list) {
|
net: Move net:netns_ids destruction out of rtnl_lock() and document locking scheme
Currently, we unhash a dying net from netns_ids lists
under rtnl_lock(). It's a leftover from the time when
net::netns_ids was introduced. There was no net::nsid_lock,
and rtnl_lock() was mostly need to order modification
of alive nets nsid idr, i.e. for:
for_each_net(tmp) {
...
id = __peernet2id(tmp, net);
idr_remove(&tmp->netns_ids, id);
...
}
Since we have net::nsid_lock, the modifications are
protected by this local lock, and now we may introduce
better scheme of netns_ids destruction.
Let's look at the functions peernet2id_alloc() and
get_net_ns_by_id(). Previous commits taught these
functions to work well with dying net acquired from
rtnl unlocked lists. And they are the only functions
which can hash a net to netns_ids or obtain from there.
And as easy to check, other netns_ids operating functions
works with id, not with net pointers. So, we do not
need rtnl_lock to synchronize cleanup_net() with all them.
The another property, which is used in the patch,
is that net is unhashed from net_namespace_list
in the only place and by the only process. So,
we avoid excess rcu_read_lock() or rtnl_lock(),
when we'are iterating over the list in unhash_nsid().
All the above makes possible to keep rtnl_lock() locked
only for net->list deletion, and completely avoid it
for netns_ids unhashing and destruction. As these two
doings may take long time (e.g., memory allocation
to send skb), the patch should positively act on
the scalability and signify decrease the time, which
rtnl_lock() is held in cleanup_net().
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-01-19 19:14:53 +03:00
|
|
|
unhash_nsid(net, last);
|
|
|
|
|
list_add_tail(&net->exit_list, &net_exit_list);
|
2009-12-03 02:29:03 +00:00
|
|
|
}
|
2007-11-01 00:44:50 -07:00
|
|
|
|
2025-04-17 17:32:32 -07:00
|
|
|
ops_undo_list(&pernet_list, NULL, &net_exit_list, true);
|
2007-11-01 00:44:50 -07:00
|
|
|
|
2018-03-27 18:02:23 +03:00
|
|
|
up_read(&pernet_ops_rwsem);
|
2007-11-01 00:44:50 -07:00
|
|
|
|
|
|
|
|
/* Ensure there are no outstanding rcu callbacks using this
|
|
|
|
|
* network namespace.
|
|
|
|
|
*/
|
|
|
|
|
rcu_barrier();
|
|
|
|
|
|
2024-12-04 12:54:55 +00:00
|
|
|
net_complete_free();
|
|
|
|
|
|
2007-11-01 00:44:50 -07:00
|
|
|
/* Finally it is safe to free my network namespace structure */
|
2009-12-03 02:29:03 +00:00
|
|
|
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
|
|
|
|
|
list_del_init(&net->exit_list);
|
2025-09-17 12:28:08 +02:00
|
|
|
ns_common_free(net);
|
2016-08-08 14:33:23 -05:00
|
|
|
dec_net_namespaces(net->ucounts);
|
2021-09-18 17:04:10 +08:00
|
|
|
#ifdef CONFIG_KEYS
|
2019-06-26 21:02:33 +01:00
|
|
|
key_remove_domain(net->key_domain);
|
2021-09-18 17:04:10 +08:00
|
|
|
#endif
|
2012-06-14 02:31:10 -07:00
|
|
|
put_user_ns(net->user_ns);
|
2025-02-17 11:11:27 -08:00
|
|
|
net_passive_dec(net);
|
2009-11-29 22:25:27 +00:00
|
|
|
}
|
2025-06-04 09:39:28 +00:00
|
|
|
WRITE_ONCE(cleanup_net_task, NULL);
|
2007-11-01 00:44:50 -07:00
|
|
|
}
|
2017-05-30 11:38:12 +02:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* net_ns_barrier - wait until concurrent net_cleanup_work is done
|
|
|
|
|
*
|
|
|
|
|
* cleanup_net runs from work queue and will first remove namespaces
|
|
|
|
|
* from the global list, then run net exit functions.
|
|
|
|
|
*
|
|
|
|
|
* Call this in module exit path to make sure that all netns
|
|
|
|
|
* ->exit ops have been invoked before the function is removed.
|
|
|
|
|
*/
|
|
|
|
|
void net_ns_barrier(void)
|
|
|
|
|
{
|
2018-03-27 18:02:23 +03:00
|
|
|
down_write(&pernet_ops_rwsem);
|
|
|
|
|
up_write(&pernet_ops_rwsem);
|
2017-05-30 11:38:12 +02:00
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL(net_ns_barrier);
|
|
|
|
|
|
2009-11-29 22:25:27 +00:00
|
|
|
static DECLARE_WORK(net_cleanup_work, cleanup_net);
|
2007-11-01 00:44:50 -07:00
|
|
|
|
|
|
|
|
void __put_net(struct net *net)
|
|
|
|
|
{
|
2021-12-09 23:44:21 -08:00
|
|
|
ref_tracker_dir_exit(&net->refcnt_tracker);
|
2007-11-01 00:44:50 -07:00
|
|
|
/* Cleanup the network namespace in process context */
|
2018-02-19 12:58:54 +03:00
|
|
|
if (llist_add(&net->cleanup_list, &cleanup_list))
|
|
|
|
|
queue_work(netns_wq, &net_cleanup_work);
|
2007-11-01 00:44:50 -07:00
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL_GPL(__put_net);
|
|
|
|
|
|
2021-06-11 22:29:59 +08:00
|
|
|
/**
|
|
|
|
|
* get_net_ns - increment the refcount of the network namespace
|
|
|
|
|
* @ns: common namespace (net)
|
|
|
|
|
*
|
2024-06-14 21:13:02 +08:00
|
|
|
* Returns the net's common namespace or ERR_PTR() if ref is zero.
|
2021-06-11 22:29:59 +08:00
|
|
|
*/
|
|
|
|
|
struct ns_common *get_net_ns(struct ns_common *ns)
|
|
|
|
|
{
|
2024-06-14 21:13:02 +08:00
|
|
|
struct net *net;
|
|
|
|
|
|
|
|
|
|
net = maybe_get_net(container_of(ns, struct net, ns));
|
|
|
|
|
if (net)
|
|
|
|
|
return &net->ns;
|
|
|
|
|
return ERR_PTR(-EINVAL);
|
2021-06-11 22:29:59 +08:00
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL_GPL(get_net_ns);
|
|
|
|
|
|
2011-05-12 13:51:13 +10:00
|
|
|
struct net *get_net_ns_by_fd(int fd)
|
|
|
|
|
{
|
2024-07-19 20:17:58 -04:00
|
|
|
CLASS(fd, f)(fd);
|
2011-05-12 13:51:13 +10:00
|
|
|
|
2024-07-19 20:17:58 -04:00
|
|
|
if (fd_empty(f))
|
2022-05-15 18:16:54 -04:00
|
|
|
return ERR_PTR(-EBADF);
|
2011-05-12 13:51:13 +10:00
|
|
|
|
2024-05-31 14:12:01 -04:00
|
|
|
if (proc_ns_file(fd_file(f))) {
|
|
|
|
|
struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));
|
2022-05-15 18:16:54 -04:00
|
|
|
if (ns->ops == &netns_operations)
|
2024-07-19 20:17:58 -04:00
|
|
|
return get_net(container_of(ns, struct net, ns));
|
2022-05-15 18:16:54 -04:00
|
|
|
}
|
2011-05-12 13:51:13 +10:00
|
|
|
|
2024-07-19 20:17:58 -04:00
|
|
|
return ERR_PTR(-EINVAL);
|
2011-05-12 13:51:13 +10:00
|
|
|
}
|
2015-01-12 16:34:05 +02:00
|
|
|
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
|
2021-06-15 07:52:43 +08:00
|
|
|
#endif
|
2007-11-01 00:44:50 -07:00
|
|
|
|
2009-07-10 09:51:35 +00:00
|
|
|
struct net *get_net_ns_by_pid(pid_t pid)
|
|
|
|
|
{
|
|
|
|
|
struct task_struct *tsk;
|
|
|
|
|
struct net *net;
|
|
|
|
|
|
|
|
|
|
/* Lookup the network namespace */
|
|
|
|
|
net = ERR_PTR(-ESRCH);
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
tsk = find_task_by_vpid(pid);
|
|
|
|
|
if (tsk) {
|
|
|
|
|
struct nsproxy *nsproxy;
|
2014-02-03 19:13:49 -08:00
|
|
|
task_lock(tsk);
|
|
|
|
|
nsproxy = tsk->nsproxy;
|
2009-07-10 09:51:35 +00:00
|
|
|
if (nsproxy)
|
|
|
|
|
net = get_net(nsproxy->net_ns);
|
2014-02-03 19:13:49 -08:00
|
|
|
task_unlock(tsk);
|
2009-07-10 09:51:35 +00:00
|
|
|
}
|
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
return net;
|
|
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
|
|
|
|
|
|
2025-06-18 10:24:21 -04:00
|
|
|
#ifdef CONFIG_NET_NS_REFCNT_TRACKER
|
|
|
|
|
static void net_ns_net_debugfs(struct net *net)
|
|
|
|
|
{
|
|
|
|
|
ref_tracker_dir_symlink(&net->refcnt_tracker, "netns-%llx-%u-refcnt",
|
|
|
|
|
net->net_cookie, net->ns.inum);
|
|
|
|
|
ref_tracker_dir_symlink(&net->notrefcnt_tracker, "netns-%llx-%u-notrefcnt",
|
|
|
|
|
net->net_cookie, net->ns.inum);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int __init init_net_debugfs(void)
|
|
|
|
|
{
|
|
|
|
|
ref_tracker_dir_debugfs(&init_net.refcnt_tracker);
|
|
|
|
|
ref_tracker_dir_debugfs(&init_net.notrefcnt_tracker);
|
|
|
|
|
net_ns_net_debugfs(&init_net);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
late_initcall(init_net_debugfs);
|
|
|
|
|
#else
|
|
|
|
|
static void net_ns_net_debugfs(struct net *net)
|
|
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2011-06-15 10:21:48 -07:00
|
|
|
static __net_init int net_ns_net_init(struct net *net)
|
|
|
|
|
{
|
Merge tag 'net-next-6.17' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
"Core & protocols:
- Wrap datapath globals into net_aligned_data, to avoid false sharing
- Preserve MSG_ZEROCOPY in forwarding (e.g. out of a container)
- Add SO_INQ and SCM_INQ support to AF_UNIX
- Add SIOCINQ support to AF_VSOCK
- Add TCP_MAXSEG sockopt to MPTCP
- Add IPv6 force_forwarding sysctl to enable forwarding per interface
- Make TCP validation of whether packet fully fits in the receive
window and the rcv_buf more strict. With increased use of HW
aggregation a single "packet" can be multiple 100s of kB
- Add MSG_MORE flag to optimize large TCP transmissions via sockmap,
improves latency up to 33% for sockmap users
- Convert TCP send queue handling from tasklet to BH workque
- Improve BPF iteration over TCP sockets to see each socket exactly
once
- Remove obsolete and unused TCP RFC3517/RFC6675 loss recovery code
- Support enabling kernel threads for NAPI processing on per-NAPI
instance basis rather than a whole device. Fully stop the kernel
NAPI thread when threaded NAPI gets disabled. Previously thread
would stick around until ifdown due to tricky synchronization
- Allow multicast routing to take effect on locally-generated packets
- Add output interface argument for End.X in segment routing
- MCTP: add support for gateway routing, improve bind() handling
- Don't require rtnl_lock when fetching an IPv6 neighbor over Netlink
- Add a new neighbor flag ("extern_valid"), which cedes refresh
responsibilities to userspace. This is needed for EVPN multi-homing
where a neighbor entry for a multi-homed host needs to be synced
across all the VTEPs among which the host is multi-homed
- Support NUD_PERMANENT for proxy neighbor entries
- Add a new queuing discipline for IETF RFC9332 DualQ Coupled AQM
- Add sequence numbers to netconsole messages. Unregister
netconsole's console when all net targets are removed. Code
refactoring. Add a number of selftests
- Align IPSec inbound SA lookup to RFC 4301. Only SPI and protocol
should be used for an inbound SA lookup
- Support inspecting ref_tracker state via DebugFS
- Don't force bonding advertisement frames tx to ~333 ms boundaries.
Add broadcast_neighbor option to send ARP/ND on all bonded links
- Allow providing upcall pid for the 'execute' command in openvswitch
- Remove DCCP support from Netfilter's conntrack
- Disallow multiple packet duplications in the queuing layer
- Prevent use of deprecated iptables code on PREEMPT_RT
Driver API:
- Support RSS and hashing configuration over ethtool Netlink
- Add dedicated ethtool callbacks for getting and setting hashing
fields
- Add support for power budget evaluation strategy in PSE /
Power-over-Ethernet. Generate Netlink events for overcurrent etc
- Support DPLL phase offset monitoring across all device inputs.
Support providing clock reference and SYNC over separate DPLL
inputs
- Support traffic classes in devlink rate API for bandwidth
management
- Remove rtnl_lock dependency from UDP tunnel port configuration
Device drivers:
- Add a new Broadcom driver for 800G Ethernet (bnge)
- Add a standalone driver for Microchip ZL3073x DPLL
- Remove IBM's NETIUCV device driver
- Ethernet high-speed NICs:
- Broadcom (bnxt):
- support zero-copy Tx of DMABUF memory
- take page size into account for page pool recycling rings
- Intel (100G, ice, idpf):
- idpf: XDP and AF_XDP support preparations
- idpf: add flow steering
- add link_down_events statistic
- clean up the TSPLL code
- preparations for live VM migration
- nVidia/Mellanox:
- support zero-copy Rx/Tx interfaces (DMABUF and io_uring)
- optimize context memory usage for matchers
- expose serial numbers in devlink info
- support PCIe congestion metrics
- Meta (fbnic):
- add 25G, 50G, and 100G link modes to phylink
- support dumping FW logs
- Marvell/Cavium:
- support for CN20K generation of the Octeon chips
- Amazon:
- add HW clock (without timestamping, just hypervisor time access)
- Ethernet virtual:
- VirtIO net:
- support segmentation of UDP-tunnel-encapsulated packets
- Google (gve):
- support packet timestamping and clock synchronization
- Microsoft vNIC:
- add handler for device-originated servicing events
- allow dynamic MSI-X vector allocation
- support Tx bandwidth clamping
- Ethernet NICs consumer, and embedded:
- AMD:
- amd-xgbe: hardware timestamping and PTP clock support
- Broadcom integrated MACs (bcmgenet, bcmasp):
- use napi_complete_done() return value to support NAPI polling
- add support for re-starting auto-negotiation
- Broadcom switches (b53):
- support BCM5325 switches
- add bcm63xx EPHY power control
- Synopsys (stmmac):
- lots of code refactoring and cleanups
- TI:
- icssg-prueth: read firmware-names from device tree
- icssg: PRP offload support
- Microchip:
- lan78xx: convert to PHYLINK for improved PHY and MAC management
- ksz: add KSZ8463 switch support
- Intel:
- support similar queue priority scheme in multi-queue and
time-sensitive networking (taprio)
- support packet pre-emption in both
- RealTek (r8169):
- enable EEE at 5Gbps on RTL8126
- Airoha:
- add PPPoE offload support
- MDIO bus controller for Airoha AN7583
- Ethernet PHYs:
- support for the IPQ5018 internal GE PHY
- micrel KSZ9477 switch-integrated PHYs:
- add MDI/MDI-X control support
- add RX error counters
- add cable test support
- add Signal Quality Indicator (SQI) reporting
- dp83tg720: improve reset handling and reduce link recovery time
- support bcm54811 (and its MII-Lite interface type)
- air_en8811h: support resume/suspend
- support PHY counters for QCA807x and QCA808x
- support WoL for QCA807x
- CAN drivers:
- rcar_canfd: support for Transceiver Delay Compensation
- kvaser: report FW versions via devlink dev info
- WiFi:
- extended regulatory info support (6 GHz)
- add statistics and beacon monitor for Multi-Link Operation (MLO)
- support S1G aggregation, improve S1G support
- add Radio Measurement action fields
- support per-radio RTS threshold
- some work around how FIPS affects wifi, which was wrong (RC4 is
used by TKIP, not only WEP)
- improvements for unsolicited probe response handling
- WiFi drivers:
- RealTek (rtw88):
- IBSS mode for SDIO devices
- RealTek (rtw89):
- BT coexistence for MLO/WiFi7
- concurrent station + P2P support
- support for USB devices RTL8851BU/RTL8852BU
- Intel (iwlwifi):
- use embedded PNVM in (to be released) FW images to fix
compatibility issues
- many cleanups (unused FW APIs, PCIe code, WoWLAN)
- some FIPS interoperability
- MediaTek (mt76):
- firmware recovery improvements
- more MLO work
- Qualcomm/Atheros (ath12k):
- fix scan on multi-radio devices
- more EHT/Wi-Fi 7 features
- encapsulation/decapsulation offload
- Broadcom (brcm80211):
- support SDIO 43751 device
- Bluetooth:
- hci_event: add support for handling LE BIG Sync Lost event
- ISO: add socket option to report packet seqnum via CMSG
- ISO: support SCM_TIMESTAMPING for ISO TS
- Bluetooth drivers:
- intel_pcie: support Function Level Reset
- nxpuart: add support for 4M baudrate
- nxpuart: implement powerup sequence, reset, FW dump, and FW loading"
* tag 'net-next-6.17' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1742 commits)
dpll: zl3073x: Fix build failure
selftests: bpf: fix legacy netfilter options
ipv6: annotate data-races around rt->fib6_nsiblings
ipv6: fix possible infinite loop in fib6_info_uses_dev()
ipv6: prevent infinite loop in rt6_nlmsg_size()
ipv6: add a retry logic in net6_rt_notify()
vrf: Drop existing dst reference in vrf_ip6_input_dst
net/sched: taprio: align entry index attr validation with mqprio
net: fsl_pq_mdio: use dev_err_probe
selftests: rtnetlink.sh: remove esp4_offload after test
vsock: remove unnecessary null check in vsock_getname()
igb: xsk: solve negative overflow of nb_pkts in zerocopy mode
stmmac: xsk: fix negative overflow of budget in zerocopy mode
dt-bindings: ieee802154: Convert at86rf230.txt yaml format
net: dsa: microchip: Disable PTP function of KSZ8463
net: dsa: microchip: Setup fiber ports for KSZ8463
net: dsa: microchip: Write switch MAC address differently for KSZ8463
net: dsa: microchip: Use different registers for KSZ8463
net: dsa: microchip: Add KSZ8463 switch support to KSZ DSA driver
dt-bindings: net: dsa: microchip: Add KSZ8463 switch support
...
2025-07-30 08:58:55 -07:00
|
|
|
net_ns_net_debugfs(net);
|
|
|
|
|
return 0;
|
2011-06-15 10:21:48 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct pernet_operations __net_initdata net_ns_ops = {
|
|
|
|
|
.init = net_ns_net_init,
|
|
|
|
|
};
|
|
|
|
|
|
2016-08-31 15:17:49 -07:00
|
|
|
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
|
2015-01-15 15:11:15 +01:00
|
|
|
[NETNSA_NONE] = { .type = NLA_UNSPEC },
|
|
|
|
|
[NETNSA_NSID] = { .type = NLA_S32 },
|
|
|
|
|
[NETNSA_PID] = { .type = NLA_U32 },
|
|
|
|
|
[NETNSA_FD] = { .type = NLA_U32 },
|
2018-11-26 15:42:04 +01:00
|
|
|
[NETNSA_TARGET_NSID] = { .type = NLA_S32 },
|
2015-01-15 15:11:15 +01:00
|
|
|
};
|
|
|
|
|
|
2017-04-16 09:48:24 -07:00
|
|
|
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
2015-01-15 15:11:15 +01:00
|
|
|
{
|
|
|
|
|
struct net *net = sock_net(skb->sk);
|
|
|
|
|
struct nlattr *tb[NETNSA_MAX + 1];
|
2017-06-09 14:41:56 +02:00
|
|
|
struct nlattr *nla;
|
2015-01-15 15:11:15 +01:00
|
|
|
struct net *peer;
|
|
|
|
|
int nsid, err;
|
|
|
|
|
|
netlink: make validation more configurable for future strictness
We currently have two levels of strict validation:
1) liberal (default)
- undefined (type >= max) & NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
- garbage at end of message accepted
2) strict (opt-in)
- NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
Split out parsing strictness into four different options:
* TRAILING - check that there's no trailing data after parsing
attributes (in message or nested)
* MAXTYPE - reject attrs > max known type
* UNSPEC - reject attributes with NLA_UNSPEC policy entries
* STRICT_ATTRS - strictly validate attribute size
The default for future things should be *everything*.
The current *_strict() is a combination of TRAILING and MAXTYPE,
and is renamed to _deprecated_strict().
The current regular parsing has none of this, and is renamed to
*_parse_deprecated().
Additionally it allows us to selectively set one of the new flags
even on old policies. Notably, the UNSPEC flag could be useful in
this case, since it can be arranged (by filling in the policy) to
not be an incompatible userspace ABI change, but would then going
forward prevent forgetting attribute entries. Similar can apply
to the POLICY flag.
We end up with the following renames:
* nla_parse -> nla_parse_deprecated
* nla_parse_strict -> nla_parse_deprecated_strict
* nlmsg_parse -> nlmsg_parse_deprecated
* nlmsg_parse_strict -> nlmsg_parse_deprecated_strict
* nla_parse_nested -> nla_parse_nested_deprecated
* nla_validate_nested -> nla_validate_nested_deprecated
Using spatch, of course:
@@
expression TB, MAX, HEAD, LEN, POL, EXT;
@@
-nla_parse(TB, MAX, HEAD, LEN, POL, EXT)
+nla_parse_deprecated(TB, MAX, HEAD, LEN, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression TB, MAX, NLA, POL, EXT;
@@
-nla_parse_nested(TB, MAX, NLA, POL, EXT)
+nla_parse_nested_deprecated(TB, MAX, NLA, POL, EXT)
@@
expression START, MAX, POL, EXT;
@@
-nla_validate_nested(START, MAX, POL, EXT)
+nla_validate_nested_deprecated(START, MAX, POL, EXT)
@@
expression NLH, HDRLEN, MAX, POL, EXT;
@@
-nlmsg_validate(NLH, HDRLEN, MAX, POL, EXT)
+nlmsg_validate_deprecated(NLH, HDRLEN, MAX, POL, EXT)
For this patch, don't actually add the strict, non-renamed versions
yet so that it breaks compile if I get it wrong.
Also, while at it, make nla_validate and nla_parse go down to a
common __nla_validate_parse() function to avoid code duplication.
Ultimately, this allows us to have very strict validation for every
new caller of nla_parse()/nlmsg_parse() etc as re-introduced in the
next patch, while existing things will continue to work as is.
In effect then, this adds fully strict validation for any new command.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-04-26 14:07:28 +02:00
|
|
|
err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
|
|
|
|
|
NETNSA_MAX, rtnl_net_policy, extack);
|
2015-01-15 15:11:15 +01:00
|
|
|
if (err < 0)
|
|
|
|
|
return err;
|
2017-06-09 14:41:56 +02:00
|
|
|
if (!tb[NETNSA_NSID]) {
|
|
|
|
|
NL_SET_ERR_MSG(extack, "nsid is missing");
|
2015-01-15 15:11:15 +01:00
|
|
|
return -EINVAL;
|
2017-06-09 14:41:56 +02:00
|
|
|
}
|
2015-01-15 15:11:15 +01:00
|
|
|
nsid = nla_get_s32(tb[NETNSA_NSID]);
|
|
|
|
|
|
2017-06-09 14:41:56 +02:00
|
|
|
if (tb[NETNSA_PID]) {
|
2015-01-15 15:11:15 +01:00
|
|
|
peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
|
2017-06-09 14:41:56 +02:00
|
|
|
nla = tb[NETNSA_PID];
|
|
|
|
|
} else if (tb[NETNSA_FD]) {
|
2015-01-15 15:11:15 +01:00
|
|
|
peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
|
2017-06-09 14:41:56 +02:00
|
|
|
nla = tb[NETNSA_FD];
|
|
|
|
|
} else {
|
|
|
|
|
NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
|
2015-01-15 15:11:15 +01:00
|
|
|
return -EINVAL;
|
2017-06-09 14:41:56 +02:00
|
|
|
}
|
|
|
|
|
if (IS_ERR(peer)) {
|
|
|
|
|
NL_SET_BAD_ATTR(extack, nla);
|
|
|
|
|
NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
|
2015-01-15 15:11:15 +01:00
|
|
|
return PTR_ERR(peer);
|
2017-06-09 14:41:56 +02:00
|
|
|
}
|
2015-01-15 15:11:15 +01:00
|
|
|
|
2025-06-27 16:32:42 +00:00
|
|
|
spin_lock(&net->nsid_lock);
|
2015-05-07 11:02:50 +02:00
|
|
|
if (__peernet2id(net, peer) >= 0) {
|
2025-06-27 16:32:42 +00:00
|
|
|
spin_unlock(&net->nsid_lock);
|
2015-01-15 15:11:15 +01:00
|
|
|
err = -EEXIST;
|
2017-06-09 14:41:56 +02:00
|
|
|
NL_SET_BAD_ATTR(extack, nla);
|
|
|
|
|
NL_SET_ERR_MSG(extack,
|
|
|
|
|
"Peer netns already has a nsid assigned");
|
2015-01-15 15:11:15 +01:00
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = alloc_netid(net, peer, nsid);
|
2025-06-27 16:32:42 +00:00
|
|
|
spin_unlock(&net->nsid_lock);
|
2015-05-07 11:02:50 +02:00
|
|
|
if (err >= 0) {
|
2019-10-09 11:19:10 +02:00
|
|
|
rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
|
netns: fix GFP flags in rtnl_net_notifyid()
In rtnl_net_notifyid(), we certainly can't pass a null GFP flag to
rtnl_notify(). A GFP_KERNEL flag would be fine in most circumstances,
but there are a few paths calling rtnl_net_notifyid() from atomic
context or from RCU critical sections. The later also precludes the use
of gfp_any() as it wouldn't detect the RCU case. Also, the nlmsg_new()
call is wrong too, as it uses GFP_KERNEL unconditionally.
Therefore, we need to pass the GFP flags as parameter and propagate it
through function calls until the proper flags can be determined.
In most cases, GFP_KERNEL is fine. The exceptions are:
* openvswitch: ovs_vport_cmd_get() and ovs_vport_cmd_dump()
indirectly call rtnl_net_notifyid() from RCU critical section,
* rtnetlink: rtmsg_ifinfo_build_skb() already receives GFP flags as
parameter.
Also, in ovs_vport_cmd_build_info(), let's change the GFP flags used
by nlmsg_new(). The function is allowed to sleep, so better make the
flags consistent with the ones used in the following
ovs_vport_cmd_fill_info() call.
Found by code inspection.
Fixes: 9a9634545c70 ("netns: notify netns id events")
Signed-off-by: Guillaume Nault <gnault@redhat.com>
Acked-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-10-23 18:39:04 +02:00
|
|
|
nlh, GFP_KERNEL);
|
2015-01-15 15:11:15 +01:00
|
|
|
err = 0;
|
2017-06-09 14:41:56 +02:00
|
|
|
} else if (err == -ENOSPC && nsid >= 0) {
|
2017-06-09 14:41:57 +02:00
|
|
|
err = -EEXIST;
|
2017-06-09 14:41:56 +02:00
|
|
|
NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
|
|
|
|
|
NL_SET_ERR_MSG(extack, "The specified nsid is already used");
|
2015-05-07 11:02:50 +02:00
|
|
|
}
|
2015-01-15 15:11:15 +01:00
|
|
|
out:
|
|
|
|
|
put_net(peer);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int rtnl_net_get_size(void)
|
|
|
|
|
{
|
|
|
|
|
return NLMSG_ALIGN(sizeof(struct rtgenmsg))
|
|
|
|
|
+ nla_total_size(sizeof(s32)) /* NETNSA_NSID */
|
2018-11-26 15:42:06 +01:00
|
|
|
+ nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
|
2015-01-15 15:11:15 +01:00
|
|
|
;
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-26 15:42:03 +01:00
|
|
|
struct net_fill_args {
|
|
|
|
|
u32 portid;
|
|
|
|
|
u32 seq;
|
|
|
|
|
int flags;
|
|
|
|
|
int cmd;
|
|
|
|
|
int nsid;
|
2018-11-26 15:42:06 +01:00
|
|
|
bool add_ref;
|
|
|
|
|
int ref_nsid;
|
2018-11-26 15:42:03 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
|
2015-01-15 15:11:15 +01:00
|
|
|
{
|
|
|
|
|
struct nlmsghdr *nlh;
|
|
|
|
|
struct rtgenmsg *rth;
|
|
|
|
|
|
2018-11-26 15:42:03 +01:00
|
|
|
nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
|
|
|
|
|
args->flags);
|
2015-01-15 15:11:15 +01:00
|
|
|
if (!nlh)
|
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
|
|
rth = nlmsg_data(nlh);
|
|
|
|
|
rth->rtgen_family = AF_UNSPEC;
|
|
|
|
|
|
2018-11-26 15:42:03 +01:00
|
|
|
if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
|
2015-01-15 15:11:15 +01:00
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
2018-11-26 15:42:06 +01:00
|
|
|
if (args->add_ref &&
|
|
|
|
|
nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
|
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
2015-01-15 15:11:15 +01:00
|
|
|
nlmsg_end(skb, nlh);
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
|
nlmsg_cancel(skb, nlh);
|
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-18 10:46:17 -08:00
|
|
|
static int rtnl_net_valid_getid_req(struct sk_buff *skb,
|
|
|
|
|
const struct nlmsghdr *nlh,
|
|
|
|
|
struct nlattr **tb,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
|
{
|
|
|
|
|
int i, err;
|
|
|
|
|
|
|
|
|
|
if (!netlink_strict_get_check(skb))
|
netlink: make validation more configurable for future strictness
We currently have two levels of strict validation:
1) liberal (default)
- undefined (type >= max) & NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
- garbage at end of message accepted
2) strict (opt-in)
- NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
Split out parsing strictness into four different options:
* TRAILING - check that there's no trailing data after parsing
attributes (in message or nested)
* MAXTYPE - reject attrs > max known type
* UNSPEC - reject attributes with NLA_UNSPEC policy entries
* STRICT_ATTRS - strictly validate attribute size
The default for future things should be *everything*.
The current *_strict() is a combination of TRAILING and MAXTYPE,
and is renamed to _deprecated_strict().
The current regular parsing has none of this, and is renamed to
*_parse_deprecated().
Additionally it allows us to selectively set one of the new flags
even on old policies. Notably, the UNSPEC flag could be useful in
this case, since it can be arranged (by filling in the policy) to
not be an incompatible userspace ABI change, but would then going
forward prevent forgetting attribute entries. Similar can apply
to the POLICY flag.
We end up with the following renames:
* nla_parse -> nla_parse_deprecated
* nla_parse_strict -> nla_parse_deprecated_strict
* nlmsg_parse -> nlmsg_parse_deprecated
* nlmsg_parse_strict -> nlmsg_parse_deprecated_strict
* nla_parse_nested -> nla_parse_nested_deprecated
* nla_validate_nested -> nla_validate_nested_deprecated
Using spatch, of course:
@@
expression TB, MAX, HEAD, LEN, POL, EXT;
@@
-nla_parse(TB, MAX, HEAD, LEN, POL, EXT)
+nla_parse_deprecated(TB, MAX, HEAD, LEN, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression TB, MAX, NLA, POL, EXT;
@@
-nla_parse_nested(TB, MAX, NLA, POL, EXT)
+nla_parse_nested_deprecated(TB, MAX, NLA, POL, EXT)
@@
expression START, MAX, POL, EXT;
@@
-nla_validate_nested(START, MAX, POL, EXT)
+nla_validate_nested_deprecated(START, MAX, POL, EXT)
@@
expression NLH, HDRLEN, MAX, POL, EXT;
@@
-nlmsg_validate(NLH, HDRLEN, MAX, POL, EXT)
+nlmsg_validate_deprecated(NLH, HDRLEN, MAX, POL, EXT)
For this patch, don't actually add the strict, non-renamed versions
yet so that it breaks compile if I get it wrong.
Also, while at it, make nla_validate and nla_parse go down to a
common __nla_validate_parse() function to avoid code duplication.
Ultimately, this allows us to have very strict validation for every
new caller of nla_parse()/nlmsg_parse() etc as re-introduced in the
next patch, while existing things will continue to work as is.
In effect then, this adds fully strict validation for any new command.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-04-26 14:07:28 +02:00
|
|
|
return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
|
|
|
|
|
tb, NETNSA_MAX, rtnl_net_policy,
|
|
|
|
|
extack);
|
2019-01-18 10:46:17 -08:00
|
|
|
|
netlink: make validation more configurable for future strictness
We currently have two levels of strict validation:
1) liberal (default)
- undefined (type >= max) & NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
- garbage at end of message accepted
2) strict (opt-in)
- NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
Split out parsing strictness into four different options:
* TRAILING - check that there's no trailing data after parsing
attributes (in message or nested)
* MAXTYPE - reject attrs > max known type
* UNSPEC - reject attributes with NLA_UNSPEC policy entries
* STRICT_ATTRS - strictly validate attribute size
The default for future things should be *everything*.
The current *_strict() is a combination of TRAILING and MAXTYPE,
and is renamed to _deprecated_strict().
The current regular parsing has none of this, and is renamed to
*_parse_deprecated().
Additionally it allows us to selectively set one of the new flags
even on old policies. Notably, the UNSPEC flag could be useful in
this case, since it can be arranged (by filling in the policy) to
not be an incompatible userspace ABI change, but would then going
forward prevent forgetting attribute entries. Similar can apply
to the POLICY flag.
We end up with the following renames:
* nla_parse -> nla_parse_deprecated
* nla_parse_strict -> nla_parse_deprecated_strict
* nlmsg_parse -> nlmsg_parse_deprecated
* nlmsg_parse_strict -> nlmsg_parse_deprecated_strict
* nla_parse_nested -> nla_parse_nested_deprecated
* nla_validate_nested -> nla_validate_nested_deprecated
Using spatch, of course:
@@
expression TB, MAX, HEAD, LEN, POL, EXT;
@@
-nla_parse(TB, MAX, HEAD, LEN, POL, EXT)
+nla_parse_deprecated(TB, MAX, HEAD, LEN, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression TB, MAX, NLA, POL, EXT;
@@
-nla_parse_nested(TB, MAX, NLA, POL, EXT)
+nla_parse_nested_deprecated(TB, MAX, NLA, POL, EXT)
@@
expression START, MAX, POL, EXT;
@@
-nla_validate_nested(START, MAX, POL, EXT)
+nla_validate_nested_deprecated(START, MAX, POL, EXT)
@@
expression NLH, HDRLEN, MAX, POL, EXT;
@@
-nlmsg_validate(NLH, HDRLEN, MAX, POL, EXT)
+nlmsg_validate_deprecated(NLH, HDRLEN, MAX, POL, EXT)
For this patch, don't actually add the strict, non-renamed versions
yet so that it breaks compile if I get it wrong.
Also, while at it, make nla_validate and nla_parse go down to a
common __nla_validate_parse() function to avoid code duplication.
Ultimately, this allows us to have very strict validation for every
new caller of nla_parse()/nlmsg_parse() etc as re-introduced in the
next patch, while existing things will continue to work as is.
In effect then, this adds fully strict validation for any new command.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-04-26 14:07:28 +02:00
|
|
|
err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
|
|
|
|
|
NETNSA_MAX, rtnl_net_policy,
|
|
|
|
|
extack);
|
2019-01-18 10:46:17 -08:00
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i <= NETNSA_MAX; i++) {
|
|
|
|
|
if (!tb[i])
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
switch (i) {
|
|
|
|
|
case NETNSA_PID:
|
|
|
|
|
case NETNSA_FD:
|
|
|
|
|
case NETNSA_NSID:
|
|
|
|
|
case NETNSA_TARGET_NSID:
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-16 09:48:24 -07:00
|
|
|
/* RTM_GETNSID doit handler: look up the nsid of a peer network namespace
 * (identified by NETNSA_PID, NETNSA_FD or NETNSA_NSID) and unicast an
 * RTM_NEWNSID reply back to the requester.
 *
 * If NETNSA_TARGET_NSID is present, the nsid is resolved relative to that
 * target namespace instead of the caller's, and the reply additionally
 * carries the peer's id in the caller's namespace (fillargs.ref_nsid).
 *
 * Returns 0 on success or a negative errno.
 */
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net_fill_args fillargs = {
		.portid = NETLINK_CB(skb).portid,
		.seq = nlh->nlmsg_seq,
		.cmd = RTM_NEWNSID,
	};
	/* target defaults to the requester's namespace */
	struct net *peer, *target = net;
	struct nlattr *nla;	/* attribute to blame in extack on failure */
	struct sk_buff *msg;
	int err;

	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;

	/* Resolve the peer namespace; each getter returns it with a
	 * reference held (dropped via put_net() at "out"), or an ERR_PTR.
	 */
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else if (tb[NETNSA_NSID]) {
		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
		if (!peer)
			peer = ERR_PTR(-ENOENT);
		nla = tb[NETNSA_NSID];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	if (tb[NETNSA_TARGET_NSID]) {
		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);

		/* Takes a reference on the target on success;
		 * released at "out" when fillargs.add_ref is set.
		 */
		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
		if (IS_ERR(target)) {
			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
			NL_SET_ERR_MSG(extack,
				       "Target netns reference is invalid");
			err = PTR_ERR(target);
			goto out;
		}
		fillargs.add_ref = true;
		/* Also report the peer's id as seen from the requester. */
		fillargs.ref_nsid = peernet2id(net, peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	/* nsid of the peer as seen from the (possibly overridden) target */
	fillargs.nsid = peernet2id(target, peer);
	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	if (fillargs.add_ref)
		put_net(target);
	put_net(peer);
	return err;
}
|
|
|
|
|
|
2015-04-07 11:51:54 +02:00
|
|
|
/* Per-dump state threaded through rtnl_net_dumpid() and its
 * idr_for_each() callback rtnl_net_dumpid_one().
 */
struct rtnl_net_dump_cb {
	struct net *tgt_net;		/* namespace whose netns_ids are dumped */
	struct net *ref_net;		/* requester's netns when a target override
					 * is in effect (fillargs.add_ref)
					 */
	struct sk_buff *skb;		/* dump skb being filled */
	struct net_fill_args fillargs;	/* template args for rtnl_net_fill() */
	int idx;			/* current entry index within the dump */
	int s_idx;			/* resume point from cb->args[0] */
};
|
|
|
|
|
|
2020-01-13 22:39:22 +01:00
|
|
|
/* Runs in RCU-critical section. */
|
2015-04-07 11:51:54 +02:00
|
|
|
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
|
|
|
|
|
{
|
|
|
|
|
struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
|
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
|
|
if (net_cb->idx < net_cb->s_idx)
|
|
|
|
|
goto cont;
|
|
|
|
|
|
2018-11-26 15:42:03 +01:00
|
|
|
net_cb->fillargs.nsid = id;
|
2018-11-26 15:42:06 +01:00
|
|
|
if (net_cb->fillargs.add_ref)
|
|
|
|
|
net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
|
2018-11-26 15:42:03 +01:00
|
|
|
ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
|
2015-04-07 11:51:54 +02:00
|
|
|
if (ret < 0)
|
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
|
|
cont:
|
|
|
|
|
net_cb->idx++;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-26 15:42:04 +01:00
|
|
|
/* Strictly validate an RTM_GETNSID dump request.
 *
 * Only NETNSA_TARGET_NSID is accepted; any other attribute is rejected
 * with -EINVAL. When a target nsid is given, the referenced namespace is
 * looked up (with a reference taken by rtnl_get_net_ns_capable()) and
 * installed as net_cb->tgt_net, the original tgt_net is kept in
 * net_cb->ref_net, and fillargs.add_ref is set so the caller knows to
 * drop the extra reference when the dump finishes.
 *
 * Returns 0 on success or a negative errno.
 */
static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
				   struct rtnl_net_dump_cb *net_cb,
				   struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NETNSA_MAX + 1];
	int err, i;

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err < 0)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		if (i == NETNSA_TARGET_NSID) {
			struct net *net;

			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
			if (IS_ERR(net)) {
				NL_SET_BAD_ATTR(extack, tb[i]);
				NL_SET_ERR_MSG(extack,
					       "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			/* Dump the target's ids; remember the original
			 * namespace so ref_nsid can be resolved in it.
			 */
			net_cb->fillargs.add_ref = true;
			net_cb->ref_net = net_cb->tgt_net;
			net_cb->tgt_net = net;
		} else {
			NL_SET_BAD_ATTR(extack, tb[i]);
			NL_SET_ERR_MSG(extack,
				       "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}
|
|
|
|
|
|
2015-04-07 11:51:54 +02:00
|
|
|
/* RTM_GETNSID dumpit handler: walk tgt_net->netns_ids under RCU and emit
 * one RTM_NEWNSID message per registered peer namespace id, resuming from
 * cb->args[0]. Returns 0 or a negative errno from request validation.
 */
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_net_dump_cb net_cb = {
		.tgt_net = sock_net(skb->sk),
		.skb = skb,
		.fillargs = {
			.portid = NETLINK_CB(cb->skb).portid,
			.seq = cb->nlh->nlmsg_seq,
			.flags = NLM_F_MULTI,
			.cmd = RTM_NEWNSID,
		},
		.idx = 0,
		.s_idx = cb->args[0],
	};
	int err = 0;

	if (cb->strict_check) {
		/* May retarget net_cb.tgt_net (taking a reference and
		 * setting fillargs.add_ref) — see rtnl_valid_dump_net_req().
		 */
		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
		if (err < 0)
			goto end;
	}

	/* The idr is walked under RCU; rtnl_net_dumpid_one() must not sleep. */
	rcu_read_lock();
	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	rcu_read_unlock();

	/* Save the resume point for the next dump invocation. */
	cb->args[0] = net_cb.idx;
end:
	if (net_cb.fillargs.add_ref)
		put_net(net_cb.tgt_net);
	return err;
}
|
|
|
|
|
|
2019-10-09 11:19:10 +02:00
|
|
|
/* Broadcast a netns id event (cmd/id) to RTNLGRP_NSID listeners of @net.
 * @gfp is caller-supplied because some callers run in atomic or RCU
 * context. On failure the error is recorded via rtnl_set_sk_err().
 */
static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp)
{
	struct net_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,
		.cmd = cmd,
		.nsid = id,
	};
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(rtnl_net_get_size(), gfp);
	if (!msg) {
		rtnl_set_sk_err(net, RTNLGRP_NSID, -ENOMEM);
		return;
	}

	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0) {
		nlmsg_free(msg);
		rtnl_set_sk_err(net, RTNLGRP_NSID, err);
		return;
	}

	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
}
|
|
|
|
|
|
2023-11-29 07:27:54 +00:00
|
|
|
#ifdef CONFIG_NET_NS
|
|
|
|
|
static void __init netns_ipv4_struct_check(void)
|
|
|
|
|
{
|
|
|
|
|
/* TX readonly hotpath cache lines */
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
|
|
|
|
|
sysctl_tcp_early_retrans);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
|
|
|
|
|
sysctl_tcp_tso_win_divisor);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
|
|
|
|
|
sysctl_tcp_tso_rtt_log);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
|
|
|
|
|
sysctl_tcp_autocorking);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
|
|
|
|
|
sysctl_tcp_min_snd_mss);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
|
|
|
|
|
sysctl_tcp_notsent_lowat);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
|
|
|
|
|
sysctl_tcp_limit_output_bytes);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
|
|
|
|
|
sysctl_tcp_min_rtt_wlen);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
|
|
|
|
|
sysctl_tcp_wmem);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
|
|
|
|
|
sysctl_ip_fwd_use_pmtu);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);
|
|
|
|
|
|
|
|
|
|
/* TXRX readonly hotpath cache lines */
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
|
|
|
|
|
sysctl_tcp_moderate_rcvbuf);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);
|
|
|
|
|
|
|
|
|
|
/* RX readonly hotpath cache line */
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
|
|
|
|
|
sysctl_ip_early_demux);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
|
|
|
|
|
sysctl_tcp_early_demux);
|
2024-10-10 03:41:00 +00:00
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
|
|
|
|
|
sysctl_tcp_l3mdev_accept);
|
2023-11-29 07:27:54 +00:00
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
|
|
|
|
|
sysctl_tcp_reordering);
|
|
|
|
|
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
|
|
|
|
|
sysctl_tcp_rmem);
|
2024-10-10 03:41:00 +00:00
|
|
|
CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 22);
|
2023-11-29 07:27:54 +00:00
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2024-10-14 13:18:22 -07:00
|
|
|
static const struct rtnl_msg_handler net_ns_rtnl_msg_handlers[] __initconst = {
|
|
|
|
|
{.msgtype = RTM_NEWNSID, .doit = rtnl_net_newid,
|
|
|
|
|
.flags = RTNL_FLAG_DOIT_UNLOCKED},
|
|
|
|
|
{.msgtype = RTM_GETNSID, .doit = rtnl_net_getid,
|
|
|
|
|
.dumpit = rtnl_net_dumpid,
|
|
|
|
|
.flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
|
|
|
|
|
};
|
|
|
|
|
|
2022-02-05 09:01:25 -08:00
|
|
|
/* Boot-time initialisation of the network namespace subsystem: allocates
 * the slab cache and cleanup workqueue (CONFIG_NET_NS only), sets up
 * init_net's generic pointer array, pre-initialises and fully sets up
 * init_net, registers the netns pernet subsystem, and installs the
 * RTM_*NSID rtnetlink handlers. Any failure here panics — the kernel
 * cannot run without the initial network namespace.
 */
void __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	netns_ipv4_struct_check();
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

#ifdef CONFIG_KEYS
	init_net.key_domain = &init_net_key_domain;
#endif
	/*
	 * This currently cannot fail as the initial network namespace
	 * has a static inode number.
	 */
	if (preinit_net(&init_net, &init_user_ns))
		panic("Could not preinitialize the initial network namespace");

	/* setup_net() must run with pernet_ops_rwsem held for writing. */
	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register_many(net_ns_rtnl_msg_handlers);
}
|
|
|
|
|
|
2007-11-13 03:23:21 -08:00
|
|
|
#ifdef CONFIG_NET_NS
|
2009-11-29 22:25:28 +00:00
|
|
|
/* CONFIG_NET_NS implementation: link @ops into the pernet @list and run
 * its init hook for every existing network namespace. On any failure,
 * unlink @ops and undo the namespaces initialised so far via
 * ops_undo_single(). Caller must hold pernet_ops_rwsem for writing.
 *
 * Returns 0 on success or the first error from ops_init().
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	struct net *net;
	int error;

	list_add_tail(&ops->list, list);
	if (ops->init || ops->id) {
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			/* Track successfully initialised namespaces so the
			 * error path can tear down exactly this set.
			 */
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_undo_single(ops, &net_exit_list);
	return error;
}
|
|
|
|
|
|
2009-11-29 22:25:28 +00:00
|
|
|
static void __unregister_pernet_operations(struct pernet_operations *ops)
|
2007-09-12 11:50:50 +02:00
|
|
|
{
|
2009-12-03 02:29:03 +00:00
|
|
|
LIST_HEAD(net_exit_list);
|
2025-04-11 13:52:31 -07:00
|
|
|
struct net *net;
|
2007-09-12 11:50:50 +02:00
|
|
|
|
net: Introduce net_rwsem to protect net_namespace_list
rtnl_lock() is used everywhere, and contention is very high.
When someone wants to iterate over alive net namespaces,
he/she has no a possibility to do that without exclusive lock.
But the exclusive rtnl_lock() in such places is overkill,
and it just increases the contention. Yes, there is already
for_each_net_rcu() in kernel, but it requires rcu_read_lock(),
and this can't be sleepable. Also, sometimes it may be need
really prevent net_namespace_list growth, so for_each_net_rcu()
is not fit there.
This patch introduces new rw_semaphore, which will be used
instead of rtnl_mutex to protect net_namespace_list. It is
sleepable and allows not-exclusive iterations over net
namespaces list. It allows to stop using rtnl_lock()
in several places (what is made in next patches) and makes
less the time, we keep rtnl_mutex. Here we just add new lock,
while the explanation of we can remove rtnl_lock() there are
in next patches.
Fine grained locks generally are better, then one big lock,
so let's do that with net_namespace_list, while the situation
allows that.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-03-29 19:20:32 +03:00
|
|
|
/* See comment in __register_pernet_operations() */
|
2009-12-03 02:29:03 +00:00
|
|
|
for_each_net(net)
|
|
|
|
|
list_add_tail(&net->exit_list, &net_exit_list);
|
2021-08-17 23:23:00 +08:00
|
|
|
|
2025-04-11 13:52:31 -07:00
|
|
|
list_del(&ops->list);
|
|
|
|
|
ops_undo_single(ops, &net_exit_list);
|
2007-09-12 11:50:50 +02:00
|
|
|
}
|
|
|
|
|
|
2007-11-13 03:23:21 -08:00
|
|
|
#else
|
|
|
|
|
|
2009-11-29 22:25:28 +00:00
|
|
|
static int __register_pernet_operations(struct list_head *list,
|
|
|
|
|
struct pernet_operations *ops)
|
2007-11-13 03:23:21 -08:00
|
|
|
{
|
net: Fix wild-memory-access in __register_pernet_operations() when CONFIG_NET_NS=n.
kernel test robot reported the splat below. [0]
Before commit fed176bf3143 ("net: Add ops_undo_single for module
load/unload."), if CONFIG_NET_NS=n, ops was linked to pernet_list
only when init_net had not been initialised, and ops was unlinked
from pernet_list only under the same condition.
Let's say an ops is loaded before the init_net setup but unloaded
after that. Then, the ops remains in pernet_list, which seems odd.
The cited commit added ops_undo_single(), which calls list_add() for
ops to link it to a temporary list, so a minor change was added to
__register_pernet_operations() and __unregister_pernet_operations()
under CONFIG_NET_NS=n to avoid the pernet_list corruption.
However, the corruption must have been left as is.
When CONFIG_NET_NS=n, pernet_list was used to keep ops registered
before the init_net setup, and after that, pernet_list was not used
at all.
This was because some ops annotated with __net_initdata are cleared
out of memory at some point during boot.
Then, such ops is initialised by POISON_FREE_INITMEM (0xcc), resulting
in that ops->list.{next,prev} suddenly switches from a valid pointer
to a weird value, 0xcccccccccccccccc.
To avoid such wild memory access, let's allow the pernet_list
corruption for CONFIG_NET_NS=n.
[0]:
Oops: general protection fault, probably for non-canonical address 0xf999959999999999: 0000 [#1] SMP KASAN NOPTI
KASAN: maybe wild-memory-access in range [0xccccccccccccccc8-0xcccccccccccccccf]
CPU: 2 UID: 0 PID: 346 Comm: modprobe Not tainted 6.15.0-rc1-00294-ga4cba7e98e35 #85 PREEMPT(voluntary)
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
RIP: 0010:__list_add_valid_or_report (lib/list_debug.c:32)
Code: 48 c1 ea 03 80 3c 02 00 0f 85 5a 01 00 00 49 39 74 24 08 0f 85 83 00 00 00 48 b8 00 00 00 00 00 fc ff df 48 89 f2 48 c1 ea 03 <80> 3c 02 00 0f 85 1f 01 00 00 4c 39 26 0f 85 ab 00 00 00 4c 39 ee
RSP: 0018:ff11000135b87830 EFLAGS: 00010a07
RAX: dffffc0000000000 RBX: ffffffffc02223c0 RCX: ffffffff8406fcc2
RDX: 1999999999999999 RSI: cccccccccccccccc RDI: ffffffffc02223c0
RBP: ffffffff86064e40 R08: 0000000000000001 R09: fffffbfff0a9f5b5
R10: ffffffff854fadaf R11: 676552203a54454e R12: ffffffff86064e40
R13: ffffffffc02223c0 R14: ffffffff86064e48 R15: 0000000000000021
FS: 00007f6fb0d9e1c0(0000) GS:ff11000858ea0000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f6fb0eda580 CR3: 0000000122fec005 CR4: 0000000000771ef0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
<TASK>
register_pernet_operations (./include/linux/list.h:150 (discriminator 5) ./include/linux/list.h:183 (discriminator 5) net/core/net_namespace.c:1315 (discriminator 5) net/core/net_namespace.c:1359 (discriminator 5))
register_pernet_subsys (net/core/net_namespace.c:1401)
inet6_init (net/ipv6/af_inet6.c:535) ipv6
do_one_initcall (init/main.c:1257)
do_init_module (kernel/module/main.c:2942)
load_module (kernel/module/main.c:3409)
init_module_from_file (kernel/module/main.c:3599)
idempotent_init_module (kernel/module/main.c:3611)
__x64_sys_finit_module (./include/linux/file.h:62 ./include/linux/file.h:83 kernel/module/main.c:3634 kernel/module/main.c:3621 kernel/module/main.c:3621)
do_syscall_64 (arch/x86/entry/syscall_64.c:63 arch/x86/entry/syscall_64.c:94)
entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)
RIP: 0033:0x7f6fb0df7e5d
Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 73 9f 1b 00 f7 d8 64 89 01 48
RSP: 002b:00007fffdc6a8968 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000055b535721b70 RCX: 00007f6fb0df7e5d
RDX: 0000000000000000 RSI: 000055b51e44aa2a RDI: 0000000000000004
RBP: 0000000000040000 R08: 0000000000000000 R09: 000055b535721b30
R10: 0000000000000004 R11: 0000000000000246 R12: 000055b51e44aa2a
R13: 000055b535721bf0 R14: 000055b5357220b0 R15: 0000000000000000
</TASK>
Modules linked in: ipv6(+) crc_ccitt
Fixes: fed176bf3143 ("net: Add ops_undo_single for module load/unload.")
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202504181511.1c3f23e4-lkp@intel.com
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20250418215025.87871-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-04-18 14:50:20 -07:00
|
|
|
if (!init_net_initialized) {
|
|
|
|
|
list_add_tail(&ops->list, list);
|
2016-08-10 14:36:00 -07:00
|
|
|
return 0;
|
net: Fix wild-memory-access in __register_pernet_operations() when CONFIG_NET_NS=n.
kernel test robot reported the splat below. [0]
Before commit fed176bf3143 ("net: Add ops_undo_single for module
load/unload."), if CONFIG_NET_NS=n, ops was linked to pernet_list
only when init_net had not been initialised, and ops was unlinked
from pernet_list only under the same condition.
Let's say an ops is loaded before the init_net setup but unloaded
after that. Then, the ops remains in pernet_list, which seems odd.
The cited commit added ops_undo_single(), which calls list_add() for
ops to link it to a temporary list, so a minor change was added to
__register_pernet_operations() and __unregister_pernet_operations()
under CONFIG_NET_NS=n to avoid the pernet_list corruption.
However, the corruption must have been left as is.
When CONFIG_NET_NS=n, pernet_list was used to keep ops registered
before the init_net setup, and after that, pernet_list was not used
at all.
This was because some ops annotated with __net_initdata are cleared
out of memory at some point during boot.
Then, such ops is initialised by POISON_FREE_INITMEM (0xcc), resulting
in that ops->list.{next,prev} suddenly switches from a valid pointer
to a weird value, 0xcccccccccccccccc.
To avoid such wild memory access, let's allow the pernet_list
corruption for CONFIG_NET_NS=n.
[0]:
Oops: general protection fault, probably for non-canonical address 0xf999959999999999: 0000 [#1] SMP KASAN NOPTI
KASAN: maybe wild-memory-access in range [0xccccccccccccccc8-0xcccccccccccccccf]
CPU: 2 UID: 0 PID: 346 Comm: modprobe Not tainted 6.15.0-rc1-00294-ga4cba7e98e35 #85 PREEMPT(voluntary)
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
RIP: 0010:__list_add_valid_or_report (lib/list_debug.c:32)
Code: 48 c1 ea 03 80 3c 02 00 0f 85 5a 01 00 00 49 39 74 24 08 0f 85 83 00 00 00 48 b8 00 00 00 00 00 fc ff df 48 89 f2 48 c1 ea 03 <80> 3c 02 00 0f 85 1f 01 00 00 4c 39 26 0f 85 ab 00 00 00 4c 39 ee
RSP: 0018:ff11000135b87830 EFLAGS: 00010a07
RAX: dffffc0000000000 RBX: ffffffffc02223c0 RCX: ffffffff8406fcc2
RDX: 1999999999999999 RSI: cccccccccccccccc RDI: ffffffffc02223c0
RBP: ffffffff86064e40 R08: 0000000000000001 R09: fffffbfff0a9f5b5
R10: ffffffff854fadaf R11: 676552203a54454e R12: ffffffff86064e40
R13: ffffffffc02223c0 R14: ffffffff86064e48 R15: 0000000000000021
FS: 00007f6fb0d9e1c0(0000) GS:ff11000858ea0000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f6fb0eda580 CR3: 0000000122fec005 CR4: 0000000000771ef0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
<TASK>
register_pernet_operations (./include/linux/list.h:150 (discriminator 5) ./include/linux/list.h:183 (discriminator 5) net/core/net_namespace.c:1315 (discriminator 5) net/core/net_namespace.c:1359 (discriminator 5))
register_pernet_subsys (net/core/net_namespace.c:1401)
inet6_init (net/ipv6/af_inet6.c:535) ipv6
do_one_initcall (init/main.c:1257)
do_init_module (kernel/module/main.c:2942)
load_module (kernel/module/main.c:3409)
init_module_from_file (kernel/module/main.c:3599)
idempotent_init_module (kernel/module/main.c:3611)
__x64_sys_finit_module (./include/linux/file.h:62 ./include/linux/file.h:83 kernel/module/main.c:3634 kernel/module/main.c:3621 kernel/module/main.c:3621)
do_syscall_64 (arch/x86/entry/syscall_64.c:63 arch/x86/entry/syscall_64.c:94)
entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)
RIP: 0033:0x7f6fb0df7e5d
Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 73 9f 1b 00 f7 d8 64 89 01 48
RSP: 002b:00007fffdc6a8968 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000055b535721b70 RCX: 00007f6fb0df7e5d
RDX: 0000000000000000 RSI: 000055b51e44aa2a RDI: 0000000000000004
RBP: 0000000000040000 R08: 0000000000000000 R09: 000055b535721b30
R10: 0000000000000004 R11: 0000000000000246 R12: 000055b51e44aa2a
R13: 000055b535721bf0 R14: 000055b5357220b0 R15: 0000000000000000
</TASK>
Modules linked in: ipv6(+) crc_ccitt
Fixes: fed176bf3143 ("net: Add ops_undo_single for module load/unload.")
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202504181511.1c3f23e4-lkp@intel.com
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20250418215025.87871-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-04-18 14:50:20 -07:00
|
|
|
}
|
2016-08-10 14:36:00 -07:00
|
|
|
|
2012-04-16 04:43:15 +00:00
|
|
|
return ops_init(ops, &init_net);
|
2007-11-13 03:23:21 -08:00
|
|
|
}
|
|
|
|
|
|
2009-11-29 22:25:28 +00:00
|
|
|
static void __unregister_pernet_operations(struct pernet_operations *ops)
|
2007-11-13 03:23:21 -08:00
|
|
|
{
|
net: Fix wild-memory-access in __register_pernet_operations() when CONFIG_NET_NS=n.
kernel test robot reported the splat below. [0]
Before commit fed176bf3143 ("net: Add ops_undo_single for module
load/unload."), if CONFIG_NET_NS=n, ops was linked to pernet_list
only when init_net had not been initialised, and ops was unlinked
from pernet_list only under the same condition.
Let's say an ops is loaded before the init_net setup but unloaded
after that. Then, the ops remains in pernet_list, which seems odd.
The cited commit added ops_undo_single(), which calls list_add() for
ops to link it to a temporary list, so a minor change was added to
__register_pernet_operations() and __unregister_pernet_operations()
under CONFIG_NET_NS=n to avoid the pernet_list corruption.
However, the corruption must have been left as is.
When CONFIG_NET_NS=n, pernet_list was used to keep ops registered
before the init_net setup, and after that, pernet_list was not used
at all.
This was because some ops annotated with __net_initdata are cleared
out of memory at some point during boot.
Then, such ops is initialised by POISON_FREE_INITMEM (0xcc), resulting
in that ops->list.{next,prev} suddenly switches from a valid pointer
to a weird value, 0xcccccccccccccccc.
To avoid such wild memory access, let's allow the pernet_list
corruption for CONFIG_NET_NS=n.
[0]:
Oops: general protection fault, probably for non-canonical address 0xf999959999999999: 0000 [#1] SMP KASAN NOPTI
KASAN: maybe wild-memory-access in range [0xccccccccccccccc8-0xcccccccccccccccf]
CPU: 2 UID: 0 PID: 346 Comm: modprobe Not tainted 6.15.0-rc1-00294-ga4cba7e98e35 #85 PREEMPT(voluntary)
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
RIP: 0010:__list_add_valid_or_report (lib/list_debug.c:32)
Code: 48 c1 ea 03 80 3c 02 00 0f 85 5a 01 00 00 49 39 74 24 08 0f 85 83 00 00 00 48 b8 00 00 00 00 00 fc ff df 48 89 f2 48 c1 ea 03 <80> 3c 02 00 0f 85 1f 01 00 00 4c 39 26 0f 85 ab 00 00 00 4c 39 ee
RSP: 0018:ff11000135b87830 EFLAGS: 00010a07
RAX: dffffc0000000000 RBX: ffffffffc02223c0 RCX: ffffffff8406fcc2
RDX: 1999999999999999 RSI: cccccccccccccccc RDI: ffffffffc02223c0
RBP: ffffffff86064e40 R08: 0000000000000001 R09: fffffbfff0a9f5b5
R10: ffffffff854fadaf R11: 676552203a54454e R12: ffffffff86064e40
R13: ffffffffc02223c0 R14: ffffffff86064e48 R15: 0000000000000021
FS: 00007f6fb0d9e1c0(0000) GS:ff11000858ea0000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f6fb0eda580 CR3: 0000000122fec005 CR4: 0000000000771ef0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
<TASK>
register_pernet_operations (./include/linux/list.h:150 (discriminator 5) ./include/linux/list.h:183 (discriminator 5) net/core/net_namespace.c:1315 (discriminator 5) net/core/net_namespace.c:1359 (discriminator 5))
register_pernet_subsys (net/core/net_namespace.c:1401)
inet6_init (net/ipv6/af_inet6.c:535) ipv6
do_one_initcall (init/main.c:1257)
do_init_module (kernel/module/main.c:2942)
load_module (kernel/module/main.c:3409)
init_module_from_file (kernel/module/main.c:3599)
idempotent_init_module (kernel/module/main.c:3611)
__x64_sys_finit_module (./include/linux/file.h:62 ./include/linux/file.h:83 kernel/module/main.c:3634 kernel/module/main.c:3621 kernel/module/main.c:3621)
do_syscall_64 (arch/x86/entry/syscall_64.c:63 arch/x86/entry/syscall_64.c:94)
entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)
RIP: 0033:0x7f6fb0df7e5d
Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 73 9f 1b 00 f7 d8 64 89 01 48
RSP: 002b:00007fffdc6a8968 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000055b535721b70 RCX: 00007f6fb0df7e5d
RDX: 0000000000000000 RSI: 000055b51e44aa2a RDI: 0000000000000004
RBP: 0000000000040000 R08: 0000000000000000 R09: 000055b535721b30
R10: 0000000000000004 R11: 0000000000000246 R12: 000055b51e44aa2a
R13: 000055b535721bf0 R14: 000055b5357220b0 R15: 0000000000000000
</TASK>
Modules linked in: ipv6(+) crc_ccitt
Fixes: fed176bf3143 ("net: Add ops_undo_single for module load/unload.")
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202504181511.1c3f23e4-lkp@intel.com
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20250418215025.87871-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-04-18 14:50:20 -07:00
|
|
|
if (!init_net_initialized) {
|
|
|
|
|
list_del(&ops->list);
|
|
|
|
|
} else {
|
2016-08-10 14:36:00 -07:00
|
|
|
LIST_HEAD(net_exit_list);
|
2025-04-11 13:52:31 -07:00
|
|
|
|
2016-08-10 14:36:00 -07:00
|
|
|
list_add(&init_net.exit_list, &net_exit_list);
|
2025-04-11 13:52:31 -07:00
|
|
|
ops_undo_single(ops, &net_exit_list);
|
2016-08-10 14:36:00 -07:00
|
|
|
}
|
2007-11-13 03:23:21 -08:00
|
|
|
}
|
2009-11-29 22:25:28 +00:00
|
|
|
|
|
|
|
|
#endif /* CONFIG_NET_NS */
|
2007-11-13 03:23:21 -08:00
|
|
|
|
2008-04-15 00:35:23 -07:00
|
|
|
static DEFINE_IDA(net_generic_ids);
|
|
|
|
|
|
2009-11-29 22:25:28 +00:00
|
|
|
/* Register @ops on @list, allocating a net_generic id first if the ops
 * carries per-net generic data.  Caller holds pernet_ops_rwsem for
 * writing (all callers in this file take it around this function).
 */
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	/* ->id and ->size must be set together (or both left unset):
	 * either the ops has per-net generic data or it does not.
	 */
	if (WARN_ON(!!ops->id ^ !!ops->size))
		return -EINVAL;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		/* This does not require READ_ONCE as writers already hold
		 * pernet_ops_rwsem. But WRITE_ONCE is needed to protect
		 * net_alloc_generic.
		 */
		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		/* NOTE(review): presumably this waits for in-flight RCU
		 * callbacks that may still reference the slot before the id
		 * is released for reuse — mirrors
		 * unregister_pernet_operations().
		 */
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}
|
|
|
|
|
|
|
|
|
|
/* Tear down @ops: run the exit path, then release its net_generic id.
 * Caller holds pernet_ops_rwsem for writing.
 */
static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	/* Flush pending RCU callbacks before the generic id can be handed
	 * out again (same ordering as the register_pernet_operations()
	 * error path).
	 */
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}
|
|
|
|
|
|
2007-09-12 11:50:50 +02:00
|
|
|
/**
|
|
|
|
|
* register_pernet_subsys - register a network namespace subsystem
|
|
|
|
|
* @ops: pernet operations structure for the subsystem
|
|
|
|
|
*
|
|
|
|
|
* Register a subsystem which has init and exit functions
|
|
|
|
|
* that are called when network namespaces are created and
|
|
|
|
|
* destroyed respectively.
|
|
|
|
|
*
|
|
|
|
|
* When registered all network namespace init functions are
|
|
|
|
|
* called for every existing network namespace. Allowing kernel
|
|
|
|
|
* modules to have a race free view of the set of network namespaces.
|
|
|
|
|
*
|
|
|
|
|
* When a new network namespace is created all of the init
|
|
|
|
|
* methods are called in the order in which they were registered.
|
|
|
|
|
*
|
|
|
|
|
* When a network namespace is destroyed all of the exit methods
|
|
|
|
|
* are called in the reverse of the order with which they were
|
|
|
|
|
* registered.
|
|
|
|
|
*/
|
|
|
|
|
int register_pernet_subsys(struct pernet_operations *ops)
|
|
|
|
|
{
|
|
|
|
|
int error;
|
2018-03-27 18:02:23 +03:00
|
|
|
down_write(&pernet_ops_rwsem);
|
2007-09-12 11:50:50 +02:00
|
|
|
error = register_pernet_operations(first_device, ops);
|
2018-03-27 18:02:23 +03:00
|
|
|
up_write(&pernet_ops_rwsem);
|
2007-09-12 11:50:50 +02:00
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL_GPL(register_pernet_subsys);
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* unregister_pernet_subsys - unregister a network namespace subsystem
|
|
|
|
|
* @ops: pernet operations structure to manipulate
|
|
|
|
|
*
|
|
|
|
|
* Remove the pernet operations structure from the list to be
|
2008-02-03 17:56:48 +02:00
|
|
|
* used when network namespaces are created or destroyed. In
|
2007-09-12 11:50:50 +02:00
|
|
|
* addition run the exit method for all existing network
|
|
|
|
|
* namespaces.
|
|
|
|
|
*/
|
2010-04-25 00:49:56 -07:00
|
|
|
void unregister_pernet_subsys(struct pernet_operations *ops)
|
2007-09-12 11:50:50 +02:00
|
|
|
{
|
2018-03-27 18:02:23 +03:00
|
|
|
down_write(&pernet_ops_rwsem);
|
2010-04-25 00:49:56 -07:00
|
|
|
unregister_pernet_operations(ops);
|
2018-03-27 18:02:23 +03:00
|
|
|
up_write(&pernet_ops_rwsem);
|
2007-09-12 11:50:50 +02:00
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* register_pernet_device - register a network namespace device
|
|
|
|
|
* @ops: pernet operations structure for the subsystem
|
|
|
|
|
*
|
|
|
|
|
* Register a device which has init and exit functions
|
|
|
|
|
* that are called when network namespaces are created and
|
|
|
|
|
* destroyed respectively.
|
|
|
|
|
*
|
|
|
|
|
* When registered all network namespace init functions are
|
|
|
|
|
* called for every existing network namespace. Allowing kernel
|
|
|
|
|
* modules to have a race free view of the set of network namespaces.
|
|
|
|
|
*
|
|
|
|
|
* When a new network namespace is created all of the init
|
|
|
|
|
* methods are called in the order in which they were registered.
|
|
|
|
|
*
|
|
|
|
|
* When a network namespace is destroyed all of the exit methods
|
|
|
|
|
* are called in the reverse of the order with which they were
|
|
|
|
|
* registered.
|
|
|
|
|
*/
|
|
|
|
|
int register_pernet_device(struct pernet_operations *ops)
|
|
|
|
|
{
|
|
|
|
|
int error;
|
2018-03-27 18:02:23 +03:00
|
|
|
down_write(&pernet_ops_rwsem);
|
2007-09-12 11:50:50 +02:00
|
|
|
error = register_pernet_operations(&pernet_list, ops);
|
|
|
|
|
if (!error && (first_device == &pernet_list))
|
|
|
|
|
first_device = &ops->list;
|
2018-03-27 18:02:23 +03:00
|
|
|
up_write(&pernet_ops_rwsem);
|
2007-09-12 11:50:50 +02:00
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL_GPL(register_pernet_device);
|
|
|
|
|
|
|
|
|
|
/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	/* If this was the first device, the next entry becomes the new
	 * boundary between subsystems and devices in pernet_list.
	 */
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
|
2010-03-07 18:14:23 -08:00
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_NS
|
2014-11-01 00:37:32 -04:00
|
|
|
/* proc_ns_operations->get: take a reference on @task's net namespace,
 * or return NULL if the task has no nsproxy (i.e. it is exiting).
 */
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	/* task_lock() keeps task->nsproxy stable while we grab the ref. */
	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}
|
|
|
|
|
|
2014-11-01 00:37:32 -04:00
|
|
|
/* proc_ns_operations->put: drop the reference taken by netns_get(). */
static void netns_put(struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	put_net(net);
}
|
|
|
|
|
|
2020-05-05 16:04:30 +02:00
|
|
|
/* proc_ns_operations->install: switch @nsset's nsproxy to the net
 * namespace @ns (setns(2) path).  Returns 0 or -EPERM.
 */
static int netns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct net *net = to_net_ns(ns);

	/* Need CAP_SYS_ADMIN over both the target netns's owning user
	 * namespace and the caller's credential user namespace.
	 */
	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/* Drop the old reference and pin the new namespace. */
	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}
|
|
|
|
|
|
2016-09-06 00:47:13 -07:00
|
|
|
static struct user_namespace *netns_owner(struct ns_common *ns)
|
|
|
|
|
{
|
|
|
|
|
return to_net_ns(ns)->user_ns;
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-07 18:14:23 -08:00
|
|
|
/* Hooks backing /proc/<pid>/ns/net for nsfs/setns(2). */
const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
|
|
|
|
|
#endif
|