// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/netrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/arp.h>
#include <linux/init.h>

static int nr_ndevs = 4;

int sysctl_netrom_default_path_quality            = NR_DEFAULT_QUAL;
int sysctl_netrom_obsolescence_count_initialiser  = NR_DEFAULT_OBS;
int sysctl_netrom_network_ttl_initialiser         = NR_DEFAULT_TTL;
int sysctl_netrom_transport_timeout               = NR_DEFAULT_T1;
int sysctl_netrom_transport_maximum_tries         = NR_DEFAULT_N2;
int sysctl_netrom_transport_acknowledge_delay     = NR_DEFAULT_T2;
int sysctl_netrom_transport_busy_delay            = NR_DEFAULT_T4;
int sysctl_netrom_transport_requested_window_size = NR_DEFAULT_WINDOW;
int sysctl_netrom_transport_no_activity_timeout   = NR_DEFAULT_IDLE;
int sysctl_netrom_routing_control                 = NR_DEFAULT_ROUTING;
int sysctl_netrom_link_fails_count                = NR_DEFAULT_FAILS;
int sysctl_netrom_reset_circuit                   = NR_DEFAULT_RESET;
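
/*
 *	Local circuit identifiers are carved out of this 16 bit counter: the
 *	top byte becomes our circuit index and the bottom byte our circuit id
 *	(see nr_connect() and nr_rx_frame()).  A 0/0 pair is treated as
 *	invalid on the wire, so the counter starts at 0x0101 and
 *	nr_find_next_circuit() skips any value with a zero byte.
 */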
static unsigned short circuit = 0x101;

static HLIST_HEAD(nr_list);
static DEFINE_SPINLOCK(nr_list_lock);

static const struct proto_ops nr_proto_ops;

/*
 *	Socket removal during an interrupt is now safe.
 */
static void nr_remove_socket(struct sock *sk)
{
	spin_lock_bh(&nr_list_lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&nr_list_lock);
}

/*
 *	Kill all bound sockets on a dropped device.
 */
static void nr_kill_by_device(struct net_device *dev)
{
	struct sock *s;

	spin_lock_bh(&nr_list_lock);
	sk_for_each(s, &nr_list)
		if (nr_sk(s)->device == dev)
			nr_disconnect(s, ENETUNREACH);
	spin_unlock_bh(&nr_list_lock);
}

/*
 *	Handle device status changes.
 */
static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	nr_kill_by_device(dev);
	nr_rt_device_down(dev);

	return NOTIFY_DONE;
}

/*
 *	Add a socket to the bound sockets list.
 */
static void nr_insert_socket(struct sock *sk)
{
	spin_lock_bh(&nr_list_lock);
	sk_add_node(sk, &nr_list);
	spin_unlock_bh(&nr_list_lock);
}

/*
 *	Find a socket that wants to accept the Connect Request we just
 *	received.
 */
static struct sock *nr_find_listener(ax25_address *addr)
{
	struct sock *s;

	spin_lock_bh(&nr_list_lock);
	sk_for_each(s, &nr_list)
		if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
		    s->sk_state == TCP_LISTEN) {
			sock_hold(s);
			goto found;
		}
	s = NULL;
found:
	spin_unlock_bh(&nr_list_lock);
	return s;
}

/*
 *	Find a connected NET/ROM socket given my circuit IDs.
 */
static struct sock *nr_find_socket(unsigned char index, unsigned char id)
{
	struct sock *s;

	spin_lock_bh(&nr_list_lock);
	sk_for_each(s, &nr_list) {
		struct nr_sock *nr = nr_sk(s);

		if (nr->my_index == index && nr->my_id == id) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock_bh(&nr_list_lock);
	return s;
}

/*
 *	Find a connected NET/ROM socket given their circuit IDs.
 */
static struct sock *nr_find_peer(unsigned char index, unsigned char id,
	ax25_address *dest)
{
	struct sock *s;

	spin_lock_bh(&nr_list_lock);
	sk_for_each(s, &nr_list) {
		struct nr_sock *nr = nr_sk(s);

		if (nr->your_index == index && nr->your_id == id &&
		    !ax25cmp(&nr->dest_addr, dest)) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock_bh(&nr_list_lock);
	return s;
}

/*
 *	Find next free circuit ID.
 */
static unsigned short nr_find_next_circuit(void)
{
	unsigned short id = circuit;
	unsigned char i, j;
	struct sock *sk;

	for (;;) {
		i = id / 256;
		j = id % 256;

		if (i != 0 && j != 0) {
			if ((sk = nr_find_socket(i, j)) == NULL)
				break;
			sock_put(sk);
		}

		id++;
	}

	return id;
}

/*
 *	Deferred destroy.
 */
void nr_destroy_socket(struct sock *);

/*
 *	Handler for deferred kills.
 */
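/*
 *	nr_destroy_socket() arms this timer (two seconds ahead) when the
 *	socket still has outstanding buffers; the handler simply retries the
 *	destroy from timer context once those buffers have had a chance to
 *	drain.
 */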
static void nr_destroy_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	bh_lock_sock(sk);
	sock_hold(sk);
	nr_destroy_socket(sk);
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 */
void nr_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	nr_remove_socket(sk);

	nr_stop_heartbeat(sk);
	nr_stop_t1timer(sk);
	nr_stop_t2timer(sk);
	nr_stop_t4timer(sk);
	nr_stop_idletimer(sk);

	nr_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) { /* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			nr_start_heartbeat(skb->sk);
			nr_sk(skb->sk)->state = NR_STATE_0;
		}

		kfree_skb(skb);
	}

	if (sk_has_allocations(sk)) {
		/* Defer: outstanding buffers */
		sk->sk_timer.function = nr_destroy_timer;
		sk->sk_timer.expires  = jiffies + 2 * HZ;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}

/*
 *	Handling for system calls applied via the various interfaces to a
 *	NET/ROM socket object.
 */
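
/*
 *	All of the NETROM_* options below take an unsigned int.  T1, T2 and
 *	T4 are given in seconds (converted to jiffies with HZ), IDLE is given
 *	in minutes, and N2 is a plain retry count limited to 1..31.
 */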

static int nr_setsockopt(struct socket *sock, int level, int optname,
	char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct nr_sock *nr = nr_sk(sk);
	unsigned long opt;

	if (level != SOL_NETROM)
		return -ENOPROTOOPT;

	if (optlen < sizeof(unsigned int))
		return -EINVAL;

	if (get_user(opt, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETROM_T1:
		if (opt < 1 || opt > ULONG_MAX / HZ)
			return -EINVAL;
		nr->t1 = opt * HZ;
		return 0;

	case NETROM_T2:
		if (opt < 1 || opt > ULONG_MAX / HZ)
			return -EINVAL;
		nr->t2 = opt * HZ;
		return 0;

	case NETROM_N2:
		if (opt < 1 || opt > 31)
			return -EINVAL;
		nr->n2 = opt;
		return 0;

	case NETROM_T4:
		if (opt < 1 || opt > ULONG_MAX / HZ)
			return -EINVAL;
		nr->t4 = opt * HZ;
		return 0;

	case NETROM_IDLE:
		if (opt > ULONG_MAX / (60 * HZ))
			return -EINVAL;
		nr->idle = opt * 60 * HZ;
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}

static int nr_getsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct nr_sock *nr = nr_sk(sk);
	int val = 0;
	int len;

	if (level != SOL_NETROM)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETROM_T1:
		val = nr->t1 / HZ;
		break;

	case NETROM_T2:
		val = nr->t2 / HZ;
		break;

	case NETROM_N2:
		val = nr->n2;
		break;

	case NETROM_T4:
		val = nr->t4 / HZ;
		break;

	case NETROM_IDLE:
		val = nr->idle / (60 * HZ);
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, len, sizeof(int));

	if (put_user(len, optlen))
		return -EFAULT;

	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
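
/*
 *	Note that the backlog recorded here is only enforced on the receive
 *	side: nr_rx_frame() refuses a new Connect Request with
 *	sk_acceptq_is_full() once the accept queue is full.
 */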
static int nr_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);
	if (sk->sk_state != TCP_LISTEN) {
		memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state           = TCP_LISTEN;
		release_sock(sk);
		return 0;
	}
	release_sock(sk);

	return -EOPNOTSUPP;
}

static struct proto nr_proto = {
	.name     = "NETROM",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct nr_sock),
};

static int nr_create(struct net *net, struct socket *sock, int protocol,
		     int kern)
{
	struct sock *sk;
	struct nr_sock *nr;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	if (sock->type != SOCK_SEQPACKET || protocol != 0)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	nr = nr_sk(sk);

	sock_init_data(sock, sk);

	sock->ops       = &nr_proto_ops;
	sk->sk_protocol = protocol;

	skb_queue_head_init(&nr->ack_queue);
	skb_queue_head_init(&nr->reseq_queue);
	skb_queue_head_init(&nr->frag_queue);

	nr_init_timers(sk);

	nr->t1     =
		msecs_to_jiffies(sysctl_netrom_transport_timeout);
	nr->t2     =
		msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
	nr->n2     =
		msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
	nr->t4     =
		msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
	nr->idle   =
		msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
	nr->window = sysctl_netrom_transport_requested_window_size;

	nr->bpqext = 1;
	nr->state  = NR_STATE_0;

	return 0;
}

static struct sock *nr_make_new(struct sock *osk)
{
	struct sock *sk;
	struct nr_sock *nr, *onr;

	if (osk->sk_type != SOCK_SEQPACKET)
		return NULL;

	sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot, 0);
	if (sk == NULL)
		return NULL;

	nr = nr_sk(sk);

	sock_init_data(NULL, sk);

	sk->sk_type     = osk->sk_type;
	sk->sk_priority = osk->sk_priority;
	sk->sk_protocol = osk->sk_protocol;
	sk->sk_rcvbuf   = osk->sk_rcvbuf;
	sk->sk_sndbuf   = osk->sk_sndbuf;
	sk->sk_state    = TCP_ESTABLISHED;
	sock_copy_flags(sk, osk);

	skb_queue_head_init(&nr->ack_queue);
	skb_queue_head_init(&nr->reseq_queue);
	skb_queue_head_init(&nr->frag_queue);

	nr_init_timers(sk);

	onr = nr_sk(osk);

	nr->t1     = onr->t1;
	nr->t2     = onr->t2;
	nr->n2     = onr->n2;
	nr->t4     = onr->t4;
	nr->idle   = onr->idle;
	nr->window = onr->window;

	nr->device = onr->device;
	nr->bpqext = onr->bpqext;

	return sk;
}

static int nr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nr_sock *nr;

	if (sk == NULL) return 0;

	sock_hold(sk);
	sock_orphan(sk);
	lock_sock(sk);
	nr = nr_sk(sk);

	switch (nr->state) {
	case NR_STATE_0:
	case NR_STATE_1:
	case NR_STATE_2:
		nr_disconnect(sk, 0);
		nr_destroy_socket(sk);
		break;

	case NR_STATE_3:
		nr_clear_queues(sk);
		nr->n2count = 0;
		nr_write_internal(sk, NR_DISCREQ);
		nr_start_t1timer(sk);
		nr_stop_t2timer(sk);
		nr_stop_t4timer(sk);
		nr_stop_idletimer(sk);
		nr->state        = NR_STATE_2;
		sk->sk_state     = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DESTROY);
		break;

	default:
		break;
	}

	sock->sk = NULL;
	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct nr_sock *nr = nr_sk(sk);
	struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
	struct net_device *dev;
	ax25_uid_assoc *user;
	ax25_address *source;

	lock_sock(sk);
	if (!sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EINVAL;
	}
	if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct full_sockaddr_ax25)) {
		release_sock(sk);
		return -EINVAL;
	}
	if (addr_len < (addr->fsa_ax25.sax25_ndigis * sizeof(ax25_address) + sizeof(struct sockaddr_ax25))) {
		release_sock(sk);
		return -EINVAL;
	}
	if (addr->fsa_ax25.sax25_family != AF_NETROM) {
		release_sock(sk);
		return -EINVAL;
	}
	if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	/*
	 * Only the super user can set an arbitrary user callsign.
	 */
	if (addr->fsa_ax25.sax25_ndigis == 1) {
		if (!capable(CAP_NET_BIND_SERVICE)) {
			dev_put(dev);
			release_sock(sk);
			return -EPERM;
		}
		nr->user_addr   = addr->fsa_digipeater[0];
		nr->source_addr = addr->fsa_ax25.sax25_call;
	} else {
		source = &addr->fsa_ax25.sax25_call;

		user = ax25_findbyuid(current_euid());
		if (user) {
			nr->user_addr   = user->call;
			ax25_uid_put(user);
		} else {
			if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
				release_sock(sk);
				dev_put(dev);
				return -EPERM;
			}
			nr->user_addr   = *source;
		}

		nr->source_addr = *source;
	}

	nr->device = dev;
	nr_insert_socket(sk);

	sock_reset_flag(sk, SOCK_ZAPPED);
	dev_put(dev);
	release_sock(sk);

	return 0;
}

static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
	int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct nr_sock *nr = nr_sk(sk);
	struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr;
	ax25_address *source = NULL;
	ax25_uid_assoc *user;
	struct net_device *dev;
	int err = 0;

	lock_sock(sk);
	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		sock->state = SS_CONNECTED;
		goto out_release;	/* Connect completed during an ERESTARTSYS event */
	}

	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		err = -ECONNREFUSED;
		goto out_release;
	}

	if (sk->sk_state == TCP_ESTABLISHED) {
		err = -EISCONN;	/* No reconnect on a seqpacket socket */
		goto out_release;
	}

	sk->sk_state = TCP_CLOSE;
	sock->state  = SS_UNCONNECTED;

	if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) {
		err = -EINVAL;
		goto out_release;
	}
	if (addr->sax25_family != AF_NETROM) {
		err = -EINVAL;
		goto out_release;
	}
	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
		sock_reset_flag(sk, SOCK_ZAPPED);

		if ((dev = nr_dev_first()) == NULL) {
			err = -ENETUNREACH;
			goto out_release;
		}
		source = (ax25_address *)dev->dev_addr;

		user = ax25_findbyuid(current_euid());
		if (user) {
			nr->user_addr   = user->call;
			ax25_uid_put(user);
		} else {
			if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) {
				dev_put(dev);
				err = -EPERM;
				goto out_release;
			}
			nr->user_addr   = *source;
		}

		nr->source_addr = *source;
		nr->device      = dev;

		dev_put(dev);
		nr_insert_socket(sk);		/* Finish the bind */
	}

	nr->dest_addr = addr->sax25_call;

	release_sock(sk);
	circuit = nr_find_next_circuit();
	lock_sock(sk);

	nr->my_index = circuit / 256;
	nr->my_id    = circuit % 256;

	circuit++;

	/* Move to connecting socket, start sending Connect Requests */
	sock->state  = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	nr_establish_data_link(sk);

	nr->state = NR_STATE_1;

	nr_start_heartbeat(sk);

	/* Now the loop */
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
		err = -EINPROGRESS;
		goto out_release;
	}

	/*
	 * A Connect Ack with Choke or timeout or failed routing will go to
	 * closed.
	 */
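	/*
	 * Sleep until the state machine leaves TCP_SYN_SENT; the socket lock
	 * is dropped around schedule() so the receive path can deliver the
	 * Connect Acknowledge, and a pending signal aborts the wait with
	 * -ERESTARTSYS.
	 */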
	if (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(sk_sleep(sk), &wait,
					TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;
			if (!signal_pending(current)) {
				release_sock(sk);
				schedule();
				lock_sock(sk);
				continue;
			}
			err = -ERESTARTSYS;
			break;
		}
		finish_wait(sk_sleep(sk), &wait);
		if (err)
			goto out_release;
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		sock->state = SS_UNCONNECTED;
		err = sock_error(sk);	/* Always set at this point */
		goto out_release;
	}

	sock->state = SS_CONNECTED;

out_release:
	release_sock(sk);

	return err;
}

static int nr_accept(struct socket *sock, struct socket *newsock, int flags,
		     bool kern)
{
	struct sk_buff *skb;
	struct sock *newsk;
	DEFINE_WAIT(wait);
	struct sock *sk;
	int err = 0;

	if ((sk = sock->sk) == NULL)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_type != SOCK_SEQPACKET) {
		err = -EOPNOTSUPP;
		goto out_release;
	}

	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out_release;
	}

	/*
	 *	The receive queue of a listening socket holds the new sockets
	 *	created by nr_rx_frame() for the Connect Requests we saved.
	 */
	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		if (flags & O_NONBLOCK) {
			err = -EWOULDBLOCK;
			break;
		}
		if (!signal_pending(current)) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(sk_sleep(sk), &wait);
	if (err)
		goto out_release;

	newsk = skb->sk;
	sock_graft(newsk, newsock);

	/* Now attach up the new socket */
	kfree_skb(skb);
	sk_acceptq_removed(sk);

out_release:
	release_sock(sk);

	return err;
}

static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
	int peer)
{
	struct full_sockaddr_ax25 *sax = (struct full_sockaddr_ax25 *)uaddr;
	struct sock *sk = sock->sk;
	struct nr_sock *nr = nr_sk(sk);
	int uaddr_len;

	memset(&sax->fsa_ax25, 0, sizeof(struct sockaddr_ax25));

	lock_sock(sk);
	if (peer != 0) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			return -ENOTCONN;
		}
		sax->fsa_ax25.sax25_family = AF_NETROM;
		sax->fsa_ax25.sax25_ndigis = 1;
		sax->fsa_ax25.sax25_call   = nr->user_addr;
		memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
		sax->fsa_digipeater[0]     = nr->dest_addr;
		uaddr_len = sizeof(struct full_sockaddr_ax25);
	} else {
		sax->fsa_ax25.sax25_family = AF_NETROM;
		sax->fsa_ax25.sax25_ndigis = 0;
		sax->fsa_ax25.sax25_call   = nr->source_addr;
		uaddr_len = sizeof(struct sockaddr_ax25);
	}
	release_sock(sk);

	return uaddr_len;
}

int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct sock *sk;
	struct sock *make;
	struct nr_sock *nr_make;
	ax25_address *src, *dest, *user;
	unsigned short circuit_index, circuit_id;
	unsigned short peer_circuit_index, peer_circuit_id;
	unsigned short frametype, flags, window, timeout;
	int ret;

	skb_orphan(skb);

	/*
	 *	skb->data points to the netrom frame start
	 */
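	/*
	 *	The 15 byte network header is the 7 byte source callsign, the
	 *	7 byte destination callsign and one TTL byte; the transport
	 *	header follows it: bytes 15/16 are the sender's circuit
	 *	index/id, bytes 17/18 the peer circuit index/id, and byte 19
	 *	packs the frame type in its low nibble and the flags in its
	 *	high nibble.
	 */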

	src  = (ax25_address *)(skb->data + 0);
	dest = (ax25_address *)(skb->data + 7);

	circuit_index      = skb->data[15];
	circuit_id         = skb->data[16];
	peer_circuit_index = skb->data[17];
	peer_circuit_id    = skb->data[18];
	frametype          = skb->data[19] & 0x0F;
	flags              = skb->data[19] & 0xF0;

	/*
	 * Check for an incoming IP over NET/ROM frame.
	 */
	if (frametype == NR_PROTOEXT &&
	    circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) {
		skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
		skb_reset_transport_header(skb);

		return nr_rx_ip(skb, dev);
	}

	/*
	 * Find an existing socket connection, based on circuit ID, if it's
	 * a Connect Request base it on their circuit ID.
	 *
	 * Circuit ID 0/0 is not valid but it could still be a "reset" for a
	 * circuit that no longer exists at the other end ...
	 */

	sk = NULL;

	if (circuit_index == 0 && circuit_id == 0) {
		if (frametype == NR_CONNACK && flags == NR_CHOKE_FLAG)
			sk = nr_find_peer(peer_circuit_index, peer_circuit_id, src);
	} else {
		if (frametype == NR_CONNREQ)
			sk = nr_find_peer(circuit_index, circuit_id, src);
		else
			sk = nr_find_socket(circuit_index, circuit_id);
	}

	if (sk != NULL) {
		bh_lock_sock(sk);
		skb_reset_transport_header(skb);

		if (frametype == NR_CONNACK && skb->len == 22)
			nr_sk(sk)->bpqext = 1;
		else
			nr_sk(sk)->bpqext = 0;

		ret = nr_process_rx_frame(sk, skb);
		bh_unlock_sock(sk);
		sock_put(sk);
		return ret;
	}

	/*
	 * Now it should be a CONNREQ.
	 */
	if (frametype != NR_CONNREQ) {
		/*
		 * Here it would be nice to be able to send a reset but
		 * NET/ROM doesn't have one. We've tried to extend the protocol
		 * by sending NR_CONNACK | NR_CHOKE_FLAGS replies but that
		 * apparently kills BPQ boxes... :-(
		 * So now we try to follow the established behaviour of
		 * G8PZT's Xrouter which is sending packets with command type 7
		 * as an extension of the protocol.
		 */
		if (sysctl_netrom_reset_circuit &&
		    (frametype != NR_RESET || flags != 0))
			nr_transmit_reset(skb, 1);

		return 0;
	}

	sk = nr_find_listener(dest);

	user = (ax25_address *)(skb->data + 21);

	if (sk == NULL || sk_acceptq_is_full(sk) ||
	    (make = nr_make_new(sk)) == NULL) {
		nr_transmit_refusal(skb, 0);
		if (sk)
			sock_put(sk);
		return 0;
	}

	bh_lock_sock(sk);

	window = skb->data[20];

	sock_hold(make);
	skb->sk         = make;
	skb->destructor = sock_efree;
	make->sk_state  = TCP_ESTABLISHED;

	/* Fill in his circuit details */
	nr_make = nr_sk(make);
	nr_make->source_addr = *dest;
	nr_make->dest_addr   = *src;
	nr_make->user_addr   = *user;

	nr_make->your_index  = circuit_index;
	nr_make->your_id     = circuit_id;

	bh_unlock_sock(sk);
	circuit = nr_find_next_circuit();
	bh_lock_sock(sk);

	nr_make->my_index    = circuit / 256;
	nr_make->my_id       = circuit % 256;

	circuit++;

	/* Window negotiation */
	if (window < nr_make->window)
		nr_make->window = window;

	/* L4 timeout negotiation */
	if (skb->len == 37) {
		timeout = skb->data[36] * 256 + skb->data[35];
		if (timeout * HZ < nr_make->t1)
			nr_make->t1 = timeout * HZ;
		nr_make->bpqext = 1;
	} else {
		nr_make->bpqext = 0;
	}

	nr_write_internal(make, NR_CONNACK);

	nr_make->condition = 0x00;
	nr_make->vs        = 0;
	nr_make->va        = 0;
	nr_make->vr        = 0;
	nr_make->vl        = 0;
	nr_make->state     = NR_STATE_3;
	sk_acceptq_added(sk);
	skb_queue_head(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	bh_unlock_sock(sk);
	sock_put(sk);

	nr_insert_socket(make);

	nr_start_heartbeat(make);
	nr_start_idletimer(make);

	return 1;
}

static int nr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct nr_sock *nr = nr_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ax25 *, usax, msg->msg_name);
	int err;
	struct sockaddr_ax25 sax;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int size;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	lock_sock(sk);
	if (sock_flag(sk, SOCK_ZAPPED)) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		err = -EPIPE;
		goto out;
	}

	if (nr->device == NULL) {
		err = -ENETUNREACH;
		goto out;
	}

	if (usax) {
		if (msg->msg_namelen < sizeof(sax)) {
			err = -EINVAL;
			goto out;
		}
		sax = *usax;
		if (ax25cmp(&nr->dest_addr, &sax.sax25_call) != 0) {
			err = -EISCONN;
			goto out;
		}
		if (sax.sax25_family != AF_NETROM) {
			err = -EINVAL;
			goto out;
		}
	} else {
		if (sk->sk_state != TCP_ESTABLISHED) {
			err = -ENOTCONN;
			goto out;
		}
		sax.sax25_family = AF_NETROM;
		sax.sax25_call   = nr->dest_addr;
	}

	/* Build a packet - the conventional user limit is 236 bytes. We can
	   do ludicrously large NetROM frames but must not overflow */
	if (len > 65536) {
		err = -EMSGSIZE;
		goto out;
	}

	size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;

	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		goto out;

	skb_reserve(skb, size - len);
	skb_reset_transport_header(skb);

	/*
	 *	Push down the NET/ROM header
	 */

	asmptr = skb_push(skb, NR_TRANSPORT_LEN);

	/* Build a NET/ROM Transport header */
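	/*
	 *	The transport header is NR_TRANSPORT_LEN (five) bytes: the
	 *	peer's circuit index and id, two place-holder bytes that the
	 *	output path fills in before transmission, and the opcode,
	 *	which is NR_INFO for user data.
	 */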

	*asmptr++ = nr->your_index;
	*asmptr++ = nr->your_id;
	*asmptr++ = 0;		/* To be filled in later */
	*asmptr++ = 0;		/* Ditto */
	*asmptr++ = NR_INFO;

	/*
	 *	Put the data on the end
	 */
	skb_put(skb, len);

	/* User data follows immediately after the NET/ROM transport header */
	if (memcpy_from_msg(skb_transport_header(skb), msg, len)) {
		kfree_skb(skb);
		err = -EFAULT;
		goto out;
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		err = -ENOTCONN;
		goto out;
	}

	nr_output(sk, skb);	/* Shove it onto the queue */

	err = len;
out:
	release_sock(sk);
	return err;
}

static int nr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		      int flags)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_ax25 *, sax, msg->msg_name);
	size_t copied;
	struct sk_buff *skb;
	int er;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */

	lock_sock(sk);
	if (sk->sk_state != TCP_ESTABLISHED) {
		release_sock(sk);
		return -ENOTCONN;
	}

	/* Now we can treat all alike */
	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
		release_sock(sk);
		return er;
	}

	skb_reset_transport_header(skb);
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	er = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (er < 0) {
		skb_free_datagram(sk, skb);
		release_sock(sk);
		return er;
	}

	if (sax != NULL) {
		memset(sax, 0, sizeof(*sax));
		sax->sax25_family = AF_NETROM;
		skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
						 AX25_ADDR_LEN);
		msg->msg_namelen = sizeof(*sax);
	}

	skb_free_datagram(sk, skb);

	release_sock(sk);
	return copied;
}
|
|
|
|
|
|
|
|
|
|
|
|
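/*
 * Socket ioctl handler: TIOCOUTQ/TIOCINQ report queue occupancy, the
 * routing ioctls are checked for CAP_NET_ADMIN and handed to nr_rt_ioctl(),
 * the interface-address ioctls are rejected, and anything else is left to
 * the generic socket code via -ENOIOCTLCMD.
 */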
static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case TIOCOUTQ: {
		long amount;

		lock_sock(sk);
		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		release_sock(sk);
		return put_user(amount, (int __user *)argp);
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		long amount = 0L;

		lock_sock(sk);
		/* These two are safe on a single CPU system as only user tasks fiddle here */
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		release_sock(sk);
		return put_user(amount, (int __user *)argp);
	}

	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		return -EINVAL;

	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCNRDECOBS:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return nr_rt_ioctl(cmd, argp);

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

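/*
 * /proc/net/nr support: walk nr_list under nr_list_lock and emit one line
 * per NET/ROM socket.
 */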
#ifdef CONFIG_PROC_FS

static void *nr_info_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_list_lock)
{
	spin_lock_bh(&nr_list_lock);
	return seq_hlist_start_head(&nr_list, *pos);
}

static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_list, pos);
}

static void nr_info_stop(struct seq_file *seq, void *v)
	__releases(&nr_list_lock)
{
	spin_unlock_bh(&nr_list_lock);
}

static int nr_info_show(struct seq_file *seq, void *v)
{
	struct sock *s = sk_entry(v);
	struct net_device *dev;
	struct nr_sock *nr;
	const char *devname;
	char buf[11];

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
"user_addr dest_node src_node dev my your st vs vr va t1 t2 t4 idle n2 wnd Snd-Q Rcv-Q inode\n");
	else {
		bh_lock_sock(s);
		nr = nr_sk(s);

		if ((dev = nr->device) == NULL)
			devname = "???";
		else
			devname = dev->name;

		seq_printf(seq, "%-9s ", ax2asc(buf, &nr->user_addr));
		seq_printf(seq, "%-9s ", ax2asc(buf, &nr->dest_addr));
		seq_printf(seq,
"%-9s %-3s %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n",
			ax2asc(buf, &nr->source_addr),
			devname,
			nr->my_index,
			nr->my_id,
			nr->your_index,
			nr->your_id,
			nr->state,
			nr->vs,
			nr->vr,
			nr->va,
			ax25_display_timer(&nr->t1timer) / HZ,
			nr->t1 / HZ,
			ax25_display_timer(&nr->t2timer) / HZ,
			nr->t2 / HZ,
			ax25_display_timer(&nr->t4timer) / HZ,
			nr->t4 / HZ,
			ax25_display_timer(&nr->idletimer) / (60 * HZ),
			nr->idle / (60 * HZ),
			nr->n2count,
			nr->n2,
			nr->window,
			sk_wmem_alloc_get(s),
			sk_rmem_alloc_get(s),
			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);

		bh_unlock_sock(s);
	}
	return 0;
}

static const struct seq_operations nr_info_seqops = {
	.start = nr_info_start,
	.next = nr_info_next,
	.stop = nr_info_stop,
	.show = nr_info_show,
};
#endif	/* CONFIG_PROC_FS */

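/* Socket-layer glue: the PF_NETROM family and its proto_ops */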
static const struct net_proto_family nr_family_ops = {
	.family = PF_NETROM,
	.create = nr_create,
	.owner = THIS_MODULE,
};

static const struct proto_ops nr_proto_ops = {
	.family = PF_NETROM,
	.owner = THIS_MODULE,
	.release = nr_release,
	.bind = nr_bind,
	.connect = nr_connect,
	.socketpair = sock_no_socketpair,
	.accept = nr_accept,
	.getname = nr_getname,
	.poll = datagram_poll,
	.ioctl = nr_ioctl,
	.gettstamp = sock_gettstamp,
	.listen = nr_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = nr_setsockopt,
	.getsockopt = nr_getsockopt,
	.sendmsg = nr_sendmsg,
	.recvmsg = nr_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct notifier_block nr_dev_notifier = {
	.notifier_call = nr_device_event,
};

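/* The nr%d virtual devices created at module load; the array holds nr_ndevs entries */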
static struct net_device **dev_nr;

static struct ax25_protocol nr_pid = {
	.pid	= AX25_P_NETROM,
	.func	= nr_route_frame
};

static struct ax25_linkfail nr_linkfail_notifier = {
	.func	= nr_link_failed,
};

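/*
 * Module initialisation: register the protocol and socket family, create
 * the nr%d network devices, hook into AX.25 and set up the sysctl and
 * procfs entries, unwinding everything on failure.
 */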
static int __init nr_proto_init(void)
{
	int i;
	int rc = proto_register(&nr_proto, 0);

	if (rc)
		return rc;

	if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
		pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
		       __func__);
		rc = -EINVAL;
		goto unregister_proto;
	}

	dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
	if (!dev_nr) {
		pr_err("NET/ROM: %s - unable to allocate device array\n",
		       __func__);
		rc = -ENOMEM;
		goto unregister_proto;
	}

	for (i = 0; i < nr_ndevs; i++) {
		char name[IFNAMSIZ];
		struct net_device *dev;

		sprintf(name, "nr%d", i);
		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
		if (!dev) {
			rc = -ENOMEM;
			goto fail;
		}

		dev->base_addr = i;
		rc = register_netdev(dev);
		if (rc) {
			free_netdev(dev);
			goto fail;
		}
		dev_nr[i] = dev;
	}

	rc = sock_register(&nr_family_ops);
	if (rc)
		goto fail;

	rc = register_netdevice_notifier(&nr_dev_notifier);
	if (rc)
		goto out_sock;

	ax25_register_pid(&nr_pid);
	ax25_linkfail_register(&nr_linkfail_notifier);

#ifdef CONFIG_SYSCTL
	rc = nr_register_sysctl();
	if (rc)
		goto out_sysctl;
#endif

	nr_loopback_init();

	rc = -ENOMEM;
	if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
		goto proc_remove1;
	if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
			     &nr_neigh_seqops))
		goto proc_remove2;
	if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
			     &nr_node_seqops))
		goto proc_remove3;

	return 0;

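/* Error unwind: fall through the labels, releasing resources in roughly the
 * reverse order they were set up. */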
proc_remove3:
	remove_proc_entry("nr_neigh", init_net.proc_net);
proc_remove2:
	remove_proc_entry("nr", init_net.proc_net);
proc_remove1:

	nr_loopback_clear();
	nr_rt_free();

#ifdef CONFIG_SYSCTL
	nr_unregister_sysctl();
out_sysctl:
#endif
	ax25_linkfail_release(&nr_linkfail_notifier);
	ax25_protocol_release(AX25_P_NETROM);
	unregister_netdevice_notifier(&nr_dev_notifier);
out_sock:
	sock_unregister(PF_NETROM);
fail:
	while (--i >= 0) {
		unregister_netdev(dev_nr[i]);
		free_netdev(dev_nr[i]);
	}
	kfree(dev_nr);
unregister_proto:
	proto_unregister(&nr_proto);
	return rc;
}

module_init(nr_proto_init);

module_param(nr_ndevs, int, 0);
MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices");

MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio NET/ROM network and transport layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_NETROM);

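/* Module unload: undo everything nr_proto_init() set up */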
static void __exit nr_exit(void)
{
	int i;

	remove_proc_entry("nr", init_net.proc_net);
	remove_proc_entry("nr_neigh", init_net.proc_net);
	remove_proc_entry("nr_nodes", init_net.proc_net);
	nr_loopback_clear();

	nr_rt_free();

#ifdef CONFIG_SYSCTL
	nr_unregister_sysctl();
#endif

	ax25_linkfail_release(&nr_linkfail_notifier);
	ax25_protocol_release(AX25_P_NETROM);

	unregister_netdevice_notifier(&nr_dev_notifier);

	sock_unregister(PF_NETROM);

	for (i = 0; i < nr_ndevs; i++) {
		struct net_device *dev = dev_nr[i];

		if (dev) {
			unregister_netdev(dev);
			free_netdev(dev);
		}
	}

	kfree(dev_nr);
	proto_unregister(&nr_proto);
}
module_exit(nr_exit);