// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/uidgid.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>
#include <net/sock.h>

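/*
 * UNIX_DIAG_NAME: the address the socket is bound to, if any.  The address
 * pointer is read with smp_load_acquire() since the caller might or might
 * not hold unix_table_lock.
 */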
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	/* might or might not have unix_table_lock */
	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
		       addr->name->sun_path);
}

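/* UNIX_DIAG_VFS: device and inode number of the filesystem path the socket is bound to. */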
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}

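/* UNIX_DIAG_PEER: inode number of the connected peer socket, if there is one. */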
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}

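/*
 * UNIX_DIAG_ICONS: for a listening socket, the inode numbers of the
 * not-yet-accepted (incoming) connections queued on its receive queue.
 */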
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer to the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

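/*
 * UNIX_DIAG_RQLEN: for listeners, the number of pending connections and the
 * accept backlog limit; otherwise the amount of queued incoming and outgoing
 * data.
 */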
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (sk->sk_state == TCP_LISTEN) {
		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

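/*
 * UNIX_DIAG_UID: the socket owner's uid, reported via from_kuid_munged()
 * in the user namespace of the netlink socket the reply is built for.
 */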
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb)
{
	uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk));

	return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}

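/*
 * Build one unix_diag_msg for @sk plus every attribute requested via
 * req->udiag_show, canceling the partially built message if anything
 * does not fit.
 */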
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_UID) &&
	    sk_diag_dump_uid(sk, skb))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

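/*
 * Like sk_diag_fill(), but read the inode number under the socket's state
 * lock first and skip sockets that report no inode.
 */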
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
}

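/*
 * Netlink dump callback: walk every chain of unix_socket_table and emit one
 * record per socket that matches the requested states, resuming from the
 * slot/offset saved in cb->args[] on each continuation.
 */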
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;
	struct net *net = sock_net(skb->sk);

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		sk_for_each(sk, &unix_socket_table[slot]) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

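/*
 * Find the unix socket with the given inode number and return it with a
 * reference held, or NULL if there is no such socket.
 */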
static struct sock *unix_lookup_by_ino(unsigned int ino)
{
	int i;
	struct sock *sk;

	spin_lock(&unix_table_lock);
	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		sk_for_each(sk, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_lock);

				return sk;
			}
	}

	spin_unlock(&unix_table_lock);
	return NULL;
}

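/*
 * Handle a request for a single socket identified by inode (and cookie).
 * The reply buffer starts small and is regrown in 256-byte steps, up to
 * PAGE_SIZE, if the requested attributes do not fit.
 */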
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;
	struct net *net = sock_net(in_skb->sk);

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;
	if (!net_eq(sock_net(sk), net))
		goto out;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

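/*
 * Entry point from sock_diag: dispatch NLM_F_DUMP requests to the netlink
 * dump machinery, everything else to the exact (single socket) lookup.
 */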
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);