linux_dsm_epyc7002/drivers/net/wireguard/socket.c
Jason A. Donenfeld b673e24aad wireguard: socket: remove errant restriction on looping to self
It's already possible to create two different interfaces and loop
packets between them. This has always been possible with tunnels in the
kernel, and isn't specific to wireguard. Therefore, the networking stack
already needs to deal with that. At the very least, the packet winds up
exceeding the MTU and is discarded at that point. So, since this is
already something that happens, there's no need to forbid the not very
exceptional case of routing a packet back to the same interface; this
loop is no different from others, and we shouldn't special-case it, but
rather rely on the stack's generic handling of loops. This also makes it
easier to do interesting things with wireguard, such as onion routing.

At the same time, we add a selftest for this, ensuring that both onion
routing works and infinite routing loops do not crash the kernel. We
also add a test case for wireguard interfaces nesting packets and
sending traffic between each other, as well as the loop in this case
too. We make sure to send some throughput-heavy traffic for this use
case, to stress any possible recursion issues with the locks around
workqueues.

Fixes: e7096c131e ("net: WireGuard secure network tunnel")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-05-06 20:03:47 -07:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "device.h"
#include "peer.h"
#include "socket.h"
#include "queueing.h"
#include "messages.h"

#include <linux/ctype.h>
#include <linux/net.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/inetdevice.h>
#include <net/udp_tunnel.h>
#include <net/ipv6.h>

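/* Encapsulate and transmit skb to @endpoint via the device's IPv4 UDP
 * socket, using the optional per-peer dst @cache when available. If a
 * previously learned source address or source interface no longer works,
 * it is cleared and the route lookup is retried once. The skb is consumed
 * in all cases.
 */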
static int send4(struct wg_device *wg, struct sk_buff *skb,
		 struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
{
	struct flowi4 fl = {
		.saddr = endpoint->src4.s_addr,
		.daddr = endpoint->addr4.sin_addr.s_addr,
		.fl4_dport = endpoint->addr4.sin_port,
		.flowi4_mark = wg->fwmark,
		.flowi4_proto = IPPROTO_UDP
	};
	struct rtable *rt = NULL;
	struct sock *sock;
	int ret = 0;

	skb_mark_not_on_list(skb);
	skb->dev = wg->dev;
	skb->mark = wg->fwmark;

	rcu_read_lock_bh();
	sock = rcu_dereference_bh(wg->sock4);

	if (unlikely(!sock)) {
		ret = -ENONET;
		goto err;
	}

	fl.fl4_sport = inet_sk(sock)->inet_sport;

	if (cache)
		rt = dst_cache_get_ip4(cache, &fl.saddr);

	if (!rt) {
		security_sk_classify_flow(sock, flowi4_to_flowi(&fl));
		if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
						fl.saddr, RT_SCOPE_HOST))) {
			endpoint->src4.s_addr = 0;
			*(__force __be32 *)&endpoint->src_if4 = 0;
			fl.saddr = 0;
			if (cache)
				dst_cache_reset(cache);
		}
		rt = ip_route_output_flow(sock_net(sock), &fl, sock);
		if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) &&
			     PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) &&
			     rt->dst.dev->ifindex != endpoint->src_if4)))) {
			endpoint->src4.s_addr = 0;
			*(__force __be32 *)&endpoint->src_if4 = 0;
			fl.saddr = 0;
			if (cache)
				dst_cache_reset(cache);
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			rt = ip_route_output_flow(sock_net(sock), &fl, sock);
		}
		if (unlikely(IS_ERR(rt))) {
			ret = PTR_ERR(rt);
			net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
					    wg->dev->name, &endpoint->addr, ret);
			goto err;
		}
		if (cache)
			dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
	}

	skb->ignore_df = 1;
	udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds,
			    ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
			    fl.fl4_dport, false, false);
	goto out;

err:
	kfree_skb(skb);
out:
	rcu_read_unlock_bh();
	return ret;
}

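/* IPv6 counterpart of send4(). Compiles down to returning -EAFNOSUPPORT
 * when CONFIG_IPV6 is not enabled.
 */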
static int send6(struct wg_device *wg, struct sk_buff *skb,
		 struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct flowi6 fl = {
		.saddr = endpoint->src6,
		.daddr = endpoint->addr6.sin6_addr,
		.fl6_dport = endpoint->addr6.sin6_port,
		.flowi6_mark = wg->fwmark,
		.flowi6_oif = endpoint->addr6.sin6_scope_id,
		.flowi6_proto = IPPROTO_UDP
		/* TODO: addr->sin6_flowinfo */
	};
	struct dst_entry *dst = NULL;
	struct sock *sock;
	int ret = 0;

	skb_mark_not_on_list(skb);
	skb->dev = wg->dev;
	skb->mark = wg->fwmark;

	rcu_read_lock_bh();
	sock = rcu_dereference_bh(wg->sock6);

	if (unlikely(!sock)) {
		ret = -ENONET;
		goto err;
	}

	fl.fl6_sport = inet_sk(sock)->inet_sport;

	if (cache)
		dst = dst_cache_get_ip6(cache, &fl.saddr);

	if (!dst) {
		security_sk_classify_flow(sock, flowi6_to_flowi(&fl));
		if (unlikely(!ipv6_addr_any(&fl.saddr) &&
			     !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) {
			endpoint->src6 = fl.saddr = in6addr_any;
			if (cache)
				dst_cache_reset(cache);
		}
		dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl,
						      NULL);
		if (unlikely(IS_ERR(dst))) {
			ret = PTR_ERR(dst);
			net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
					    wg->dev->name, &endpoint->addr, ret);
			goto err;
		}
		if (cache)
			dst_cache_set_ip6(cache, dst, &fl.saddr);
	}

	skb->ignore_df = 1;
	udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds,
			     ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
			     fl.fl6_dport, false);
	goto out;

err:
	kfree_skb(skb);
out:
	rcu_read_unlock_bh();
	return ret;
#else
	return -EAFNOSUPPORT;
#endif
}

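/* Send skb to the peer's current endpoint, choosing send4() or send6() by
 * address family, and credit peer->tx_bytes on success. The endpoint read
 * lock keeps a concurrent roaming update from changing the endpoint
 * mid-send.
 */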
int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds)
{
	size_t skb_len = skb->len;
	int ret = -EAFNOSUPPORT;

	read_lock_bh(&peer->endpoint_lock);
	if (peer->endpoint.addr.sa_family == AF_INET)
		ret = send4(peer->device, skb, &peer->endpoint, ds,
			    &peer->endpoint_cache);
	else if (peer->endpoint.addr.sa_family == AF_INET6)
		ret = send6(peer->device, skb, &peer->endpoint, ds,
			    &peer->endpoint_cache);
	else
		dev_kfree_skb(skb);
	if (likely(!ret))
		peer->tx_bytes += skb_len;
	read_unlock_bh(&peer->endpoint_lock);

	return ret;
}

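/* Copy a raw message buffer (e.g. a handshake message) into a freshly
 * allocated skb, leaving SKB_HEADER_LEN of headroom for the outer headers,
 * and send it to the peer.
 */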
int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer,
				  size_t len, u8 ds)
{
	struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, SKB_HEADER_LEN);
	skb_set_inner_network_header(skb, 0);
	skb_put_data(skb, buffer, len);
	return wg_socket_send_skb_to_peer(peer, skb, ds);
}

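/* Send a reply straight back to the source of @in_skb, reconstructing the
 * endpoint from the packet itself rather than from peer state; no dst
 * cache is used and no peer endpoint is updated. Used for packets such as
 * cookie replies, which must not cause roaming.
 */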
int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg,
					  struct sk_buff *in_skb, void *buffer,
					  size_t len)
{
	int ret = 0;
	struct sk_buff *skb;
	struct endpoint endpoint;

	if (unlikely(!in_skb))
		return -EINVAL;
	ret = wg_socket_endpoint_from_skb(&endpoint, in_skb);
	if (unlikely(ret < 0))
		return ret;

	skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;
	skb_reserve(skb, SKB_HEADER_LEN);
	skb_set_inner_network_header(skb, 0);
	skb_put_data(skb, buffer, len);

	if (endpoint.addr.sa_family == AF_INET)
		ret = send4(wg, skb, &endpoint, 0, NULL);
	else if (endpoint.addr.sa_family == AF_INET6)
		ret = send6(wg, skb, &endpoint, 0, NULL);
	/* No other possibilities if the endpoint is valid, which it is,
	 * as we checked above.
	 */
	return ret;
}

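/* Fill @endpoint from the outer headers of a received packet: the sender's
 * address and UDP source port, plus our own destination address (and, for
 * IPv4, the incoming interface; for IPv6, the scope id), so that replies
 * can use the same path.
 */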
int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
				const struct sk_buff *skb)
{
	memset(endpoint, 0, sizeof(*endpoint));
	if (skb->protocol == htons(ETH_P_IP)) {
		endpoint->addr4.sin_family = AF_INET;
		endpoint->addr4.sin_port = udp_hdr(skb)->source;
		endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
		endpoint->src4.s_addr = ip_hdr(skb)->daddr;
		endpoint->src_if4 = skb->skb_iif;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		endpoint->addr6.sin6_family = AF_INET6;
		endpoint->addr6.sin6_port = udp_hdr(skb)->source;
		endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
		endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id(
			&ipv6_hdr(skb)->saddr, skb->skb_iif);
		endpoint->src6 = ipv6_hdr(skb)->daddr;
	} else {
		return -EINVAL;
	}
	return 0;
}

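/* Endpoints are equal when the address family, remote address and port,
 * and the learned local source details all match; two unset (zeroed)
 * endpoints also compare equal.
 */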
static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b)
{
	return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET &&
		a->addr4.sin_port == b->addr4.sin_port &&
		a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr &&
		a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) ||
	       (a->addr.sa_family == AF_INET6 &&
		b->addr.sa_family == AF_INET6 &&
		a->addr6.sin6_port == b->addr6.sin6_port &&
		ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) &&
		a->addr6.sin6_scope_id == b->addr6.sin6_scope_id &&
		ipv6_addr_equal(&a->src6, &b->src6)) ||
	       unlikely(!a->addr.sa_family && !b->addr.sa_family);
}

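/* Store a new endpoint for the peer, if it actually differs from the
 * current one, and reset the peer's cached route when it does.
 */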
void wg_socket_set_peer_endpoint(struct wg_peer *peer,
				 const struct endpoint *endpoint)
{
	/* First we check unlocked, in order to optimize, since it's pretty rare
	 * that an endpoint will change. If we happen to be mid-write, and two
	 * CPUs wind up writing the same thing or something slightly different,
	 * it doesn't really matter much either.
	 */
	if (endpoint_eq(endpoint, &peer->endpoint))
		return;
	write_lock_bh(&peer->endpoint_lock);
	if (endpoint->addr.sa_family == AF_INET) {
		peer->endpoint.addr4 = endpoint->addr4;
		peer->endpoint.src4 = endpoint->src4;
		peer->endpoint.src_if4 = endpoint->src_if4;
	} else if (endpoint->addr.sa_family == AF_INET6) {
		peer->endpoint.addr6 = endpoint->addr6;
		peer->endpoint.src6 = endpoint->src6;
	} else {
		goto out;
	}
	dst_cache_reset(&peer->endpoint_cache);
out:
	write_unlock_bh(&peer->endpoint_lock);
}

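/* Roaming helper: update the peer's endpoint from the outer headers of a
 * packet just received from it.
 */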
void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer,
					  const struct sk_buff *skb)
{
	struct endpoint endpoint;

	if (!wg_socket_endpoint_from_skb(&endpoint, skb))
		wg_socket_set_peer_endpoint(peer, &endpoint);
}

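/* Forget the learned source address/interface for this peer, so that the
 * next transmission performs a fresh route lookup. Zeroing src6 also
 * clears the IPv4 src4/src_if4 fields, which occupy the same union.
 */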
void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
{
	write_lock_bh(&peer->endpoint_lock);
	memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
	dst_cache_reset(&peer->endpoint_cache);
	write_unlock_bh(&peer->endpoint_lock);
}

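/* encap_rcv handler installed on the UDP tunnel sockets. Hands each packet
 * to wg_packet_receive() and returns 0, meaning the packet was consumed
 * and the UDP stack should not process it further.
 */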
static int wg_receive(struct sock *sk, struct sk_buff *skb)
{
	struct wg_device *wg;

	if (unlikely(!sk))
		goto err;
	wg = sk->sk_user_data;
	if (unlikely(!wg))
		goto err;
	skb_mark_not_on_list(skb);
	wg_packet_receive(wg, skb);
	return 0;

err:
	kfree_skb(skb);
	return 0;
}

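/* Release one of the device's UDP tunnel sockets, if present, clearing its
 * memalloc flag first.
 */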
static void sock_free(struct sock *sock)
{
	if (unlikely(!sock))
		return;
	sk_clear_memalloc(sock);
	udp_tunnel_sock_release(sock->sk_socket);
}

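/* Transmission can happen in atomic context and in memory-reclaim paths
 * (e.g. when swap is backed by the tunnel), so the sockets use atomic
 * allocation and are flagged with sk_set_memalloc().
 */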
static void set_sock_opts(struct socket *sock)
{
	sock->sk->sk_allocation = GFP_ATOMIC;
	sock->sk->sk_sndbuf = INT_MAX;
	sk_set_memalloc(sock->sk);
}

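/* Create the device's IPv4 and, when IPv6 is available, IPv6 UDP sockets
 * in its creating namespace. When @port is 0, the v4 socket is bound to an
 * ephemeral port that the v6 socket must then match; if that port was
 * taken in the meantime (-EADDRINUSE), both sockets are recreated, up to
 * 100 times.
 */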
int wg_socket_init(struct wg_device *wg, u16 port)
{
	int ret;
	struct udp_tunnel_sock_cfg cfg = {
		.sk_user_data = wg,
		.encap_type = 1,
		.encap_rcv = wg_receive
	};
	struct socket *new4 = NULL, *new6 = NULL;
	struct udp_port_cfg port4 = {
		.family = AF_INET,
		.local_ip.s_addr = htonl(INADDR_ANY),
		.local_udp_port = htons(port),
		.use_udp_checksums = true
	};
#if IS_ENABLED(CONFIG_IPV6)
	int retries = 0;
	struct udp_port_cfg port6 = {
		.family = AF_INET6,
		.local_ip6 = IN6ADDR_ANY_INIT,
		.use_udp6_tx_checksums = true,
		.use_udp6_rx_checksums = true,
		.ipv6_v6only = true
	};
#endif

#if IS_ENABLED(CONFIG_IPV6)
retry:
#endif

	ret = udp_sock_create(wg->creating_net, &port4, &new4);
	if (ret < 0) {
		pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
		return ret;
	}
	set_sock_opts(new4);
	setup_udp_tunnel_sock(wg->creating_net, new4, &cfg);

#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6_mod_enabled()) {
		port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
		ret = udp_sock_create(wg->creating_net, &port6, &new6);
		if (ret < 0) {
			udp_tunnel_sock_release(new4);
			if (ret == -EADDRINUSE && !port && retries++ < 100)
				goto retry;
			pr_err("%s: Could not create IPv6 socket\n",
			       wg->dev->name);
			return ret;
		}
		set_sock_opts(new6);
		setup_udp_tunnel_sock(wg->creating_net, new6, &cfg);
	}
#endif

	wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
	return 0;
}

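/* Publish new sockets under socket_update_lock, wait for existing RCU
 * readers of the old pointers to drain with synchronize_rcu(), and only
 * then free the old sockets.
 */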
void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
		      struct sock *new6)
{
	struct sock *old4, *old6;

	mutex_lock(&wg->socket_update_lock);
	old4 = rcu_dereference_protected(wg->sock4,
				lockdep_is_held(&wg->socket_update_lock));
	old6 = rcu_dereference_protected(wg->sock6,
				lockdep_is_held(&wg->socket_update_lock));
	rcu_assign_pointer(wg->sock4, new4);
	rcu_assign_pointer(wg->sock6, new6);
	if (new4)
		wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
	mutex_unlock(&wg->socket_update_lock);
	synchronize_rcu();
	sock_free(old4);
	sock_free(old6);
}