50fba2aa7c
This patch moves the sending of ICMP messages when there are no IPv4/IPv6 tunnels present to tunnel4/tunnel6 respectively.

Please note that for now if xfrm4_tunnel/xfrm6_tunnel is loaded then no ICMP messages will ever be sent. This is similar to how we handle AH/ESP/IPCOMP.

This move fixes the bug where we always send an ICMP message when there is no ip6_tunnel device present for a given packet, even if it is later handled by IPsec. It also causes ICMP messages to be sent when no IPIP tunnel is present.

I've decided to use the "port unreachable" ICMP message over the current value of "address unreachable" (and "protocol unreachable" by GRE) because it is not ambiguous, unlike the other ones which can be triggered by other conditions. There seems to be no standard specifying what value must be used, so this change should be OK. In fact we should change GRE to use this value as well.

Incidentally, this patch also fixes a fairly serious bug in xfrm6_tunnel where we don't check whether the embedded IPv6 header is present before dereferencing it for the inside source address.

This patch is inspired by a previous patch by Hugo Santos <hsantos@av.it.pt>.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
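As a rough, illustrative sketch of the behaviour described above (the handler and function names below are assumptions, not the actual tunnel4/tunnel6 code from this patch): a last-resort handler for IPPROTO_IPIP packets that no real tunnel and no IPsec state has claimed could answer with the "port unreachable" ICMP message discussed here.

#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <net/icmp.h>

/* Hypothetical lowest-priority fallback for IPIP packets nobody claimed.
 * "Port unreachable" is used because, unlike "address unreachable" or
 * "protocol unreachable", it cannot be triggered by other conditions.
 */
static int tunnel4_fallback_rcv(struct sk_buff *skb)
{
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
	kfree_skb(skb);
	return 0;
}

In the actual design, the registered xfrm4_tunnel/xfrm6_tunnel handlers run first, so when they are loaded no ICMP message is ever sent; a fallback like the one sketched above would only fire when no handler consumes the packet.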
193 lines
4.0 KiB
C
/*
 * xfrm4_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>
 *		Add Encapsulation support
 *
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>

int xfrm4_rcv(struct sk_buff *skb)
{
	return xfrm4_rcv_encap(skb, 0);
}

EXPORT_SYMBOL(xfrm4_rcv);

/* Propagate an explicit congestion (CE) mark from the outer IPIP header
 * to the inner header after decapsulation.
 */
static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
{
	struct iphdr *outer_iph = skb->nh.iph;
	struct iphdr *inner_iph = skb->h.ipiph;

	if (INET_ECN_is_ce(outer_iph->tos))
		IP_ECN_set_ce(inner_iph);
}

/* IPIP carries no SPI of its own, so the outer source address is used as
 * the state lookup key; everything else goes through the generic parser.
 */
static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq)
{
	switch (nexthdr) {
	case IPPROTO_IPIP:
		*spi = skb->nh.iph->saddr;
		*seq = 0;
		return 0;
	}

	return xfrm_parse_spi(skb, nexthdr, spi, seq);
}

#ifdef CONFIG_NETFILTER
static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
{
	struct iphdr *iph = skb->nh.iph;

	if (skb->dst == NULL) {
		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
				   skb->dev))
			goto drop;
	}
	return dst_input(skb);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
#endif

int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
	int err;
	u32 spi, seq;
	struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
	struct xfrm_state *x;
	int xfrm_nr = 0;
	int decaps = 0;

	if ((err = xfrm4_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) != 0)
		goto drop;

	do {
		struct iphdr *iph = skb->nh.iph;

		if (xfrm_nr == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi, iph->protocol, AF_INET);
		if (x == NULL)
			goto drop;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

		if ((x->encap ? x->encap->encap_type : 0) != encap_type)
			goto drop_unlock;

		if (x->props.replay_window && xfrm_replay_check(x, seq))
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		if (x->type->input(x, skb))
			goto drop_unlock;

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec[xfrm_nr++] = x;

		iph = skb->nh.iph;

		if (x->props.mode) {
			/* Tunnel mode: the payload must be an IPIP-encapsulated
			 * IPv4 packet.  Strip the outer header and hand the
			 * inner packet back to the stack.
			 */
			if (iph->protocol != IPPROTO_IPIP)
				goto drop;
			if (!pskb_may_pull(skb, sizeof(struct iphdr)))
				goto drop;
			if (skb_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
				goto drop;
			if (x->props.flags & XFRM_STATE_DECAP_DSCP)
				ipv4_copy_dscp(iph, skb->h.ipiph);
			if (!(x->props.flags & XFRM_STATE_NOECN))
				ipip_ecn_decapsulate(skb);
			skb->mac.raw = memmove(skb->data - skb->mac_len,
					       skb->mac.raw, skb->mac_len);
			skb->nh.raw = skb->data;
			memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
			decaps = 1;
			break;
		}

		if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) < 0)
			goto drop;
	} while (!err);

	/* Allocate new secpath or COW existing one. */

	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;
		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}
	if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
		goto drop;

	memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
	       xfrm_nr * sizeof(xfrm_vec[0]));
	skb->sp->len += xfrm_nr;

	nf_reset(skb);

	if (decaps) {
		if (!(skb->dev->flags&IFF_LOOPBACK)) {
			dst_release(skb->dst);
			skb->dst = NULL;
		}
		netif_rx(skb);
		return 0;
	} else {
#ifdef CONFIG_NETFILTER
		__skb_push(skb, skb->data - skb->nh.raw);
		skb->nh.iph->tot_len = htons(skb->len);
		ip_send_check(skb->nh.iph);

		NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
			xfrm4_rcv_encap_finish);
		return 0;
#else
		return -skb->nh.iph->protocol;
#endif
	}

drop_unlock:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
drop:
	while (--xfrm_nr >= 0)
		xfrm_state_put(xfrm_vec[xfrm_nr]);

	kfree_skb(skb);
	return 0;
}