Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Commit 3dca3f38cf: We change the ESP GSO handlers to only segment the packets. ESP handling and encryption are deferred to validate_xmit_xfrm(), where this is done for non-GRO packets too. This makes the code more robust and prepares for asynchronous crypto handling. Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
280 lines · 6.1 KiB · C
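As a point of reference for the deferral described in the commit message, the sketch below (a simplified assumption, not the upstream source) shows roughly where validate_xmit_xfrm() sits in the transmit path: the core stack is expected to call it from a validate_xmit_skb()-style helper in net/core/dev.c, just before the skb is handed to the driver.

/* Minimal sketch, assuming a validate_xmit_skb()-style caller in
 * net/core/dev.c; the name and structure are illustrative only.
 */
static struct sk_buff *validate_xmit_skb_sketch(struct sk_buff *skb,
						struct net_device *dev)
{
	netdev_features_t features = netif_skb_features(skb);

	/* The ESP GSO handlers only segmented the packet earlier; the actual
	 * outer-mode transform and ESP output (or software crypto fallback)
	 * are performed here, immediately before driver transmission.
	 */
	return validate_xmit_xfrm(skb, features);
}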
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
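/* Perform the ESP transformation in software for packets that cannot be
 * handled by the offload device: segment rerouted GSO packets, then apply
 * the outer mode and the ESP type_offload xmit handler to each packet.
 */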
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
{
	int err;
	__u32 seq;
	struct xfrm_state *x;
	struct sk_buff *skb2;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	x = skb->sp->xvec[skb->sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
				kfree_skb(skb);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		} else {
			return skb;
		}
	}

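	/* Single packet (no segment list): transform it in place. */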
	if (!skb->next) {
		x->outer_mode->xmit(x, skb);

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

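	/* Segment list: give each segment its own sequence number, then apply
	 * the outer mode and the ESP output handler per segment.
	 */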
	skb2 = skb;
	seq = xo->seq.low;

	do {
		struct sk_buff *nskb = skb2->next;

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_GSO_SEGMENT;
		xo->seq.low = seq;
		xo->seq.hi = xfrm_replay_seqhi(x, seq);

		if (!(features & NETIF_F_HW_ESP))
			xo->flags |= CRYPTO_FALLBACK;

		x->outer_mode->xmit(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		}

		if (!skb_is_gso(skb2))
			seq++;
		else
			seq += skb_shinfo(skb2)->gso_segs;

		skb_push(skb2, skb2->data - skb_mac_header(skb2));

		skb2 = nskb;
	} while (skb2);

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

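/* Attach an xfrm state to the net_device named in the userspace offload
 * request and ask the driver to install the state in hardware.
 */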
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation, TFC padding and ESN. */
	if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN))
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family, x->props.output_mark);
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	xso->dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		dev_put(dev);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

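/* Decide whether this skb can take the hardware offload path: the state
 * must be offloaded to the device the packet is routed through and the
 * packet (or its GSO segments) must fit the path MTU; the driver gets the
 * final say via its xdo_dev_offload_ok callback.
 */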
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev)) &&
	    !xdst->child->xfrm && x->type->get_mtu) {
		mtu = x->type->get_mtu(x, xdst->child_mtu_cached);

		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
#endif

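/* Netdevice notifier helpers: sanity-check ESP offload feature flags when a
 * device registers or changes features, and flush offloaded states and the
 * policy cache when a device goes down or unregisters.
 */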
static int xfrm_dev_register(struct net_device *dev)
{
	if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
		return NOTIFY_BAD;
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}

static int xfrm_dev_unregister(struct net_device *dev)
{
	xfrm_policy_cache_flush();
	return NOTIFY_DONE;
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
		return NOTIFY_BAD;
	else if (!(dev->features & NETIF_F_HW_ESP))
		dev->xfrmdev_ops = NULL;

	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	xfrm_policy_cache_flush();
	return NOTIFY_DONE;
}

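/* Dispatch netdevice notifier events to the helpers above. */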
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_UNREGISTER:
		return xfrm_dev_unregister(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

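/* Register the notifier at init time so xfrm sees netdevice events. */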
void __net_init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}