commit 6f9dbadc1a
This patch fixes corruption of the skb_shared_info area upon reception of
4K jumbo packets.

Originally, build_skb() was used so that the receive page could be reused
for the skb, eliminating the need for extra fragments. But that logic does
not take into account that skb_shared_info must be reserved at the end of
the skb data area.

When the packet data consumes the whole page (4K), the skb_shinfo location
overflows the page. As a consequence, __build_skb() zeroes the shinfo data
beyond the allocated page, corrupting the next page.

The issue is rarely seen in real life because jumbo frames are normally
larger than 4K, which triggers another code path. But it is 100%
reproducible with a simple scapy packet, such as:
sendp(IP(dst="192.168.100.3") / TCP(dport=443) \
/ Raw(RandString(size=(4096-40))), iface="enp1s0")
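
For reference, the arithmetic behind the fix can be sketched in a few lines
of plain userspace C. This is an illustrative model only, not driver code:
RX_FRAME_MAX and SHINFO_RESERVE are assumed round numbers standing in for
AQ_CFG_RX_FRAME_MAX and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).

#include <stdio.h>

/* Assumed sizes for illustration; the real values come from the driver
 * configuration and the kernel's skb_shared_info layout.
 */
#define RX_FRAME_MAX   4096u  /* one full 4K page per receive buffer */
#define SHINFO_RESERVE  320u  /* tail room build_skb() needs for shinfo */

/* build_skb() places skb_shared_info at the tail of the same buffer, so
 * the receive page may only be reused when the payload leaves that much
 * room free; otherwise the frame has to take the fragment path.
 */
static int can_reuse_page(unsigned int payload_len)
{
        return payload_len <= RX_FRAME_MAX - SHINFO_RESERVE;
}

int main(void)
{
        printf("1500-byte frame: reuse page = %d\n", can_reuse_page(1500));
        printf("4096-byte frame: reuse page = %d\n", can_reuse_page(4096));
        return 0;
}

With these assumed numbers a standard 1500-byte frame fits in the page
alongside the shared info, while a frame that fills the whole buffer does
not and must be assembled from fragments instead; the driver below draws
the same line at AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN.
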
Fixes: 018423e90b ("net: ethernet: aquantia: Add ring support code")
Reported-by: Friedemann Gerold <f.gerold@b-c-s.de>
Reported-by: Michael Rauch <michael@rauch.be>
Signed-off-by: Friedemann Gerold <f.gerold@b-c-s.de>
Tested-by: Nikita Danilov <nikita.danilov@aquantia.com>
Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
373 lines · 8.0 KiB · C
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
                                       struct aq_nic_s *aq_nic)
{
        int err = 0;

        self->buff_ring =
                kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

        if (!self->buff_ring) {
                err = -ENOMEM;
                goto err_exit;
        }
        self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
                                           self->size * self->dx_size,
                                           &self->dx_ring_pa, GFP_KERNEL);
        if (!self->dx_ring) {
                err = -ENOMEM;
                goto err_exit;
        }

err_exit:
        if (err < 0) {
                aq_ring_free(self);
                self = NULL;
        }
        return self;
}

struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
                                   struct aq_nic_s *aq_nic,
                                   unsigned int idx,
                                   struct aq_nic_cfg_s *aq_nic_cfg)
{
        int err = 0;

        self->aq_nic = aq_nic;
        self->idx = idx;
        self->size = aq_nic_cfg->txds;
        self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

        self = aq_ring_alloc(self, aq_nic);
        if (!self) {
                err = -ENOMEM;
                goto err_exit;
        }

err_exit:
        if (err < 0) {
                aq_ring_free(self);
                self = NULL;
        }
        return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
                                   struct aq_nic_s *aq_nic,
                                   unsigned int idx,
                                   struct aq_nic_cfg_s *aq_nic_cfg)
{
        int err = 0;

        self->aq_nic = aq_nic;
        self->idx = idx;
        self->size = aq_nic_cfg->rxds;
        self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;

        self = aq_ring_alloc(self, aq_nic);
        if (!self) {
                err = -ENOMEM;
                goto err_exit;
        }

err_exit:
        if (err < 0) {
                aq_ring_free(self);
                self = NULL;
        }
        return self;
}

int aq_ring_init(struct aq_ring_s *self)
{
        self->hw_head = 0;
        self->sw_head = 0;
        self->sw_tail = 0;
        return 0;
}

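/* Return true if ring index i lies strictly between h and t, taking
 * wrap-around of the circular descriptor ring into account.
 */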
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
                                       unsigned int t)
{
        return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

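/* Stop the TX queue when too few descriptors remain to hold a maximally
 * fragmented skb, and wake it again once enough have been reclaimed.
 */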
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
        if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
                aq_ring_queue_stop(ring);
        else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
                aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
        struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

        if (__netif_subqueue_stopped(ndev, ring->idx)) {
                netif_wake_subqueue(ndev, ring->idx);
                ring->stats.tx.queue_restarts++;
        }
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
        struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

        if (!__netif_subqueue_stopped(ndev, ring->idx))
                netif_stop_subqueue(ndev, ring->idx);
}

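/* Reclaim completed TX descriptors up to hw_head: unmap the DMA buffers
 * and free each skb at its end-of-packet descriptor. Returns true if the
 * cleaning budget was not exhausted.
 */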
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
        struct device *dev = aq_nic_get_dev(self->aq_nic);
        unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET;

        for (; self->sw_head != self->hw_head && budget--;
                self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

                if (likely(buff->is_mapped)) {
                        if (unlikely(buff->is_sop)) {
                                if (!buff->is_eop &&
                                    buff->eop_index != 0xffffU &&
                                    (!aq_ring_dx_in_range(self->sw_head,
                                                          buff->eop_index,
                                                          self->hw_head)))
                                        break;

                                dma_unmap_single(dev, buff->pa, buff->len,
                                                 DMA_TO_DEVICE);
                        } else {
                                dma_unmap_page(dev, buff->pa, buff->len,
                                               DMA_TO_DEVICE);
                        }
                }

                if (unlikely(buff->is_eop))
                        dev_kfree_skb_any(buff->skb);

                buff->pa = 0U;
                buff->eop_index = 0xffffU;
        }

        return !!budget;
}

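/* Tail room that must stay reserved for skb_shared_info when a receive
 * page is handed to build_skb(); frames longer than
 * AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN take the fragment path instead.
 */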
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self,
                     struct napi_struct *napi,
                     int *work_done,
                     int budget)
{
        struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
        int err = 0;
        bool is_rsc_completed = true;

        for (; (self->sw_head != self->hw_head) && budget;
                self->sw_head = aq_ring_next_dx(self, self->sw_head),
                --budget, ++(*work_done)) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
                struct sk_buff *skb = NULL;
                unsigned int next_ = 0U;
                unsigned int i = 0U;
                struct aq_ring_buff_s *buff_ = NULL;

                if (buff->is_error) {
                        __free_pages(buff->page, 0);
                        continue;
                }

                if (buff->is_cleaned)
                        continue;

                if (!buff->is_eop) {
                        for (next_ = buff->next,
                             buff_ = &self->buff_ring[next_]; true;
                             next_ = buff_->next,
                             buff_ = &self->buff_ring[next_]) {
                                is_rsc_completed =
                                        aq_ring_dx_in_range(self->sw_head,
                                                            next_,
                                                            self->hw_head);

                                if (unlikely(!is_rsc_completed)) {
                                        is_rsc_completed = false;
                                        break;
                                }

                                if (buff_->is_eop)
                                        break;
                        }

                        if (!is_rsc_completed) {
                                err = 0;
                                goto err_exit;
                        }
                }

                /* for single fragment packets use build_skb() */
                if (buff->is_eop &&
                    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
                        skb = build_skb(page_address(buff->page),
                                        AQ_CFG_RX_FRAME_MAX);
                        if (unlikely(!skb)) {
                                err = -ENOMEM;
                                goto err_exit;
                        }

                        skb_put(skb, buff->len);
                } else {
                        skb = netdev_alloc_skb(ndev, ETH_HLEN);
                        if (unlikely(!skb)) {
                                err = -ENOMEM;
                                goto err_exit;
                        }
                        skb_put(skb, ETH_HLEN);
                        memcpy(skb->data, page_address(buff->page), ETH_HLEN);

                        skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
                                        buff->len - ETH_HLEN,
                                        SKB_TRUESIZE(buff->len - ETH_HLEN));

                        if (!buff->is_eop) {
                                for (i = 1U, next_ = buff->next,
                                     buff_ = &self->buff_ring[next_];
                                     true; next_ = buff_->next,
                                     buff_ = &self->buff_ring[next_], ++i) {
                                        skb_add_rx_frag(skb, i,
                                                        buff_->page, 0,
                                                        buff_->len,
                                                        SKB_TRUESIZE(buff->len -
                                                                     ETH_HLEN));
                                        buff_->is_cleaned = 1;

                                        if (buff_->is_eop)
                                                break;
                                }
                        }
                }

                skb->protocol = eth_type_trans(skb, ndev);
                if (unlikely(buff->is_cso_err)) {
                        ++self->stats.rx.errors;
                        skb->ip_summed = CHECKSUM_NONE;
                } else {
                        if (buff->is_ip_cso) {
                                __skb_incr_checksum_unnecessary(skb);
                                if (buff->is_udp_cso || buff->is_tcp_cso)
                                        __skb_incr_checksum_unnecessary(skb);
                        } else {
                                skb->ip_summed = CHECKSUM_NONE;
                        }
                }

                skb_set_hash(skb, buff->rss_hash,
                             buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
                             PKT_HASH_TYPE_NONE);

                skb_record_rx_queue(skb, self->idx);

                ++self->stats.rx.packets;
                self->stats.rx.bytes += skb->len;

                napi_gro_receive(napi, skb);
        }

err_exit:
        return err;
}

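/* Refill the RX ring: allocate and DMA-map a fresh page-backed buffer of
 * AQ_CFG_RX_FRAME_MAX bytes for every free descriptor.
 */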
int aq_ring_rx_fill(struct aq_ring_s *self)
{
        unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
                (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
        struct aq_ring_buff_s *buff = NULL;
        int err = 0;
        int i = 0;

        for (i = aq_ring_avail_dx(self); i--;
                self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
                buff = &self->buff_ring[self->sw_tail];

                buff->flags = 0U;
                buff->len = AQ_CFG_RX_FRAME_MAX;

                buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
                if (!buff->page) {
                        err = -ENOMEM;
                        goto err_exit;
                }

                buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
                                        buff->page, 0,
                                        AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

                if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
                        err = -ENOMEM;
                        goto err_exit;
                }

                buff = NULL;
        }

err_exit:
        if (err < 0) {
                if (buff && buff->page)
                        __free_pages(buff->page, 0);
        }

        return err;
}

void aq_ring_rx_deinit(struct aq_ring_s *self)
{
        if (!self)
                goto err_exit;

        for (; self->sw_head != self->sw_tail;
                self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

                dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
                               AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

                __free_pages(buff->page, 0);
        }

err_exit:;
}

void aq_ring_free(struct aq_ring_s *self)
{
        if (!self)
                goto err_exit;

        kfree(self->buff_ring);

        if (self->dx_ring)
                dma_free_coherent(aq_nic_get_dev(self->aq_nic),
                                  self->size * self->dx_size, self->dx_ring,
                                  self->dx_ring_pa);

err_exit:;
}