backport to 4.4.180

Signed-off-by: Jim Ma <majinjing3@gmail.com>
Jim Ma 2022-10-26 03:28:02 +08:00
parent d799b8ebf3
commit 4aa3b7c57e
8 changed files with 693 additions and 192 deletions

.gitignore (new file)

@ -0,0 +1,8 @@
.vscode
*.o
*.o.cmd
*.ko
*.ko.cmd
.tmp_versions
igc.mod.c
modules.order

Makefile

@ -5,7 +5,8 @@
# Intel(R) I225-LM/I225-V 2.5G Ethernet Controller
#
obj-$(CONFIG_IGC) += igc.o
obj-m = igc.o
# TODO enable ethtool api
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o
igc_diag.o igc_ptp.o igc_dump.o igc_tsn.o

backport.h (new file)

@ -0,0 +1,181 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Jim Ma */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/ipv6.h>
static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
return skb->head + skb->csum_start;
}
static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}
static inline void net_prefetch(void *p)
{
prefetch(p);
#if L1_CACHE_BYTES < 128
prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}
static inline bool dev_page_is_reusable(struct page *page)
{
return likely(page_to_nid(page) == numa_mem_id() &&
!page_is_pfmemalloc(page));
}
/**
* refcount_read - get a refcount's value
* @r: the refcount
*
* Return: the refcount's value
*/
static inline unsigned int refcount_read(atomic_t *r)
{
return atomic_read(r);
}
// static inline __must_check bool __refcount_sub_and_test(int i, atomic_t *r, int *oldp)
// {
// // int old = atomic_fetch_sub_release(i, r);
// // if (oldp)
// // *oldp = old;
// // if (old == i) {
// // smp_acquire__after_ctrl_dep();
// // return true;
// // }
// // if (unlikely(old < 0 || old - i < 0))
// // refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
// return false;
// }
/**
* refcount_sub_and_test - subtract from a refcount and test if it is 0
* @i: amount to subtract from the refcount
* @r: the refcount
*
* Similar to atomic_dec_and_test(), but it will WARN, return false and
* ultimately leak on underflow and will fail to decrement when saturated
* at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides an acquire ordering on success such that free()
* must come after.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_dec(), or one of its variants, should instead be used to
* decrement a reference count.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
// static inline __must_check bool refcount_sub_and_test(int i, atomic_t *r)
// {
// return __refcount_sub_and_test(i, r, NULL);
// }
// static inline __must_check bool __refcount_dec_and_test(atomic_t *r, int *oldp)
// {
// return __refcount_sub_and_test(1, r, oldp);
// }
/**
* refcount_dec_and_test - decrement a refcount and test if it is 0
* @r: the refcount
*
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
* decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides an acquire ordering on success such that free()
* must come after.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
// static inline __must_check bool refcount_dec_and_test(atomic_t *r)
// {
// return __refcount_dec_and_test(r, NULL);
// }
/**
* skb_unref - decrement the skb's reference count
* @skb: buffer
*
* Returns true if we can free the skb.
*/
// static inline bool skb_unref(struct sk_buff *skb)
// {
// if (unlikely(!skb))
// return false;
// if (likely(atomic_read(&skb->users) == 1))
// smp_rmb();
// else if (likely(!refcount_dec_and_test(&skb->users)))
// return false;
// return true;
// }
// static void skb_release_all(struct sk_buff *skb)
// {
// skb_release_head_state(skb);
// if (likely(skb->head))
// skb_release_data(skb);
// }
// static void napi_consume_skb(struct sk_buff *skb, int budget)
// {
// /* Zero budget indicates non-NAPI context called us, like netpoll */
// if (unlikely(!budget)) {
// dev_consume_skb_any(skb);
// return;
// }
// // lockdep_assert_in_softirq();
// if (!skb_unref(skb))
// return;
// /* if reaching here SKB is ready to free */
// trace_consume_skb(skb);
// /* if SKB is a clone, don't handle this case */
// if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
// __kfree_skb(skb);
// return;
// }
// skb_release_all(skb);
// napi_skb_cache_put(skb);
// }
static inline int
pci_request_mem_regions(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM), name);
}
static inline void
pci_release_mem_regions(struct pci_dev *pdev)
{
return pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
}
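/*
 * Backport note (added for context, not part of this commit):
 * pci_request_mem_regions()/pci_release_mem_regions() only appeared in later
 * kernels. On 4.4 they are reimplemented here on top of pci_select_bars() and
 * pci_request_selected_regions()/pci_release_selected_regions(), both of
 * which already exist in 4.4.
 */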

backport_overflow.h (new file)

@ -0,0 +1,278 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
#ifndef __LINUX_OVERFLOW_H
#define __LINUX_OVERFLOW_H
#include <linux/compiler.h>
/*
* In the fallback code below, we need to compute the minimum and
* maximum values representable in a given type. These macros may also
* be useful elsewhere, so we provide them outside the
* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
*
* It would seem more obvious to do something like
*
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
*
* Unfortunately, the middle expressions, strictly speaking, have
* undefined behaviour, and at least some versions of gcc warn about
* the type_max expression (but not if -fsanitize=undefined is in
* effect; in that case, the warning is deferred to runtime...).
*
* The slightly excessive casting in type_min is to make sure the
* macros also produce sensible values for the exotic type _Bool. [The
* overflow checkers only almost work for _Bool, but that's
* a-feature-not-a-bug, since people shouldn't be doing arithmetic on
* _Bools. Besides, the gcc builtins don't allow _Bool* as third
* argument.]
*
* Idea stolen from
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
* credit to Christian Biere.
*/
#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
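/*
 * Illustrative only (not part of this commit): worked values of the macros
 * above. For s8, __type_half_max() is 64, so type_max(s8) == 127 and
 * type_min(s8) == -128; for u8, the half-max is 128, so type_max(u8) == 255
 * and type_min(u8) == 0. BUILD_BUG_ON() comes from <linux/bug.h>.
 */
static inline void overflow_type_limit_examples(void)
{
	BUILD_BUG_ON(type_max(s8) != 127);
	BUILD_BUG_ON(type_min(s8) != -128);
	BUILD_BUG_ON(type_max(u8) != 255);
	BUILD_BUG_ON(type_min(u8) != 0);
}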
#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
/*
* For simplicity and code hygiene, the fallback code below insists on
* a, b and *d having the same type (similar to the min() and max()
* macros), whereas gcc's type-generic overflow checkers accept
* different types. Hence we don't just make check_add_overflow an
* alias for __builtin_add_overflow, but add type checks similar to
* below.
*/
#define check_add_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
__builtin_add_overflow(__a, __b, __d); \
})
#define check_sub_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
__builtin_sub_overflow(__a, __b, __d); \
})
#define check_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
__builtin_mul_overflow(__a, __b, __d); \
})
#else
/* Checking for unsigned overflow is relatively easy without causing UB. */
#define __unsigned_add_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a + __b; \
*__d < __a; \
})
#define __unsigned_sub_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a - __b; \
__a < __b; \
})
/*
* If one of a or b is a compile-time constant, this avoids a division.
*/
#define __unsigned_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a * __b; \
__builtin_constant_p(__b) ? \
__b > 0 && __a > type_max(typeof(__a)) / __b : \
__a > 0 && __b > type_max(typeof(__b)) / __a; \
})
/*
* For signed types, detecting overflow is much harder, especially if
* we want to avoid UB. But the interface of these macros is such that
* we must provide a result in *d, and in fact we must produce the
* result promised by gcc's builtins, which is simply the possibly
* wrapped-around value. Fortunately, we can just formally do the
* operations in the widest relevant unsigned type (u64) and then
* truncate the result - gcc is smart enough to generate the same code
* with and without the (u64) casts.
*/
/*
* Adding two signed integers can overflow only if they have the same
* sign, and overflow has happened iff the result has the opposite
* sign.
*/
#define __signed_add_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a + (u64)__b; \
(((~(__a ^ __b)) & (*__d ^ __a)) \
& type_min(typeof(__a))) != 0; \
})
/*
* Subtraction is similar, except that overflow can now happen only
* when the signs are opposite. In this case, overflow has happened if
* the result has the opposite sign of a.
*/
#define __signed_sub_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a - (u64)__b; \
((((__a ^ __b)) & (*__d ^ __a)) \
& type_min(typeof(__a))) != 0; \
})
/*
* Signed multiplication is rather hard. gcc always follows C99, so
* division is truncated towards 0. This means that we can write the
* overflow check like this:
*
* (a > 0 && (b > MAX/a || b < MIN/a)) ||
* (a < -1 && (b > MIN/a || b < MAX/a) ||
* (a == -1 && b == MIN)
*
* The redundant casts of -1 are to silence an annoying -Wtype-limits
* (included in -Wextra) warning: When the type is u8 or u16, the
* __b_c_e in check_mul_overflow obviously selects
* __unsigned_mul_overflow, but unfortunately gcc still parses this
* code and warns about the limited range of __b.
*/
#define __signed_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
typeof(a) __tmax = type_max(typeof(a)); \
typeof(a) __tmin = type_min(typeof(a)); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a * (u64)__b; \
(__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
(__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
(__b == (typeof(__b))-1 && __a == __tmin); \
})
#define check_add_overflow(a, b, d) \
__builtin_choose_expr(is_signed_type(typeof(a)), \
__signed_add_overflow(a, b, d), \
__unsigned_add_overflow(a, b, d))
#define check_sub_overflow(a, b, d) \
__builtin_choose_expr(is_signed_type(typeof(a)), \
__signed_sub_overflow(a, b, d), \
__unsigned_sub_overflow(a, b, d))
#define check_mul_overflow(a, b, d) \
__builtin_choose_expr(is_signed_type(typeof(a)), \
__signed_mul_overflow(a, b, d), \
__unsigned_mul_overflow(a, b, d))
#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
/**
* array_size() - Calculate size of 2-dimensional array.
*
* @a: dimension one
* @b: dimension two
*
* Calculates size of 2-dimensional array: @a * @b.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
static inline __must_check size_t array_size(size_t a, size_t b)
{
size_t bytes;
if (check_mul_overflow(a, b, &bytes))
return SIZE_MAX;
return bytes;
}
/**
* array3_size() - Calculate size of 3-dimensional array.
*
* @a: dimension one
* @b: dimension two
* @c: dimension three
*
* Calculates size of 3-dimensional array: @a * @b * @c.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
{
size_t bytes;
if (check_mul_overflow(a, b, &bytes))
return SIZE_MAX;
if (check_mul_overflow(bytes, c, &bytes))
return SIZE_MAX;
return bytes;
}
static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
{
size_t bytes;
if (check_mul_overflow(n, size, &bytes))
return SIZE_MAX;
if (check_add_overflow(bytes, c, &bytes))
return SIZE_MAX;
return bytes;
}
/**
* struct_size() - Calculate size of structure with trailing array.
* @p: Pointer to the structure.
* @member: Name of the array member.
* @n: Number of elements in the array.
*
* Calculates size of memory needed for structure @p followed by an
* array of @n @member elements.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
#define struct_size(p, member, n) \
__ab_c_size(n, \
sizeof(*(p)->member) + __must_be_array((p)->member),\
sizeof(*(p)))
#endif /* __LINUX_OVERFLOW_H */
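/*
 * Illustrative usage (not part of this commit; the struct and function names
 * are made up): sizing an allocation for a structure with a trailing flexible
 * array. Because struct_size() saturates to SIZE_MAX on overflow, an absurd
 * element count makes kmalloc() fail cleanly instead of returning a buffer
 * that is too small. Needs <linux/slab.h> for kmalloc().
 */
struct example_filter_tbl {
	unsigned int count;
	u32 rules[];
};

static inline struct example_filter_tbl *example_filter_tbl_alloc(size_t n)
{
	struct example_filter_tbl *tbl;

	tbl = kmalloc(struct_size(tbl, rules, n), GFP_KERNEL);
	if (tbl)
		tbl->count = n;
	return tbl;
}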

igc_ethtool.c

@ -8,6 +8,7 @@
#include "igc.h"
#include "igc_diag.h"
#include "backport_overflow.h"
/* forward declaration */
struct igc_stats {
@ -16,6 +17,14 @@ struct igc_stats {
int stat_offset;
};
/**
* sizeof_field(TYPE, MEMBER)
*
* @TYPE: The structure containing the field of interest
* @MEMBER: The field to return the size of
*/
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define IGC_STAT(_name, _stat) { \
.stat_string = _name, \
.sizeof_stat = sizeof_field(struct igc_adapter, _stat), \
@ -380,7 +389,7 @@ static int igc_ethtool_set_wol(struct net_device *netdev,
{
struct igc_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))

igc_main.c

@ -16,6 +16,8 @@
#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "backport.h"
#include "backport_overflow.h"
#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
@ -362,13 +364,11 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
DMA_FROM_DEVICE);
/* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev,
dma_unmap_page(rx_ring->dev,
buffer_info->dma,
igc_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IGC_RX_DMA_ATTR);
__page_frag_cache_drain(buffer_info->page,
buffer_info->pagecnt_bias);
DMA_FROM_DEVICE);
__free_page(buffer_info->page);
i++;
if (i == rx_ring->count)
@ -629,6 +629,29 @@ static void igc_configure_tx(struct igc_adapter *adapter)
igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
// FIXME: copied from igc_ethtool.c; remove this function once the ethtool API is enabled
void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 reg = IGC_RETA(0);
u32 shift = 0;
int i = 0;
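/* Each 32-bit RETA register packs four one-byte queue indices; the word is
 * assembled low byte first (j = 3..0), then written, and the walk advances by
 * four table entries.
 */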
while (i < IGC_RETA_SIZE) {
u32 val = 0;
int j;
for (j = 3; j >= 0; j--) {
val <<= 8;
val |= adapter->rss_indir_tbl[i + j];
}
wr32(reg, val << shift);
reg += 4;
i += 4;
}
}
/**
* igc_setup_mrqc - configure the multiple receive queue control registers
* @adapter: Board private structure
@ -897,6 +920,7 @@ static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
{
ktime_t cycle_time = adapter->cycle_time;
ktime_t base_time = adapter->base_time;
ktime_t sub_time = ktime_sub(txtime, base_time);
u32 launchtime;
/* FIXME: when using ETF together with taprio, we may have a
@ -905,7 +929,7 @@ static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
* IGC_BASET, as the value written into the launchtime
* descriptor field may be misinterpreted.
*/
div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
div_s64_rem(sub_time.tv64, cycle_time.tv64, &launchtime);
return cpu_to_le32(launchtime);
}
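/*
 * Backport note (added for context, not part of the commit): before kernel
 * 4.10, ktime_t is a union whose raw nanosecond count lives in the .tv64
 * member, so ktime arithmetic results have to be unwrapped with .tv64 before
 * they can be handed to div_s64_rem()/div64_s64() as plain s64 values. The
 * same pattern shows up in igc_tsn.c below.
 */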
@ -966,16 +990,17 @@ csum_failed:
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
fallthrough;
break;
case offsetof(struct udphdr, check):
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
if (skb_csum_is_sctp(skb)) {
type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
break;
}
fallthrough;
// if (skb->csum_not_inet) {
// type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
// break;
// }
skb_checksum_help(skb);
goto csum_failed;
default:
skb_checksum_help(skb);
goto csum_failed;
@ -1173,7 +1198,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
/* Make sure there is space in the ring for the next send. */
igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
}
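/*
 * Backport note (added for context): netdev_xmit_more() only exists on newer
 * kernels (5.2+); on 4.4 the same doorbell-batching hint is carried in the
 * skb->xmit_more bit, so it is read directly here.
 */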
@ -1596,7 +1621,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGC_RX_HDR_LEN)
headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
headlen = eth_get_headlen(va, IGC_RX_HDR_LEN);
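/*
 * Backport note (added for context): on 4.4 eth_get_headlen() takes only
 * (data, len); the struct net_device argument was added in 5.2, so skb->dev
 * is dropped from the call.
 */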
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
@ -1659,7 +1684,7 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
if (unlikely(atomic_read(&page->_count) - pagecnt_bias > 1))
return false;
#else
#define IGC_LAST_OFFSET \
@ -1674,7 +1699,7 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
* number of references the driver holds.
*/
if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX);
atomic_add(USHRT_MAX, &page->_count);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
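/*
 * Backport note (added for context): the page_ref_count()/page_ref_add()
 * helpers (and the _refcount field name) appeared in 4.6. On 4.4 the page
 * reference count is still the _count atomic_t, so it is read and bumped with
 * atomic_read()/atomic_add() directly, here and in the reuse check above.
 */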
@ -1752,11 +1777,10 @@ static void igc_put_rx_buffer(struct igc_ring *rx_ring,
/* We are not reusing the buffer so unmap it and free
* any references we are holding to it
*/
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
IGC_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
dma_unmap_page(rx_ring->dev, rx_buffer->dma,
igc_rx_pg_size(rx_ring),
DMA_FROM_DEVICE);
__free_page(rx_buffer->page);
}
/* clear contents of rx_buffer */
@ -1786,10 +1810,9 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
}
/* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0,
dma = dma_map_page(rx_ring->dev, page, 0,
igc_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IGC_RX_DMA_ATTR);
DMA_FROM_DEVICE);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@ -2016,7 +2039,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
napi_consume_skb(tx_buffer->skb, napi_budget);
dev_consume_skb_any(tx_buffer->skb);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@ -3291,10 +3314,10 @@ static int igc_poll(struct napi_struct *napi, int budget)
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
if (likely(napi_complete_done(napi, work_done)))
napi_complete_done(napi, work_done);
igc_ring_irq_enable(q_vector);
return min(work_done, budget - 1);
return 0;
}
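/*
 * Backport note (added for context): on 4.4 napi_complete_done() returns void
 * (the bool return value arrived in 4.10), so the poll routine cannot gate
 * interrupt re-enabling on it and simply returns 0 once polling is complete,
 * as older drivers of that era did.
 */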
/**
@ -3771,7 +3794,7 @@ void igc_down(struct igc_adapter *adapter)
/* flush and sleep below */
/* set trans_start so we don't get spurious watchdogs during reset */
netif_trans_update(netdev);
netdev->trans_start = jiffies;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
@ -3951,23 +3974,11 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
mac_hdr_len = skb_network_header(skb) - skb->data;
if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_TSO |
NETIF_F_TSO6);
NETIF_F_HW_VLAN_CTAG_TX);
network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
NETIF_F_TSO |
NETIF_F_TSO6);
/* We can only support IPv4 TSO in tunnels if we can mangle the
* inner IP ID field, so strip TSO if MANGLEID is not supported.
*/
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
features &= ~NETIF_F_TSO;
return features & ~(NETIF_F_HW_CSUM);
return features;
}
@ -4683,10 +4694,10 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
ring = adapter->tx_ring[queue];
ring->launchtime_enable = enable;
if (adapter->base_time)
if (adapter->base_time.tv64)
return 0;
adapter->cycle_time = NSEC_PER_SEC;
adapter->cycle_time.tv64 = NSEC_PER_SEC;
for (i = 0; i < adapter->num_tx_queues; i++) {
ring = adapter->tx_ring[i];
@ -4706,143 +4717,143 @@ static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
return timespec64_compare(now, &b) > 0;
}
static bool validate_schedule(struct igc_adapter *adapter,
const struct tc_taprio_qopt_offload *qopt)
{
int queue_uses[IGC_MAX_TX_QUEUES] = { };
struct timespec64 now;
size_t n;
// static bool validate_schedule(struct igc_adapter *adapter,
// const struct tc_taprio_qopt_offload *qopt)
// {
// int queue_uses[IGC_MAX_TX_QUEUES] = { };
// struct timespec64 now;
// size_t n;
if (qopt->cycle_time_extension)
return false;
// if (qopt->cycle_time_extension)
// return false;
igc_ptp_read(adapter, &now);
// igc_ptp_read(adapter, &now);
/* If we program the controller's BASET registers with a time
* in the future, it will hold all the packets until that
* time, causing a lot of TX Hangs, so to avoid that, we
* reject schedules that would start in the future.
*/
if (!is_base_time_past(qopt->base_time, &now))
return false;
// /* If we program the controller's BASET registers with a time
// * in the future, it will hold all the packets until that
// * time, causing a lot of TX Hangs, so to avoid that, we
// * reject schedules that would start in the future.
// */
// if (!is_base_time_past(qopt->base_time, &now))
// return false;
for (n = 0; n < qopt->num_entries; n++) {
const struct tc_taprio_sched_entry *e;
int i;
// for (n = 0; n < qopt->num_entries; n++) {
// const struct tc_taprio_sched_entry *e;
// int i;
e = &qopt->entries[n];
// e = &qopt->entries[n];
/* i225 only supports "global" frame preemption
* settings.
*/
if (e->command != TC_TAPRIO_CMD_SET_GATES)
return false;
// /* i225 only supports "global" frame preemption
// * settings.
// */
// if (e->command != TC_TAPRIO_CMD_SET_GATES)
// return false;
for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
if (e->gate_mask & BIT(i))
queue_uses[i]++;
// for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
// if (e->gate_mask & BIT(i))
// queue_uses[i]++;
if (queue_uses[i] > 1)
return false;
}
}
// if (queue_uses[i] > 1)
// return false;
// }
// }
return true;
}
// return true;
// }
static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
struct tc_etf_qopt_offload *qopt)
{
struct igc_hw *hw = &adapter->hw;
int err;
// static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
// struct tc_etf_qopt_offload *qopt)
// {
// struct igc_hw *hw = &adapter->hw;
// int err;
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
// if (hw->mac.type != igc_i225)
// return -EOPNOTSUPP;
err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
if (err)
return err;
// err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
// if (err)
// return err;
return igc_tsn_offload_apply(adapter);
}
// return igc_tsn_offload_apply(adapter);
// }
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
u32 start_time = 0, end_time = 0;
size_t n;
// static int igc_save_qbv_schedule(struct igc_adapter *adapter,
// struct tc_taprio_qopt_offload *qopt)
// {
// u32 start_time = 0, end_time = 0;
// size_t n;
if (!qopt->enable) {
adapter->base_time = 0;
return 0;
}
// if (!qopt->enable) {
// adapter->base_time = 0;
// return 0;
// }
if (adapter->base_time)
return -EALREADY;
// if (adapter->base_time)
// return -EALREADY;
if (!validate_schedule(adapter, qopt))
return -EINVAL;
// if (!validate_schedule(adapter, qopt))
// return -EINVAL;
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
// adapter->cycle_time = qopt->cycle_time;
// adapter->base_time = qopt->base_time;
/* FIXME: be a little smarter about cases when the gate for a
* queue stays open for more than one entry.
*/
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
int i;
// /* FIXME: be a little smarter about cases when the gate for a
// * queue stays open for more than one entry.
// */
// for (n = 0; n < qopt->num_entries; n++) {
// struct tc_taprio_sched_entry *e = &qopt->entries[n];
// int i;
end_time += e->interval;
// end_time += e->interval;
for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
// for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
// struct igc_ring *ring = adapter->tx_ring[i];
if (!(e->gate_mask & BIT(i)))
continue;
// if (!(e->gate_mask & BIT(i)))
// continue;
ring->start_time = start_time;
ring->end_time = end_time;
}
// ring->start_time = start_time;
// ring->end_time = end_time;
// }
start_time += e->interval;
}
// start_time += e->interval;
// }
return 0;
}
// return 0;
// }
static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
struct igc_hw *hw = &adapter->hw;
int err;
// static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
// struct tc_taprio_qopt_offload *qopt)
// {
// struct igc_hw *hw = &adapter->hw;
// int err;
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
// if (hw->mac.type != igc_i225)
// return -EOPNOTSUPP;
err = igc_save_qbv_schedule(adapter, qopt);
if (err)
return err;
// err = igc_save_qbv_schedule(adapter, qopt);
// if (err)
// return err;
return igc_tsn_offload_apply(adapter);
}
// return igc_tsn_offload_apply(adapter);
// }
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct igc_adapter *adapter = netdev_priv(dev);
// static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
// void *type_data)
// {
// struct igc_adapter *adapter = netdev_priv(dev);
switch (type) {
case TC_SETUP_QDISC_TAPRIO:
return igc_tsn_enable_qbv_scheduling(adapter, type_data);
// switch (type) {
// case TC_SETUP_QDISC_TAPRIO:
// return igc_tsn_enable_qbv_scheduling(adapter, type_data);
case TC_SETUP_QDISC_ETF:
return igc_tsn_enable_launchtime(adapter, type_data);
// case TC_SETUP_QDISC_ETF:
// return igc_tsn_enable_launchtime(adapter, type_data);
default:
return -EOPNOTSUPP;
}
}
// default:
// return -EOPNOTSUPP;
// }
// }
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
@ -4856,7 +4867,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
.ndo_do_ioctl = igc_ioctl,
.ndo_setup_tc = igc_setup_tc,
// .ndo_setup_tc = igc_setup_tc,
};
/* PCIe configuration access */
@ -5047,7 +5058,7 @@ static int igc_probe(struct pci_dev *pdev,
hw->hw_addr = adapter->io_addr;
netdev->netdev_ops = &igc_netdev_ops;
igc_ethtool_set_ops(netdev);
// igc_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
netdev->mem_start = pci_resource_start(pdev, 0);
@ -5071,23 +5082,11 @@ static int igc_probe(struct pci_dev *pdev,
/* Add supported features to the features list*/
netdev->features |= NETIF_F_SG;
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
netdev->features |= NETIF_F_TSO_ECN;
// netdev->features |= NETIF_F_TSO;
// netdev->features |= NETIF_F_TSO6;
// netdev->features |= NETIF_F_TSO_ECN;
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SCTP_CRC;
netdev->features |= NETIF_F_HW_TC;
#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
NETIF_F_GSO_GRE_CSUM | \
NETIF_F_GSO_IPXIP4 | \
NETIF_F_GSO_IPXIP6 | \
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
/* setup the private structure */
err = igc_sw_init(adapter);
@ -5102,8 +5101,8 @@ static int igc_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
// netdev->min_mtu = ETH_MIN_MTU;
// netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
/* before reading the NVM, reset the controller to put the device in a
* known good starting state
@ -5118,11 +5117,11 @@ static int igc_probe(struct pci_dev *pdev,
}
}
if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
// if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
/* copy the MAC address out of the NVM */
if (hw->mac.ops.read_mac_addr(hw))
dev_err(&pdev->dev, "NVM Read Error\n");
}
// }
memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
@ -5182,10 +5181,11 @@ static int igc_probe(struct pci_dev *pdev,
adapter->ei = *ei;
/* print pcie link status and MAC address */
pcie_print_link_status(pdev);
// TODO backport pcie_print_link_status from drivers/pci/pci.c
// pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
// dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/* Disable EEE for internal PHY devices */
hw->dev_spec._base.eee_enable = false;
adapter->flags &= ~IGC_FLAG_EEE;
@ -5206,7 +5206,8 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_mem_regions(pdev);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);

igc_ptp.c

@ -16,6 +16,31 @@
#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
#define netdev_level_once(level, dev, fmt, ...) \
do { \
static bool __print_once __read_mostly; \
\
if (!__print_once) { \
__print_once = true; \
netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
} \
} while (0)
#define netdev_emerg_once(dev, fmt, ...) \
netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
/* SYSTIM read access for I225 */
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts)
{
@ -84,8 +109,7 @@ static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta)
}
static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
struct timespec64 *ts)
{
struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
ptp_caps);
@ -94,10 +118,8 @@ static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp,
spin_lock_irqsave(&igc->tmreg_lock, flags);
ptp_read_system_prets(sts);
ts->tv_nsec = rd32(IGC_SYSTIML);
ts->tv_sec = rd32(IGC_SYSTIMH);
ptp_read_system_postts(sts);
spin_unlock_irqrestore(&igc->tmreg_lock, flags);
@ -308,7 +330,7 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_NTP_ALL:
// case HWTSTAMP_FILTER_NTP_ALL:
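/* HWTSTAMP_FILTER_NTP_ALL is not defined in the 4.4 UAPI headers (it was
 * added in a later kernel), hence the case is dropped here.
 */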
case HWTSTAMP_FILTER_ALL:
igc_ptp_enable_rx_timestamp(adapter);
config->rx_filter = HWTSTAMP_FILTER_ALL;
@ -491,9 +513,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225;
// adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225;
adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225;
adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225;
adapter->ptp_caps.gettime64 = igc_ptp_gettimex64_i225;
adapter->ptp_caps.settime64 = igc_ptp_settime_i225;
adapter->ptp_caps.enable = igc_ptp_feature_enable_i225;
break;
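/*
 * Backport note (added for context): struct ptp_clock_info on 4.4 has neither
 * an .adjfine nor a .gettimex64 callback, and the ptp_system_timestamp
 * helpers (ptp_read_system_prets()/ptp_read_system_postts()) do not exist
 * either. The backport therefore keeps .adjtime, registers the read routine
 * as plain .gettime64, and drops the system-timestamp snapshot calls shown
 * earlier in this file.
 */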

igc_tsn.c

@ -30,7 +30,7 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED))
return 0;
adapter->cycle_time = 0;
adapter->cycle_time.tv64 = 0;
wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
@ -71,7 +71,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)
return 0;
cycle = adapter->cycle_time;
cycle = adapter->cycle_time.tv64;
base_time = adapter->base_time;
wr32(IGC_TSAUXC, 0);
@ -92,7 +92,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_STQT(i), ring->start_time);
wr32(IGC_ENDQT(i), ring->end_time);
if (adapter->base_time) {
if (adapter->base_time.tv64) {
/* If we have a base_time we are in "taprio"
* mode and we need to be strict about the
* cycles: only transmit a packet if it can be
@ -116,11 +116,12 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (ktime_compare(systim, base_time) > 0) {
s64 n;
n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
ktime_t sub_time = ktime_sub(systim, base_time);
n = div64_s64(sub_time.tv64, cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
}
baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
baset_h = div_s64_rem(base_time.tv64, NSEC_PER_SEC, &baset_l);
wr32(IGC_BASET_H, baset_h);
wr32(IGC_BASET_L, baset_l);
@ -132,7 +133,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter);
bool is_any_enabled = adapter->base_time.tv64 || is_any_launchtime(adapter);
if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled)
return 0;