initial update

Signed-off-by: Jim Ma <majinjing3@gmail.com>
Jim Ma 2022-10-25 01:08:22 +08:00
commit d799b8ebf3
27 changed files with 13951 additions and 0 deletions

11
Makefile Normal file

@@ -0,0 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Intel Corporation
#
# Intel(R) I225-LM/I225-V 2.5G Ethernet Controller
#
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o

0
Module.symvers Normal file

18
README.md Normal file

@@ -0,0 +1,18 @@
# Intel igc driver for Synology
> Backported from Linux kernel v5.12, commit 9f4ad9e425a1d3b6a34617b8ea226d56a119a717
## Build
### 1. Set up the development environment
```shell
sudo chroot /synology-toolkit/build_env/ds.geminilake-7.1
```
### 2. Build the module
```shell
cd /usr/src/synology-igc/
make -C /usr/local/x86_64-pc-linux-gnu/x86_64-pc-linux-gnu/sys-root/usr/lib/modules/DSM-7.1/build M=$PWD modules
```
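### 3. Load the module (optional smoke test)
If the build succeeds it produces `igc.ko` in the source directory. A minimal check on the target DSM system might look like this (illustrative; assumes a root shell on the NAS):
```shell
insmod igc.ko        # load the freshly built module
dmesg | tail         # confirm the I225/I226 NIC was probed
```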

568
igc.h Normal file

@@ -0,0 +1,568 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_H_
#define _IGC_H_
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/ethtool.h>
#include <linux/sctp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include "igc_hw.h"
void igc_ethtool_set_ops(struct net_device *);
/* Transmit and receive queues */
#define IGC_MAX_RX_QUEUES 4
#define IGC_MAX_TX_QUEUES 4
#define MAX_Q_VECTORS 8
#define MAX_STD_JUMBO_FRAME_SIZE 9216
#define MAX_ETYPE_FILTER 8
#define IGC_RETA_SIZE 128
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
};
struct igc_tx_queue_stats {
u64 packets;
u64 bytes;
u64 restart_queue;
u64 restart_queue2;
};
struct igc_rx_queue_stats {
u64 packets;
u64 bytes;
u64 drops;
u64 csum_err;
u64 alloc_failed;
};
struct igc_rx_packet_stats {
u64 ipv4_packets; /* IPv4 headers processed */
u64 ipv4e_packets; /* IPv4E headers with extensions processed */
u64 ipv6_packets; /* IPv6 headers processed */
u64 ipv6e_packets; /* IPv6E headers with extensions processed */
u64 tcp_packets; /* TCP headers processed */
u64 udp_packets; /* UDP headers processed */
u64 sctp_packets; /* SCTP headers processed */
u64 nfs_packets; /* NFS headers processed */
u64 other_packets;
};
struct igc_ring_container {
struct igc_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
u8 count; /* total number of rings in vector */
u8 itr; /* current ITR setting for ring */
};
struct igc_ring {
struct igc_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
struct device *dev; /* device for dma mapping */
union { /* array of buffer info structs */
struct igc_tx_buffer *tx_buffer_info;
struct igc_rx_buffer *rx_buffer_info;
};
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
dma_addr_t dma; /* phys address of the ring */
unsigned int size; /* length of desc. ring in bytes */
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring */
u8 reg_idx; /* physical index of the ring */
bool launchtime_enable; /* true if LaunchTime is enabled */
u32 start_time;
u32 end_time;
/* everything past this point are written often */
u16 next_to_clean;
u16 next_to_use;
u16 next_to_alloc;
union {
/* TX */
struct {
struct igc_tx_queue_stats tx_stats;
struct u64_stats_sync tx_syncp;
struct u64_stats_sync tx_syncp2;
};
/* RX */
struct {
struct igc_rx_queue_stats rx_stats;
struct igc_rx_packet_stats pkt_stats;
struct u64_stats_sync rx_syncp;
struct sk_buff *skb;
};
};
} ____cacheline_internodealigned_in_smp;
/* Board specific private data structure */
struct igc_adapter {
struct net_device *netdev;
struct ethtool_eee eee;
u16 eee_advert;
unsigned long state;
unsigned int flags;
unsigned int num_q_vectors;
struct msix_entry *msix_entries;
/* TX */
u16 tx_work_limit;
u32 tx_timeout_count;
int num_tx_queues;
struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
/* RX */
int num_rx_queues;
struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
struct timer_list watchdog_timer;
struct timer_list dma_err_timer;
struct timer_list phy_info_timer;
u32 wol;
u32 en_mng_pt;
u16 link_speed;
u16 link_duplex;
u8 port_num;
u8 __iomem *io_addr;
/* Interrupt Throttle Rate */
u32 rx_itr_setting;
u32 tx_itr_setting;
struct work_struct reset_task;
struct work_struct watchdog_task;
struct work_struct dma_err_task;
bool fc_autoneg;
u8 tx_timeout_factor;
int msg_enable;
u32 max_frame_size;
u32 min_frame_size;
ktime_t base_time;
ktime_t cycle_time;
/* OS defined structs */
struct pci_dev *pdev;
/* lock for statistics */
spinlock_t stats64_lock;
struct rtnl_link_stats64 stats64;
/* structs defined in igc_hw.h */
struct igc_hw hw;
struct igc_hw_stats stats;
struct igc_q_vector *q_vector[MAX_Q_VECTORS];
u32 eims_enable_mask;
u32 eims_other;
u16 tx_ring_count;
u16 rx_ring_count;
u32 tx_hwtstamp_timeouts;
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
u32 rss_queues;
u32 rss_indir_tbl_init;
/* Any access to elements in nfc_rule_list is protected by the
* nfc_rule_lock.
*/
struct mutex nfc_rule_lock;
struct list_head nfc_rule_list;
unsigned int nfc_rule_count;
u8 rss_indir_tbl[IGC_RETA_SIZE];
unsigned long link_check_timeout;
struct igc_info ei;
u32 test_icr;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned int ptp_flags;
/* System time value lock */
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
ktime_t ptp_reset_start; /* Reset time in clock mono */
char fw_version[32];
};
void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
int igc_open(struct net_device *netdev);
int igc_close(struct net_device *netdev);
int igc_setup_tx_resources(struct igc_ring *ring);
int igc_setup_rx_resources(struct igc_ring *ring);
void igc_free_tx_resources(struct igc_ring *ring);
void igc_free_rx_resources(struct igc_ring *ring);
unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter);
void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
const u32 max_rss_queues);
int igc_reinit_queues(struct igc_adapter *adapter);
void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
void igc_update_stats(struct igc_adapter *adapter);
/* igc_dump declarations */
void igc_rings_dump(struct igc_adapter *adapter);
void igc_regs_dump(struct igc_adapter *adapter);
extern char igc_driver_name[];
#define IGC_REGS_LEN 740
/* flags controlling PTP/1588 function */
#define IGC_PTP_ENABLED BIT(0)
/* Flags definitions */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
#define IGC_FLAG_DMAC BIT(4)
#define IGC_FLAG_PTP BIT(8)
#define IGC_FLAG_WOL_SUPPORTED BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
#define IGC_FLAG_HAS_MSIX BIT(13)
#define IGC_FLAG_EEE BIT(14)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
#define IGC_20K_ITR 196
#define IGC_70K_ITR 56
#define IGC_DEFAULT_ITR 3 /* dynamic */
#define IGC_MAX_ITR_USECS 10000
#define IGC_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_MSIX_ENTRIES 10
/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD 256
#define IGC_DEFAULT_TX_WORK 128
#define IGC_MIN_TXD 80
#define IGC_MAX_TXD 4096
#define IGC_DEFAULT_RXD 256
#define IGC_MIN_RXD 80
#define IGC_MAX_RXD 4096
/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256 256
#define IGC_RXBUFFER_2048 2048
#define IGC_RXBUFFER_3072 3072
#define AUTO_ALL_MODES 0
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* Transmit and receive latency (for PTP timestamps) */
#define IGC_I225_TX_LATENCY_10 240
#define IGC_I225_TX_LATENCY_100 58
#define IGC_I225_TX_LATENCY_1000 80
#define IGC_I225_TX_LATENCY_2500 1325
#define IGC_I225_RX_LATENCY_10 6450
#define IGC_I225_RX_LATENCY_100 185
#define IGC_I225_RX_LATENCY_1000 300
#define IGC_I225_RX_LATENCY_2500 1485
/* RX and TX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
* descriptors available in its onboard memory.
* Setting this to 0 disables RX descriptor prefetch.
* HTHRESH - MAC will only prefetch if there are at least this many descriptors
* available in host memory.
* If PTHRESH is 0, this should also be 0.
* WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
#define IGC_RX_PTHRESH 8
#define IGC_RX_HTHRESH 8
#define IGC_TX_PTHRESH 8
#define IGC_TX_HTHRESH 1
#define IGC_RX_WTHRESH 4
#define IGC_TX_WTHRESH 16
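/* Illustrative sketch, not part of this header: mirroring how the ring
* setup code programs a queue, the three Rx thresholds are packed into one
* RXDCTL register (PTHRESH in bits 4:0, HTHRESH in bits 12:8, WTHRESH in
* bits 20:16):
*
*   u32 rxdctl = IGC_RX_PTHRESH | (IGC_RX_HTHRESH << 8) |
*                (IGC_RX_WTHRESH << 16) | IGC_RXDCTL_QUEUE_ENABLE;
*/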
#define IGC_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
#define IGC_TS_HDR_LEN 16
#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#if (PAGE_SIZE < 8192)
#define IGC_MAX_FRAME_BUILD_SKB \
(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
#else
#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
#endif
/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */
/* VLAN info */
#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000
/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
const u32 stat_err_bits)
{
return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
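/* Illustrative use in an Rx clean loop: poll the Descriptor Done bit and
* stop once the hardware has not yet written the descriptor back:
*
*   if (!igc_test_staterr(rx_desc, IGC_RXD_STAT_DD))
*       break;
*/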
enum igc_state_t {
__IGC_TESTING,
__IGC_RESETTING,
__IGC_DOWN,
__IGC_PTP_TX_IN_PROGRESS,
};
enum igc_tx_flags {
/* cmd_type flags */
IGC_TX_FLAGS_VLAN = 0x01,
IGC_TX_FLAGS_TSO = 0x02,
IGC_TX_FLAGS_TSTAMP = 0x04,
/* olinfo flags */
IGC_TX_FLAGS_IPV4 = 0x10,
IGC_TX_FLAGS_CSUM = 0x20,
};
enum igc_boards {
board_base,
};
/* The largest size we can write to the descriptor is 65535. In order to
* maintain a power of two alignment we have to limit ourselves to 32K.
*/
#define IGC_MAX_TXD_PWR 15
#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
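/* Worked example: a single 9728-byte jumbo-frame fragment needs
* TXD_USE_COUNT(9728) = DIV_ROUND_UP(9728, 32768) = 1 data descriptor,
* while a worst-case 65535-byte fragment needs 2. The +4 in DESC_NEEDED
* leaves head room beyond MAX_SKB_FRAGS, e.g. for a context descriptor.
*/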
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct igc_tx_buffer {
union igc_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
struct sk_buff *skb;
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
};
struct igc_rx_buffer {
dma_addr_t dma;
struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
__u32 page_offset;
#else
__u16 page_offset;
#endif
__u16 pagecnt_bias;
};
struct igc_q_vector {
struct igc_adapter *adapter; /* backlink */
void __iomem *itr_register;
u32 eims_value; /* EIMS mask value */
u16 itr_val;
u8 set_itr;
struct igc_ring_container rx, tx;
struct napi_struct napi;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
struct net_device poll_dev;
/* for dynamic allocation of rings associated with this q_vector */
struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
};
enum igc_filter_match_flags {
IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
IGC_FILTER_FLAG_VLAN_TCI = 0x2,
IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8,
};
struct igc_nfc_filter {
u8 match_flags;
u16 etype;
u16 vlan_tci;
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
};
struct igc_nfc_rule {
struct list_head list;
struct igc_nfc_filter filter;
u32 location;
u16 action;
};
/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
* based, and 8 ethertype based.
*/
#define IGC_MAX_RXNFC_RULES 32
/* igc_desc_unused - calculate if we have unused descriptors */
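/* Worked example (hypothetical ring state): with count = 256,
* next_to_clean = 10 and next_to_use = 200, ntc <= ntu, so unused =
* 256 + 10 - 200 - 1 = 65. The trailing -1 keeps next_to_use from ever
* catching up to next_to_clean on a completely full ring.
*/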
static inline u16 igc_desc_unused(const struct igc_ring *ring)
{
u16 ntc = ring->next_to_clean;
u16 ntu = ring->next_to_use;
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
static inline s32 igc_get_phy_info(struct igc_hw *hw)
{
if (hw->phy.ops.get_phy_info)
return hw->phy.ops.get_phy_info(hw);
return 0;
}
static inline s32 igc_reset_phy(struct igc_hw *hw)
{
if (hw->phy.ops.reset)
return hw->phy.ops.reset(hw);
return 0;
}
static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
{
return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
enum igc_ring_flags_t {
IGC_RING_FLAG_RX_3K_BUFFER,
IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGC_RING_FLAG_RX_SCTP_CSUM,
IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
IGC_RING_FLAG_TX_CTX_IDX,
IGC_RING_FLAG_TX_DETECT_HANG
};
#define ring_uses_large_buffer(ring) \
test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define ring_uses_build_skb(ring) \
test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring_uses_large_buffer(ring))
return IGC_RXBUFFER_3072;
if (ring_uses_build_skb(ring))
return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
#endif
return IGC_RXBUFFER_2048;
}
static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring_uses_large_buffer(ring))
return 1;
#endif
return 0;
}
static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
{
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
return 0;
}
void igc_reinit_locked(struct igc_adapter *);
struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
u32 location);
int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
struct sk_buff *skb);
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
#define IGC_RX_DESC(R, i) \
(&(((union igc_adv_rx_desc *)((R)->desc))[i]))
#define IGC_TX_DESC(R, i) \
(&(((union igc_adv_tx_desc *)((R)->desc))[i]))
#define IGC_TX_CTXTDESC(R, i) \
(&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))
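/* Illustrative use of the descriptor accessors:
*
*   union igc_adv_rx_desc *rx_desc = IGC_RX_DESC(rx_ring, i);
*   union igc_adv_tx_desc *tx_desc = IGC_TX_DESC(tx_ring, i);
*/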
#endif /* _IGC_H_ */

433
igc_base.c Normal file

@@ -0,0 +1,433 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include <linux/delay.h>
#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"
#include "igc.h"
/**
* igc_reset_hw_base - Reset hardware
* @hw: pointer to the HW structure
*
* This resets the hardware into a known state. This is a
* function pointer entry point called by the api module.
*/
static s32 igc_reset_hw_base(struct igc_hw *hw)
{
s32 ret_val;
u32 ctrl;
/* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = igc_disable_pcie_master(hw);
if (ret_val)
hw_dbg("PCI-E Master disable polling has failed\n");
hw_dbg("Masking off all interrupts\n");
wr32(IGC_IMC, 0xffffffff);
wr32(IGC_RCTL, 0);
wr32(IGC_TCTL, IGC_TCTL_PSP);
wrfl();
usleep_range(10000, 20000);
ctrl = rd32(IGC_CTRL);
hw_dbg("Issuing a global reset to MAC\n");
wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);
ret_val = igc_get_auto_rd_done(hw);
if (ret_val) {
/* When auto config read does not complete, do not
* return with an error. This can happen in situations
* where there is no eeprom and prevents getting link.
*/
hw_dbg("Auto Read Done did not complete\n");
}
/* Clear any pending interrupt events. */
wr32(IGC_IMC, 0xffffffff);
rd32(IGC_ICR);
return ret_val;
}
/**
* igc_init_nvm_params_base - Init NVM func ptrs.
* @hw: pointer to the HW structure
*/
static s32 igc_init_nvm_params_base(struct igc_hw *hw)
{
struct igc_nvm_info *nvm = &hw->nvm;
u32 eecd = rd32(IGC_EECD);
u16 size;
size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
IGC_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
/* Just in case size is out of range, cap it to the largest
* EEPROM size supported
*/
if (size > 15)
size = 15;
nvm->type = igc_nvm_eeprom_spi;
nvm->word_size = BIT(size);
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
16 : 8;
if (nvm->word_size == BIT(15))
nvm->page_size = 128;
return 0;
}
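/* Worked example for igc_init_nvm_params_base() above (hypothetical EECD
* value): if the SIZE_EX field reads 2, size = 2 + NVM_WORD_SIZE_BASE_SHIFT
* (6) = 8, so word_size = BIT(8) = 256 16-bit words.
*/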
/**
* igc_setup_copper_link_base - Configure copper link settings
* @hw: pointer to the HW structure
*
* Configures the link for auto-neg or forced speed and duplex. Then we check
* for link, once link is established calls to configure collision distance
* and flow control are called.
*/
static s32 igc_setup_copper_link_base(struct igc_hw *hw)
{
s32 ret_val = 0;
u32 ctrl;
ctrl = rd32(IGC_CTRL);
ctrl |= IGC_CTRL_SLU;
ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
wr32(IGC_CTRL, ctrl);
ret_val = igc_setup_copper_link(hw);
return ret_val;
}
/**
* igc_init_mac_params_base - Init MAC func ptrs.
* @hw: pointer to the HW structure
*/
static s32 igc_init_mac_params_base(struct igc_hw *hw)
{
struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base;
struct igc_mac_info *mac = &hw->mac;
/* Set mta register count */
mac->mta_reg_count = 128;
mac->rar_entry_count = IGC_RAR_ENTRIES;
/* reset */
mac->ops.reset_hw = igc_reset_hw_base;
mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;
/* Allow a single clear of the SW semaphore on I225 */
if (mac->type == igc_i225)
dev_spec->clear_semaphore_once = true;
/* physical interface link setup */
mac->ops.setup_physical_interface = igc_setup_copper_link_base;
return 0;
}
/**
* igc_init_phy_params_base - Init PHY func ptrs.
* @hw: pointer to the HW structure
*/
static s32 igc_init_phy_params_base(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val = 0;
if (hw->phy.media_type != igc_media_type_copper) {
phy->type = igc_phy_none;
goto out;
}
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
phy->reset_delay_us = 100;
/* set lan id */
hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
IGC_STATUS_FUNC_SHIFT;
/* Make sure the PHY is in a good state. Several people have reported
* firmware leaving the PHY's page select register set to something
* other than the default of zero, which causes the PHY ID read to
* access something other than the intended register.
*/
ret_val = hw->phy.ops.reset(hw);
if (ret_val) {
hw_dbg("Error resetting the PHY\n");
goto out;
}
ret_val = igc_get_phy_id(hw);
if (ret_val)
return ret_val;
igc_check_for_copper_link(hw);
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
case I225_I_PHY_ID:
phy->type = igc_phy_i225;
break;
default:
ret_val = -IGC_ERR_PHY;
goto out;
}
out:
return ret_val;
}
static s32 igc_get_invariants_base(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
s32 ret_val = 0;
switch (hw->device_id) {
case IGC_DEV_ID_I225_LM:
case IGC_DEV_ID_I225_V:
case IGC_DEV_ID_I225_I:
case IGC_DEV_ID_I220_V:
case IGC_DEV_ID_I225_K:
case IGC_DEV_ID_I225_K2:
case IGC_DEV_ID_I226_K:
case IGC_DEV_ID_I225_LMVP:
case IGC_DEV_ID_I225_IT:
case IGC_DEV_ID_I226_LM:
case IGC_DEV_ID_I226_V:
case IGC_DEV_ID_I226_IT:
case IGC_DEV_ID_I221_V:
case IGC_DEV_ID_I226_BLANK_NVM:
case IGC_DEV_ID_I225_BLANK_NVM:
mac->type = igc_i225;
break;
default:
return -IGC_ERR_MAC_INIT;
}
hw->phy.media_type = igc_media_type_copper;
/* mac initialization and operations */
ret_val = igc_init_mac_params_base(hw);
if (ret_val)
goto out;
/* NVM initialization */
ret_val = igc_init_nvm_params_base(hw);
switch (hw->mac.type) {
case igc_i225:
ret_val = igc_init_nvm_params_i225(hw);
break;
default:
break;
}
/* setup PHY parameters */
ret_val = igc_init_phy_params_base(hw);
if (ret_val)
goto out;
out:
return ret_val;
}
/**
* igc_acquire_phy_base - Acquire rights to access PHY
* @hw: pointer to the HW structure
*
* Acquire access rights to the correct PHY. This is a
* function pointer entry point called by the api module.
*/
static s32 igc_acquire_phy_base(struct igc_hw *hw)
{
u16 mask = IGC_SWFW_PHY0_SM;
return hw->mac.ops.acquire_swfw_sync(hw, mask);
}
/**
* igc_release_phy_base - Release rights to access PHY
* @hw: pointer to the HW structure
*
* A wrapper to release access rights to the correct PHY. This is a
* function pointer entry point called by the api module.
*/
static void igc_release_phy_base(struct igc_hw *hw)
{
u16 mask = IGC_SWFW_PHY0_SM;
hw->mac.ops.release_swfw_sync(hw, mask);
}
/**
* igc_init_hw_base - Initialize hardware
* @hw: pointer to the HW structure
*
* This inits the hardware readying it for operation.
*/
static s32 igc_init_hw_base(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
u16 i, rar_count = mac->rar_entry_count;
s32 ret_val = 0;
/* Setup the receive address */
igc_init_rx_addrs(hw, rar_count);
/* Zero out the Multicast HASH table */
hw_dbg("Zeroing the MTA\n");
for (i = 0; i < mac->mta_reg_count; i++)
array_wr32(IGC_MTA, i, 0);
/* Zero out the Unicast HASH table */
hw_dbg("Zeroing the UTA\n");
for (i = 0; i < mac->uta_reg_count; i++)
array_wr32(IGC_UTA, i, 0);
/* Setup link and flow control */
ret_val = igc_setup_link(hw);
/* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
*/
igc_clear_hw_cntrs_base(hw);
return ret_val;
}
/**
* igc_power_down_phy_copper_base - Remove link during PHY power down
* @hw: pointer to the HW structure
*
* In the case of a PHY power down to save power, or to turn off link during a
* driver unload, or wake on lan is not enabled, remove the link.
*/
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
/* If the management interface is not enabled, then power down */
if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw)))
igc_power_down_phy_copper(hw);
}
/**
* igc_rx_fifo_flush_base - Clean rx fifo after Rx enable
* @hw: pointer to the HW structure
*
* After Rx enable, if manageability is enabled then there is likely some
* bad data at the start of the fifo and possibly in the DMA fifo. This
* function clears the fifos and flushes any packets that came in as rx was
* being enabled.
*/
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
int i, ms_wait;
/* disable IPv6 options as per hardware errata */
rfctl = rd32(IGC_RFCTL);
rfctl |= IGC_RFCTL_IPV6_EX_DIS;
wr32(IGC_RFCTL, rfctl);
if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
return;
/* Disable all Rx queues */
for (i = 0; i < 4; i++) {
rxdctl[i] = rd32(IGC_RXDCTL(i));
wr32(IGC_RXDCTL(i),
rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
}
/* Poll all queues to verify they have shut down */
for (ms_wait = 0; ms_wait < 10; ms_wait++) {
usleep_range(1000, 2000);
rx_enabled = 0;
for (i = 0; i < 4; i++)
rx_enabled |= rd32(IGC_RXDCTL(i));
if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
break;
}
if (ms_wait == 10)
hw_dbg("Queue disable timed out after 10ms\n");
/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
* incoming packets are rejected. Set enable and wait 2ms so that
* any packet that was coming in as RCTL.EN was set is flushed
*/
wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);
rlpml = rd32(IGC_RLPML);
wr32(IGC_RLPML, 0);
rctl = rd32(IGC_RCTL);
temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
temp_rctl |= IGC_RCTL_LPE;
wr32(IGC_RCTL, temp_rctl);
wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
wrfl();
usleep_range(2000, 3000);
/* Enable Rx queues that were previously enabled and restore our
* previous state
*/
for (i = 0; i < 4; i++)
wr32(IGC_RXDCTL(i), rxdctl[i]);
wr32(IGC_RCTL, rctl);
wrfl();
wr32(IGC_RLPML, rlpml);
wr32(IGC_RFCTL, rfctl);
/* Flush receive errors generated by workaround */
rd32(IGC_ROC);
rd32(IGC_RNBC);
rd32(IGC_MPC);
}
static struct igc_mac_operations igc_mac_ops_base = {
.init_hw = igc_init_hw_base,
.check_for_link = igc_check_for_copper_link,
.rar_set = igc_rar_set,
.read_mac_addr = igc_read_mac_addr,
.get_speed_and_duplex = igc_get_speed_and_duplex_copper,
};
static const struct igc_phy_operations igc_phy_ops_base = {
.acquire = igc_acquire_phy_base,
.release = igc_release_phy_base,
.reset = igc_phy_hw_reset,
.read_reg = igc_read_phy_reg_gpy,
.write_reg = igc_write_phy_reg_gpy,
};
const struct igc_info igc_base_info = {
.get_invariants = igc_get_invariants_base,
.mac_ops = &igc_mac_ops_base,
.phy_ops = &igc_phy_ops_base,
};

90
igc_base.h Normal file

@@ -0,0 +1,90 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_BASE_H_
#define _IGC_BASE_H_
/* forward declarations */
void igc_rx_fifo_flush_base(struct igc_hw *hw);
void igc_power_down_phy_copper_base(struct igc_hw *hw);
/* Transmit Descriptor - Advanced */
union igc_adv_tx_desc {
struct {
__le64 buffer_addr; /* Address of descriptor's data buf */
__le32 cmd_type_len;
__le32 olinfo_status;
} read;
struct {
__le64 rsvd; /* Reserved */
__le32 nxtseq_seed;
__le32 status;
} wb;
};
/* Context descriptors */
struct igc_adv_tx_context_desc {
__le32 vlan_macip_lens;
__le32 launch_time;
__le32 type_tucmd_mlhl;
__le32 mss_l4len_idx;
};
/* Adv Transmit Descriptor Config Masks */
#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
#define IGC_RAR_ENTRIES 16
/* Receive Descriptor - Advanced */
union igc_adv_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
union {
__le32 data;
struct {
__le16 pkt_info; /*RSS type, Pkt type*/
/* Split Header, header buffer len */
__le16 hdr_info;
} hs_rss;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
struct {
__le16 ip_id; /* IP id */
__le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
__le32 status_error; /* ext status/error */
__le16 length; /* Packet length */
__le16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
/* Additional Transmit Descriptor Control definitions */
#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
/* Additional Receive Descriptor Control definitions */
#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
/* SRRCTL bit definitions */
#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
#endif /* _IGC_BASE_H_ */

535
igc_defines.h Normal file

@@ -0,0 +1,535 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_DEFINES_H_
#define _IGC_DEFINES_H_
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */
/* Wake Up Filter Control */
#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
/* Wake Up Status */
#define IGC_WUS_EX 0x00000004 /* Directed Exact */
#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */
#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */
#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */
#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */
/* Packet types that are enabled for wake packet delivery */
#define WAKE_PKT_WUS ( \
IGC_WUS_EX | \
IGC_WUS_ARPD | \
IGC_WUS_IPV4 | \
IGC_WUS_IPV6 | \
IGC_WUS_NSD)
/* Wake Up Packet Length */
#define IGC_WUPL_MASK 0x00000FFF
/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
#define IGC_WUPM_BYTES 128
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
/* Number of 100 microseconds we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT 800
/* Blocks new Master requests */
#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004
/* Status of Master requests. */
#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000
/* Receive Address
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
* (RAR[15]) for our directed address used by controllers with
* manageability enabled, allowing us room for 15 multicast addresses.
*/
#define IGC_RAH_RAH_MASK 0x0000FFFF
#define IGC_RAH_ASEL_MASK 0x00030000
#define IGC_RAH_ASEL_SRC_ADDR BIT(16)
#define IGC_RAH_QSEL_MASK 0x000C0000
#define IGC_RAH_QSEL_SHIFT 18
#define IGC_RAH_QSEL_ENABLE BIT(28)
#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */
#define IGC_RAL_MAC_ADDR_LEN 4
#define IGC_RAH_MAC_ADDR_LEN 2
/* Error Codes */
#define IGC_SUCCESS 0
#define IGC_ERR_NVM 1
#define IGC_ERR_PHY 2
#define IGC_ERR_CONFIG 3
#define IGC_ERR_PARAM 4
#define IGC_ERR_MAC_INIT 5
#define IGC_ERR_RESET 9
#define IGC_ERR_MASTER_REQUESTS_PENDING 10
#define IGC_ERR_BLK_PHY_RESET 12
#define IGC_ERR_SWFW_SYNC 13
/* Device Control */
#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */
#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */
#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */
#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
/* PBA constants */
#define IGC_PBA_34K 0x0022
/* SW Semaphore Register */
#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
/* SWFW_SYNC Definitions */
#define IGC_SWFW_EEP_SM 0x1
#define IGC_SWFW_PHY0_SM 0x2
/* Autoneg Advertisement Register */
#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
/* Link Partner Ability Register (Base Page) */
#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
/* 1000BASE-T Control Register */
#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
/* 1000BASE-T Status Register */
#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
/* PHY GPY 211 registers */
#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */
#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */
#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */
/* NVM Control */
/* Number of milliseconds for NVM auto read done after MAC reset. */
#define AUTO_READ_DONE_TIMEOUT 10
#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */
#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */
/* NVM Addressing bits based on type 0=small, 1=large */
#define IGC_EECD_ADDR_BITS 0x00000400
#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
#define IGC_EECD_SIZE_EX_SHIFT 11
#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */
#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done */
#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */
#define IGC_FLUDONE_ATTEMPTS 20000
#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
/* Offset to data in NVM read/write registers */
#define IGC_NVM_RW_REG_DATA 16
#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
#define IGC_NVM_RW_REG_START 1 /* Start operation */
#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */
#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */
/* NVM Word Offsets */
#define NVM_CHECKSUM_REG 0x003F
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
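/* Illustrative validation sketch (follows the shape of the driver's NVM
* checksum check; error handling omitted):
*
*   u16 i, word, checksum = 0;
*
*   for (i = 0; i <= NVM_CHECKSUM_REG; i++) {
*       hw->nvm.ops.read(hw, i, 1, &word);
*       checksum += word;
*   }
*   if (checksum != (u16)NVM_SUM)
*       return -IGC_ERR_NVM;
*/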
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* Collision related configuration parameters */
#define IGC_COLLISION_THRESHOLD 15
#define IGC_CT_SHIFT 4
#define IGC_COLLISION_DISTANCE 63
#define IGC_COLD_SHIFT 12
/* Device Status */
#define IGC_STATUS_FD 0x00000001 /* Full duplex; 0=half, 1=full */
#define IGC_STATUS_LU 0x00000002 /* Link up; 0=no link, 1=link */
#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define IGC_STATUS_FUNC_SHIFT 2
#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */
#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define SPEED_2500 2500
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
/* 1Gbps and 2.5Gbps half duplex are not supported, nor spec-compliant. */
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
#define ADVERTISE_100_FULL 0x0008
#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
#define ADVERTISE_1000_FULL 0x0020
#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */
#define ADVERTISE_2500_FULL 0x0080
#define IGC_ALL_SPEED_DUPLEX_2500 ( \
ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL)
#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500
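/* Worked value: IGC_ALL_SPEED_DUPLEX_2500 = 0x01 | 0x02 | 0x04 | 0x08 |
* 0x20 | 0x80 = 0x00AF; the 1000/2500 half-duplex bits are deliberately
* excluded per the note above.
*/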
/* Interrupt Cause Read */
#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */
#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */
#define IGC_ICR_LSC BIT(2) /* Link Status Change */
#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */
#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. threshold (0) */
#define IGC_ICR_RXO BIT(6) /* Rx overrun */
#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */
#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */
#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */
/* If this bit is asserted, the driver should claim the interrupt */
#define IGC_ICR_INT_ASSERTED BIT(31)
#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
#define IMS_ENABLE_MASK ( \
IGC_IMS_RXT0 | \
IGC_IMS_TXDW | \
IGC_IMS_RXDMT0 | \
IGC_IMS_RXSEQ | \
IGC_IMS_LSC)
/* Interrupt Mask Set */
#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */
#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */
#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */
#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */
#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */
#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */
#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */
#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */
/* Interrupt Cause Set */
#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */
#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */
#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
#define IGC_IVAR_VALID 0x80
#define IGC_GPIE_NSICR 0x00000001
#define IGC_GPIE_MSIX_MODE 0x00000010
#define IGC_GPIE_EIAME 0x40000000
#define IGC_GPIE_PBA 0x80000000
/* Receive Descriptor bit definitions */
#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */
/* Transmit Descriptor bit definitions */
#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */
#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */
#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */
#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* IPSec Encrypt Enable */
#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
/* Transmit Control */
#define IGC_TCTL_EN 0x00000002 /* enable Tx */
#define IGC_TCTL_PSP 0x00000008 /* pad short packets */
#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */
#define IGC_TCTL_COLD 0x003ff000 /* collision distance */
#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
#define FLOW_CONTROL_TYPE 0x8808
/* Enable XON frame transmission */
#define IGC_FCRTL_XONE 0x80000000
/* Management Control */
#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
/* Receive Control */
#define IGC_RCTL_RST 0x00000001 /* Software reset */
#define IGC_RCTL_EN 0x00000002 /* enable */
#define IGC_RCTL_SBP 0x00000004 /* store bad packet */
#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */
#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */
#define IGC_RCTL_LPE 0x00000020 /* long packet enable */
#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
/* Split Replication Receive Control */
#define IGC_SRRCTL_TIMESTAMP 0x40000000
#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14)
#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17)
/* Receive Descriptor bit definitions */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
/* Advanced Receive Descriptor bit definitions */
#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
#define IGC_RXDEXT_STATERR_L4E 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
#define IGC_RXDEXT_STATERR_RXE 0x80000000
#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
#define IGC_RFCTL_LEF 0x00040000
#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */
#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */
#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
/* Time Sync Interrupt Causes */
#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */
#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */
#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */
#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */
#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */
#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS
#define IGC_FTQF_VF_BP 0x00008000
#define IGC_FTQF_1588_TIME_STAMP 0x08000000
#define IGC_FTQF_MASK 0xF0000000
#define IGC_FTQF_MASK_PROTO_BP 0x10000000
/* Time Sync Receive Control bit definitions */
#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00
#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02
#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
#define IGC_TSYNCRXCTL_TYPE_ALL 0x08
#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */
/* Time Sync Receive Configuration */
#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
/* Immediate Interrupt Receive */
#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
/* Immediate Interrupt Receive Extended */
#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
/* Time Sync Transmit Control bit definitions */
#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */
#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */
#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */
/* Transmit Scheduling */
#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
#define IGC_TXQCTL_STRICT_END 0x00000004
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* GPY211 - I225 defines */
#define GPY_MMD_MASK 0xFFFF0000
#define GPY_MMD_SHIFT 16
#define GPY_REG_MASK 0x0000FFFF
#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
/* MAC definitions */
#define IGC_FACTPS_MNGCG 0x20000000
#define IGC_FWSM_MODE_MASK 0xE
#define IGC_FWSM_MODE_SHIFT 1
/* Management Control */
#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
/* PHY */
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define IGC_GEN_POLL_TIMEOUT 1920
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_1000 0x0040
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
#define PHY_CONTROL 0x00 /* Control Register */
#define PHY_STATUS 0x01 /* Status Register */
#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
/* Bit definitions for valid PHY IDs. I = Integrated E = External */
#define I225_I_PHY_ID 0x67C9DC00
/* MDI Control */
#define IGC_MDIC_DATA_MASK 0x0000FFFF
#define IGC_MDIC_REG_MASK 0x001F0000
#define IGC_MDIC_REG_SHIFT 16
#define IGC_MDIC_PHY_MASK 0x03E00000
#define IGC_MDIC_PHY_SHIFT 21
#define IGC_MDIC_OP_WRITE 0x04000000
#define IGC_MDIC_OP_READ 0x08000000
#define IGC_MDIC_READY 0x10000000
#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
#define IGC_N0_QUEUE -1
#define IGC_MAX_MAC_HDR_LEN 127
#define IGC_MAX_NETWORK_HDR_LEN 511
#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4))
#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4))
#define IGC_VLANPQF_QUEUE_MASK 0x03
#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */
#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */
#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
/* Maximum size of the MTA register table in all supported adapters */
#define MAX_MTA_REG 128
/* EEE defines */
#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */
#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
/* LTR defines */
#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */
#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */
#define IGC_TW_SYSTEM_1000_MASK 0x000000FF
/* Minimum time for 100BASE-T where no data will be transmitted following move out
* of EEE LPI Tx state
*/
#define IGC_TW_SYSTEM_100_MASK 0x0000FF00
#define IGC_TW_SYSTEM_100_SHIFT 8
#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
#define IGC_DMACR_DMACTHR_MASK 0x00FF0000
#define IGC_DMACR_DMACTHR_SHIFT 16
/* Reg val to set scale to 1024 nsec */
#define IGC_LTRMINV_SCALE_1024 2
/* Reg val to set scale to 32768 nsec */
#define IGC_LTRMINV_SCALE_32768 3
/* Reg val to set scale to 1024 nsec */
#define IGC_LTRMAXV_SCALE_1024 2
/* Reg val to set scale to 32768 nsec */
#define IGC_LTRMAXV_SCALE_32768 3
#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */
#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */
#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */
#define IGC_LTRMINV_SCALE_SHIFT 10
#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */
#define IGC_LTRMAXV_SCALE_SHIFT 10
#endif /* _IGC_DEFINES_H_ */

186
igc_diag.c Normal file

@@ -0,0 +1,186 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Intel Corporation */
#include "igc.h"
#include "igc_diag.h"
static struct igc_reg_test reg_test[] = {
{ IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
{ IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
{ IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
{ IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
{ IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
{ IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
{ IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
{ IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
{ IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB },
{ IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF },
{ IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ IGC_RA, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
{ IGC_RA, 16, TABLE64_TEST_HI,
0x900FFFFF, 0xFFFFFFFF },
{ IGC_MTA, 128, TABLE32_TEST,
0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0}
};
static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg,
u32 mask, u32 write)
{
struct igc_hw *hw = &adapter->hw;
u32 pat, val, before;
static const u32 test_pattern[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};
for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
before = rd32(reg);
wr32(reg, test_pattern[pat] & write);
val = rd32(reg);
if (val != (test_pattern[pat] & write & mask)) {
netdev_err(adapter->netdev,
"pattern test reg %04X failed: got 0x%08X expected 0x%08X",
reg, val, test_pattern[pat] & write & mask);
*data = reg;
wr32(reg, before);
return false;
}
wr32(reg, before);
}
return true;
}
static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg,
u32 mask, u32 write)
{
struct igc_hw *hw = &adapter->hw;
u32 val, before;
before = rd32(reg);
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
netdev_err(adapter->netdev,
"set/check reg %04X test failed: got 0x%08X expected 0x%08X",
reg, (val & mask), (write & mask));
*data = reg;
wr32(reg, before);
return false;
}
wr32(reg, before);
return true;
}
bool igc_reg_test(struct igc_adapter *adapter, u64 *data)
{
struct igc_reg_test *test = reg_test;
struct igc_hw *hw = &adapter->hw;
u32 value, before, after;
u32 i, toggle, b = false;
/* Because the status register is such a special case,
* we handle it separately from the rest of the register
* tests. Some bits are read-only, some toggle, and some
* are writeable.
*/
toggle = 0x6800D3;
before = rd32(IGC_STATUS);
value = before & toggle;
wr32(IGC_STATUS, toggle);
after = rd32(IGC_STATUS) & toggle;
if (value != after) {
netdev_err(adapter->netdev,
"failed STATUS register test got: 0x%08X expected: 0x%08X",
after, value);
*data = 1;
return false;
}
/* restore previous status */
wr32(IGC_STATUS, before);
/* Perform the remainder of the register test, looping through
* the test table until we either fail or reach the null entry.
*/
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
switch (test->test_type) {
case PATTERN_TEST:
b = reg_pattern_test(adapter, data,
test->reg + (i * 0x40),
test->mask,
test->write);
break;
case SET_READ_TEST:
b = reg_set_and_check(adapter, data,
test->reg + (i * 0x40),
test->mask,
test->write);
break;
case TABLE64_TEST_LO:
b = reg_pattern_test(adapter, data,
test->reg + (i * 8),
test->mask,
test->write);
break;
case TABLE64_TEST_HI:
b = reg_pattern_test(adapter, data,
test->reg + 4 + (i * 8),
test->mask,
test->write);
break;
case TABLE32_TEST:
b = reg_pattern_test(adapter, data,
test->reg + (i * 4),
test->mask,
test->write);
break;
}
if (!b)
return false;
}
test++;
}
*data = 0;
return true;
}
bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data)
{
struct igc_hw *hw = &adapter->hw;
*data = 0;
if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) {
*data = 1;
return false;
}
return true;
}
bool igc_link_test(struct igc_adapter *adapter, u64 *data)
{
bool link_up;
*data = 0;
/* add delay to give enough time for autonegotiation to finish */
if (adapter->hw.mac.autoneg)
ssleep(5);
link_up = igc_has_link(adapter);
if (!link_up) {
*data = 1;
return false;
}
return true;
}
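/* These three helpers back the ethtool self-test path (ethtool -t <iface>);
* each returns false and sets a non-zero *data word when its test fails.
*/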

30
igc_diag.h Normal file

@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Intel Corporation */
bool igc_reg_test(struct igc_adapter *adapter, u64 *data);
bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data);
bool igc_link_test(struct igc_adapter *adapter, u64 *data);
struct igc_reg_test {
u16 reg;
u8 array_len;
u8 test_type;
u32 mask;
u32 write;
};
/* In the hardware, registers are laid out either singly, in arrays
* spaced 0x40 bytes apart, or in contiguous tables. We assume
* most tests take place on arrays or single registers (handled
* as a single-element array) and special-case the tables.
* Table tests are always pattern tests.
*
* We also make provision for some required setup steps by specifying
* registers to be written without any read-back testing.
*/
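/* Example against the table in igc_diag.c: { IGC_RDBAL(0), 4, PATTERN_TEST,
* ... } walks RDBAL(0)..RDBAL(3) at reg + i * 0x40, while the
* TABLE64_TEST_LO/HI cases walk the 16 RA register pairs at reg + i * 8
* (low dword) and reg + 4 + i * 8 (high dword).
*/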
#define PATTERN_TEST 1
#define SET_READ_TEST 2
#define TABLE32_TEST 3
#define TABLE64_TEST_LO 4
#define TABLE64_TEST_HI 5

318
igc_dump.c Normal file

@@ -0,0 +1,318 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include "igc.h"
struct igc_reg_info {
u32 ofs;
char *name;
};
static const struct igc_reg_info igc_reg_info_tbl[] = {
/* General Registers */
{IGC_CTRL, "CTRL"},
{IGC_STATUS, "STATUS"},
{IGC_CTRL_EXT, "CTRL_EXT"},
{IGC_MDIC, "MDIC"},
/* Interrupt Registers */
{IGC_ICR, "ICR"},
/* RX Registers */
{IGC_RCTL, "RCTL"},
{IGC_RDLEN(0), "RDLEN"},
{IGC_RDH(0), "RDH"},
{IGC_RDT(0), "RDT"},
{IGC_RXDCTL(0), "RXDCTL"},
{IGC_RDBAL(0), "RDBAL"},
{IGC_RDBAH(0), "RDBAH"},
/* TX Registers */
{IGC_TCTL, "TCTL"},
{IGC_TDBAL(0), "TDBAL"},
{IGC_TDBAH(0), "TDBAH"},
{IGC_TDLEN(0), "TDLEN"},
{IGC_TDH(0), "TDH"},
{IGC_TDT(0), "TDT"},
{IGC_TXDCTL(0), "TXDCTL"},
/* List Terminator */
{}
};
/* igc_regdump - register printout routine */
static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
{
struct net_device *dev = igc_get_hw_dev(hw);
int n = 0;
char rname[16];
u32 regs[8];
switch (reginfo->ofs) {
case IGC_RDLEN(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDLEN(n));
break;
case IGC_RDH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDH(n));
break;
case IGC_RDT(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDT(n));
break;
case IGC_RXDCTL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RXDCTL(n));
break;
case IGC_RDBAL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDBAL(n));
break;
case IGC_RDBAH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDBAH(n));
break;
case IGC_TDBAL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDBAL(n));
break;
case IGC_TDBAH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDBAH(n));
break;
case IGC_TDLEN(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDLEN(n));
break;
case IGC_TDH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDH(n));
break;
case IGC_TDT(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDT(n));
break;
case IGC_TXDCTL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TXDCTL(n));
break;
default:
netdev_info(dev, "%-15s %08x\n", reginfo->name,
rd32(reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
regs[2], regs[3]);
}
/* igc_rings_dump - Tx-rings and Rx-rings */
void igc_rings_dump(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct my_u0 { u64 a; u64 b; } *u0;
union igc_adv_tx_desc *tx_desc;
union igc_adv_rx_desc *rx_desc;
struct igc_ring *tx_ring;
struct igc_ring *rx_ring;
u32 staterr;
u16 i, n;
if (!netif_msg_hw(adapter))
return;
netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n",
netdev->state, dev_trans_start(netdev));
/* Print TX Ring Summary */
if (!netif_running(netdev))
goto exit;
netdev_info(netdev, "TX Rings Summary\n");
netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
struct igc_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp);
}
/* Print TX Rings */
if (!netif_msg_tx_done(adapter))
goto rx_ring_summary;
netdev_info(netdev, "TX Rings Dump\n");
/* Transmit Descriptor Formats
*
* Advanced Transmit Descriptor
* +--------------------------------------------------------------+
* 0 | Buffer Address [63:0] |
* +--------------------------------------------------------------+
* 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
* +--------------------------------------------------------------+
* 63 46 45 40 39 38 36 35 32 31 24 15 0
*/
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
netdev_info(netdev, "------------------------------------\n");
netdev_info(netdev, "TX QUEUE INDEX = %d\n",
tx_ring->queue_index);
netdev_info(netdev, "------------------------------------\n");
netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
struct igc_tx_buffer *buffer_info;
tx_desc = IGC_TX_DESC(tx_ring, i);
buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
if (i == tx_ring->next_to_use &&
i == tx_ring->next_to_clean)
next_desc = " NTC/U";
else if (i == tx_ring->next_to_use)
next_desc = " NTU";
else if (i == tx_ring->next_to_clean)
next_desc = " NTC";
else
next_desc = "";
netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
i, le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp,
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1, buffer_info->skb->data,
dma_unmap_len(buffer_info, len),
true);
}
}
/* Print RX Rings Summary */
rx_ring_summary:
netdev_info(netdev, "RX Rings Summary\n");
netdev_info(netdev, "Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use,
rx_ring->next_to_clean);
}
/* Print RX Rings */
if (!netif_msg_rx_status(adapter))
goto exit;
netdev_info(netdev, "RX Rings Dump\n");
/* Advanced Receive Descriptor (Read) Format
* 63 1 0
* +-----------------------------------------------------+
* 0 | Packet Buffer Address [63:1] |A0/NSE|
* +----------------------------------------------+------+
* 8 | Header Buffer Address [63:1] | DD |
* +-----------------------------------------------------+
*
*
* Advanced Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 30 21 20 17 16 4 3 0
* +------------------------------------------------------+
* 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
* | Checksum Ident | | | | Type | Type |
* +------------------------------------------------------+
* 8 | VLAN Tag | Length | Extended Error | Extended Status |
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
netdev_info(netdev, "------------------------------------\n");
netdev_info(netdev, "RX QUEUE INDEX = %d\n",
rx_ring->queue_index);
netdev_info(netdev, "------------------------------------\n");
netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
struct igc_rx_buffer *buffer_info;
buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IGC_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
next_desc = " NTC";
else
next_desc = "";
if (staterr & IGC_RXD_STAT_DD) {
/* Descriptor Done */
netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n",
"RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
next_desc);
} else {
netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n",
"R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
next_desc);
if (netif_msg_pktdata(adapter) &&
buffer_info->dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
page_address(buffer_info->page) +
buffer_info->page_offset,
igc_rx_bufsz(rx_ring),
true);
}
}
}
}
exit:
return;
}
/* igc_regs_dump - registers dump */
void igc_regs_dump(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
struct igc_reg_info *reginfo;
/* Print Registers */
netdev_info(adapter->netdev, "Register Dump\n");
netdev_info(adapter->netdev, "Register Name Value\n");
for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl;
reginfo->name; reginfo++) {
igc_regdump(hw, reginfo);
}
}
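Both dump paths are gated on the netif message level: igc_rings_dump() returns early unless NETIF_MSG_HW is set, and the per-descriptor and packet-data output additionally require NETIF_MSG_TX_DONE, NETIF_MSG_RX_STATUS and NETIF_MSG_PKTDATA. A sketch, assuming adapter->msg_enable follows the standard msglvl convention; the helper name is hypothetical:
static void example_trigger_dumps(struct igc_adapter *adapter)
{
	/* roughly equivalent to: ethtool -s <iface> msglvl hw on tx_done on */
	adapter->msg_enable |= NETIF_MSG_HW | NETIF_MSG_TX_DONE |
			       NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA;
	igc_rings_dump(adapter);
	igc_regs_dump(adapter);
}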

1962
igc_ethtool.c Normal file

File diff suppressed because it is too large

298
igc_hw.h Normal file

@ -0,0 +1,298 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_HW_H_
#define _IGC_HW_H_
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include "igc_regs.h"
#include "igc_defines.h"
#include "igc_mac.h"
#include "igc_phy.h"
#include "igc_nvm.h"
#include "igc_i225.h"
#include "igc_base.h"
#define IGC_DEV_ID_I225_LM 0x15F2
#define IGC_DEV_ID_I225_V 0x15F3
#define IGC_DEV_ID_I225_I 0x15F8
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
#define IGC_DEV_ID_I225_K2 0x3101
#define IGC_DEV_ID_I225_LMVP 0x5502
#define IGC_DEV_ID_I226_K 0x5504
#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I226_LM 0x125B
#define IGC_DEV_ID_I226_V 0x125C
#define IGC_DEV_ID_I226_IT 0x125D
#define IGC_DEV_ID_I221_V 0x125E
#define IGC_DEV_ID_I226_BLANK_NVM 0x125F
#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
/* Function pointers for the MAC. */
struct igc_mac_operations {
s32 (*check_for_link)(struct igc_hw *hw);
s32 (*reset_hw)(struct igc_hw *hw);
s32 (*init_hw)(struct igc_hw *hw);
s32 (*setup_physical_interface)(struct igc_hw *hw);
void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index);
s32 (*read_mac_addr)(struct igc_hw *hw);
s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed,
u16 *duplex);
s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask);
void (*release_swfw_sync)(struct igc_hw *hw, u16 mask);
};
enum igc_mac_type {
igc_undefined = 0,
igc_i225,
igc_num_macs /* List is 1-based, so subtract 1 for true count. */
};
enum igc_phy_type {
igc_phy_unknown = 0,
igc_phy_none,
igc_phy_i225,
};
enum igc_media_type {
igc_media_type_unknown = 0,
igc_media_type_copper = 1,
igc_num_media_types
};
enum igc_nvm_type {
igc_nvm_unknown = 0,
igc_nvm_eeprom_spi,
igc_nvm_flash_hw,
igc_nvm_invm,
};
struct igc_info {
s32 (*get_invariants)(struct igc_hw *hw);
struct igc_mac_operations *mac_ops;
const struct igc_phy_operations *phy_ops;
struct igc_nvm_operations *nvm_ops;
};
extern const struct igc_info igc_base_info;
struct igc_mac_info {
struct igc_mac_operations ops;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
enum igc_mac_type type;
u32 mc_filter_type;
u16 mta_reg_count;
u16 uta_reg_count;
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
bool asf_firmware_present;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
bool get_link_status;
};
struct igc_nvm_operations {
s32 (*acquire)(struct igc_hw *hw);
s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data);
void (*release)(struct igc_hw *hw);
s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data);
s32 (*update)(struct igc_hw *hw);
s32 (*validate)(struct igc_hw *hw);
};
struct igc_phy_operations {
s32 (*acquire)(struct igc_hw *hw);
s32 (*check_reset_block)(struct igc_hw *hw);
s32 (*force_speed_duplex)(struct igc_hw *hw);
s32 (*get_phy_info)(struct igc_hw *hw);
s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data);
void (*release)(struct igc_hw *hw);
s32 (*reset)(struct igc_hw *hw);
s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data);
};
struct igc_nvm_info {
struct igc_nvm_operations ops;
enum igc_nvm_type type;
u16 word_size;
u16 delay_usec;
u16 address_bits;
u16 opcode_bits;
u16 page_size;
};
struct igc_phy_info {
struct igc_phy_operations ops;
enum igc_phy_type type;
u32 addr;
u32 id;
u32 reset_delay_us; /* in usec */
u32 revision;
enum igc_media_type media_type;
u16 autoneg_advertised;
u16 autoneg_mask;
u8 mdix;
bool is_mdix;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
struct igc_bus_info {
u16 func;
u16 pci_cmd_word;
};
enum igc_fc_mode {
igc_fc_none = 0,
igc_fc_rx_pause,
igc_fc_tx_pause,
igc_fc_full,
igc_fc_default = 0xFF
};
struct igc_fc_info {
u32 high_water; /* Flow control high-water mark */
u32 low_water; /* Flow control low-water mark */
u16 pause_time; /* Flow control pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
enum igc_fc_mode current_mode; /* Type of flow control */
enum igc_fc_mode requested_mode;
};
struct igc_dev_spec_base {
bool clear_semaphore_once;
bool eee_enable;
};
struct igc_hw {
void *back;
u8 __iomem *hw_addr;
unsigned long io_base;
struct igc_mac_info mac;
struct igc_fc_info fc;
struct igc_nvm_info nvm;
struct igc_phy_info phy;
struct igc_bus_info bus;
union {
struct igc_dev_spec_base _base;
} dev_spec;
u16 device_id;
u16 subsystem_vendor_id;
u16 subsystem_device_id;
u16 vendor_id;
u8 revision_id;
};
/* Statistics counters collected by the MAC */
struct igc_hw_stats {
u64 crcerrs;
u64 algnerrc;
u64 symerrs;
u64 rxerrc;
u64 mpc;
u64 scc;
u64 ecol;
u64 mcc;
u64 latecol;
u64 colc;
u64 dc;
u64 tncrs;
u64 sec;
u64 cexterr;
u64 rlec;
u64 xonrxc;
u64 xontxc;
u64 xoffrxc;
u64 xofftxc;
u64 fcruc;
u64 prc64;
u64 prc127;
u64 prc255;
u64 prc511;
u64 prc1023;
u64 prc1522;
u64 tlpic;
u64 rlpic;
u64 gprc;
u64 bprc;
u64 mprc;
u64 gptc;
u64 gorc;
u64 gotc;
u64 rnbc;
u64 ruc;
u64 rfc;
u64 roc;
u64 rjc;
u64 mgprc;
u64 mgpdc;
u64 mgptc;
u64 tor;
u64 tot;
u64 tpr;
u64 tpt;
u64 ptc64;
u64 ptc127;
u64 ptc255;
u64 ptc511;
u64 ptc1023;
u64 ptc1522;
u64 mptc;
u64 bptc;
u64 tsctc;
u64 tsctfc;
u64 iac;
u64 htdpmc;
u64 rpthc;
u64 hgptc;
u64 hgorc;
u64 hgotc;
u64 lenerrs;
u64 scvpc;
u64 hrmpc;
u64 doosync;
u64 o2bgptc;
u64 o2bspc;
u64 b2ospc;
u64 b2ogprc;
};
struct net_device *igc_get_hw_dev(struct igc_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
#endif /* _IGC_HW_H_ */

645
igc_i225.c Normal file

@ -0,0 +1,645 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include <linux/delay.h>
#include "igc_hw.h"
/**
* igc_acquire_nvm_i225 - Request access to the EEPROM
* @hw: pointer to the HW structure
*
* Acquire the necessary semaphores for exclusive access to the EEPROM.
* Set the EEPROM access request bit and wait for EEPROM access grant bit.
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -IGC_ERR_NVM (-1).
*/
static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
{
return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}
/**
* igc_release_nvm_i225 - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit,
* then release the semaphores acquired.
*/
static void igc_release_nvm_i225(struct igc_hw *hw)
{
igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}
/**
* igc_get_hw_semaphore_i225 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
*/
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
{
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
u32 swsm;
/* Get the SW semaphore */
while (i < timeout) {
swsm = rd32(IGC_SWSM);
if (!(swsm & IGC_SWSM_SMBI))
break;
usleep_range(500, 600);
i++;
}
if (i == timeout) {
/* In rare circumstances, the SW semaphore may already be held
* unintentionally. Clear the semaphore once before giving up.
*/
if (hw->dev_spec._base.clear_semaphore_once) {
hw->dev_spec._base.clear_semaphore_once = false;
igc_put_hw_semaphore(hw);
for (i = 0; i < timeout; i++) {
swsm = rd32(IGC_SWSM);
if (!(swsm & IGC_SWSM_SMBI))
break;
usleep_range(500, 600);
}
}
/* If we do not have the semaphore here, we have to give up. */
if (i == timeout) {
hw_dbg("Driver can't access device - SMBI bit is set.\n");
return -IGC_ERR_NVM;
}
}
/* Get the FW semaphore. */
for (i = 0; i < timeout; i++) {
swsm = rd32(IGC_SWSM);
wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI)
break;
usleep_range(500, 600);
}
if (i == timeout) {
/* Release semaphores */
igc_put_hw_semaphore(hw);
hw_dbg("Driver can't access the NVM\n");
return -IGC_ERR_NVM;
}
return 0;
}
/**
* igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
*/
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
s32 i = 0, timeout = 200;
u32 fwmask = mask << 16;
u32 swmask = mask;
s32 ret_val = 0;
u32 swfw_sync;
while (i < timeout) {
if (igc_get_hw_semaphore_i225(hw)) {
ret_val = -IGC_ERR_SWFW_SYNC;
goto out;
}
swfw_sync = rd32(IGC_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/* Firmware currently using resource (fwmask) */
igc_put_hw_semaphore(hw);
mdelay(5);
i++;
}
if (i == timeout) {
hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -IGC_ERR_SWFW_SYNC;
goto out;
}
swfw_sync |= swmask;
wr32(IGC_SW_FW_SYNC, swfw_sync);
igc_put_hw_semaphore(hw);
out:
return ret_val;
}
/**
* igc_release_swfw_sync_i225 - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to release
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
*/
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
u32 swfw_sync;
while (igc_get_hw_semaphore_i225(hw))
; /* Empty */
swfw_sync = rd32(IGC_SW_FW_SYNC);
swfw_sync &= ~mask;
wr32(IGC_SW_FW_SYNC, swfw_sync);
igc_put_hw_semaphore(hw);
}
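/* Editor's sketch (not part of this commit): the intended bracketing
 * pattern for the two helpers above. IGC_SWFW_PHY0_SM is assumed to be
 * the PHY semaphore mask from igc_defines.h.
 */
static s32 example_locked_phy_access(struct igc_hw *hw)
{
	s32 ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_PHY0_SM);

	if (ret_val)
		return ret_val;
	/* ... exclusive PHY/NVM register access goes here ... */
	igc_release_swfw_sync_i225(hw, IGC_SWFW_PHY0_SM);
	return 0;
}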
/**
* igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the Shadow Ram to read
* @words: number of words to read
* @data: word read from the Shadow Ram
*
* Reads a 16 bit word from the Shadow Ram using the EERD register.
* Uses necessary synchronization semaphores.
*/
static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
* because of the forceful takeover procedure. However, it is more efficient
* to read in bursts than synchronizing access for each word.
*/
for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
IGC_EERD_EEWR_MAX_COUNT : (words - i);
status = hw->nvm.ops.acquire(hw);
if (status)
break;
status = igc_read_nvm_eerd(hw, offset, count, data + i);
hw->nvm.ops.release(hw);
if (status)
break;
}
return status;
}
/**
* igc_write_nvm_srwr - Write to Shadow Ram using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow Ram to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the Shadow Ram
*
* Writes data to Shadow Ram at offset using EEWR register.
*
* If igc_update_nvm_checksum is not called after this function, the
* Shadow Ram will most likely contain an invalid checksum.
*/
static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
u16 *data)
{
struct igc_nvm_info *nvm = &hw->nvm;
s32 ret_val = -IGC_ERR_NVM;
u32 attempts = 100000;
u32 i, k, eewr = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
goto out;
}
for (i = 0; i < words; i++) {
eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
(data[i] << IGC_NVM_RW_REG_DATA) |
IGC_NVM_RW_REG_START;
wr32(IGC_SRWR, eewr);
for (k = 0; k < attempts; k++) {
if (IGC_NVM_RW_REG_DONE &
rd32(IGC_SRWR)) {
ret_val = 0;
break;
}
udelay(5);
}
if (ret_val) {
hw_dbg("Shadow RAM write EEWR timed out\n");
break;
}
}
out:
return ret_val;
}
/**
* igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow RAM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the Shadow RAM
*
* Writes data to Shadow RAM at offset using EEWR register.
*
* If igc_update_nvm_checksum is not called after this function, the
* data will not be committed to FLASH and also Shadow RAM will most likely
* contain an invalid checksum.
*
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
* partially written.
*/
static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
* because of the forceful takeover procedure. However, it is more efficient
* to write in bursts than synchronizing access for each word.
*/
for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
IGC_EERD_EEWR_MAX_COUNT : (words - i);
status = hw->nvm.ops.acquire(hw);
if (status)
break;
status = igc_write_nvm_srwr(hw, offset, count, data + i);
hw->nvm.ops.release(hw);
if (status)
break;
}
return status;
}
/**
* igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
*/
static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
{
s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count,
u16 *data);
s32 status = 0;
status = hw->nvm.ops.acquire(hw);
if (status)
goto out;
/* Temporarily replace the semaphore-grabbing read function with one
* that skips the semaphore handling; we already hold the semaphore
* here.
*/
read_op_ptr = hw->nvm.ops.read;
hw->nvm.ops.read = igc_read_nvm_eerd;
status = igc_validate_nvm_checksum(hw);
/* Revert original read operation. */
hw->nvm.ops.read = read_op_ptr;
hw->nvm.ops.release(hw);
out:
return status;
}
/**
* igc_pool_flash_update_done_i225 - Poll FLUDONE status
* @hw: pointer to the HW structure
*/
static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
{
s32 ret_val = -IGC_ERR_NVM;
u32 i, reg;
for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
reg = rd32(IGC_EECD);
if (reg & IGC_EECD_FLUDONE_I225) {
ret_val = 0;
break;
}
udelay(5);
}
return ret_val;
}
/**
* igc_update_flash_i225 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
*/
static s32 igc_update_flash_i225(struct igc_hw *hw)
{
s32 ret_val = 0;
u32 flup;
ret_val = igc_pool_flash_update_done_i225(hw);
if (ret_val == -IGC_ERR_NVM) {
hw_dbg("Flash update time out\n");
goto out;
}
flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225;
wr32(IGC_EECD, flup);
ret_val = igc_pool_flash_update_done_i225(hw);
if (ret_val)
hw_dbg("Flash update time out\n");
else
hw_dbg("Flash update complete\n");
out:
return ret_val;
}
/**
* igc_update_nvm_checksum_i225 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM. Next commit EEPROM data onto the Flash.
*/
static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
{
u16 checksum = 0;
s32 ret_val = 0;
u16 i, nvm_data;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
if (ret_val) {
hw_dbg("EEPROM read failed\n");
goto out;
}
ret_val = hw->nvm.ops.acquire(hw);
if (ret_val)
goto out;
/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
* because we do not want to take the synchronization
* semaphores twice here.
*/
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
if (ret_val) {
hw->nvm.ops.release(hw);
hw_dbg("NVM Read Error while updating checksum.\n");
goto out;
}
checksum += nvm_data;
}
checksum = (u16)NVM_SUM - checksum;
ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val) {
hw->nvm.ops.release(hw);
hw_dbg("NVM Write Error while updating checksum.\n");
goto out;
}
hw->nvm.ops.release(hw);
ret_val = igc_update_flash_i225(hw);
out:
return ret_val;
}
/**
* igc_get_flash_presence_i225 - Check if flash device is detected
* @hw: pointer to the HW structure
*/
bool igc_get_flash_presence_i225(struct igc_hw *hw)
{
bool ret_val = false;
u32 eec = 0;
eec = rd32(IGC_EECD);
if (eec & IGC_EECD_FLASH_DETECTED_I225)
ret_val = true;
return ret_val;
}
/**
* igc_init_nvm_params_i225 - Init NVM func ptrs.
* @hw: pointer to the HW structure
*/
s32 igc_init_nvm_params_i225(struct igc_hw *hw)
{
struct igc_nvm_info *nvm = &hw->nvm;
nvm->ops.acquire = igc_acquire_nvm_i225;
nvm->ops.release = igc_release_nvm_i225;
/* NVM Function Pointers */
if (igc_get_flash_presence_i225(hw)) {
hw->nvm.type = igc_nvm_flash_hw;
nvm->ops.read = igc_read_nvm_srrd_i225;
nvm->ops.write = igc_write_nvm_srwr_i225;
nvm->ops.validate = igc_validate_nvm_checksum_i225;
nvm->ops.update = igc_update_nvm_checksum_i225;
} else {
hw->nvm.type = igc_nvm_invm;
nvm->ops.read = igc_read_nvm_eerd;
nvm->ops.write = NULL;
nvm->ops.validate = NULL;
nvm->ops.update = NULL;
}
return 0;
}
/**
* igc_set_eee_i225 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @adv2p5G: boolean flag enabling 2.5G EEE advertisement
* @adv1G: boolean flag enabling 1G EEE advertisement
* @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE based on setting in dev_spec structure.
**/
s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
bool adv100M)
{
u32 ipcnfg, eeer;
ipcnfg = rd32(IGC_IPCNFG);
eeer = rd32(IGC_EEER);
/* enable or disable per user setting */
if (hw->dev_spec._base.eee_enable) {
u32 eee_su = rd32(IGC_EEE_SU);
if (adv100M)
ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
else
ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;
if (adv1G)
ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
else
ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;
if (adv2p5G)
ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
else
ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;
eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
IGC_EEER_LPI_FC);
/* This bit should not be set in normal operation. */
if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
hw_dbg("LPI Clock Stop Bit should not be set!\n");
} else {
ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
IGC_IPCNFG_EEE_100M_AN);
eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
IGC_EEER_LPI_FC);
}
wr32(IGC_IPCNFG, ipcnfg);
wr32(IGC_EEER, eeer);
rd32(IGC_IPCNFG);
rd32(IGC_EEER);
return IGC_SUCCESS;
}
/**
* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds
* @hw: pointer to the HW structure
* @link: bool indicating link status
*
* Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
* settings, otherwise specify that there is no LTR requirement.
*/
s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
{
u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
u16 speed, duplex;
s32 size;
/* If we do not have link, LTR thresholds are zero. */
if (link) {
hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
/* Check if using copper interface with EEE enabled or if the
* link speed is 10 Mbps.
*/
if (hw->dev_spec._base.eee_enable &&
speed != SPEED_10) {
/* EEE enabled, so send LTRMAX threshold. */
ltrc = rd32(IGC_LTRC) |
IGC_LTRC_EEEMS_EN;
wr32(IGC_LTRC, ltrc);
/* Calculate tw_system (nsec). */
if (speed == SPEED_100) {
tw_system = ((rd32(IGC_EEE_SU) &
IGC_TW_SYSTEM_100_MASK) >>
IGC_TW_SYSTEM_100_SHIFT) * 500;
} else {
tw_system = (rd32(IGC_EEE_SU) &
IGC_TW_SYSTEM_1000_MASK) * 500;
}
} else {
tw_system = 0;
}
/* Get the Rx packet buffer size. */
size = rd32(IGC_RXPBS) &
IGC_RXPBS_SIZE_I225_MASK;
/* Calculations vary based on DMAC settings. */
if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) {
size -= (rd32(IGC_DMACR) &
IGC_DMACR_DMACTHR_MASK) >>
IGC_DMACR_DMACTHR_SHIFT;
/* Convert size to bits. */
size *= 1024 * 8;
} else {
/* Convert size from KB to bytes and then to bits. */
size *= 1024;
size *= 8;
}
if (size < 0) {
hw_dbg("Invalid effective Rx buffer size %d\n",
size);
return -IGC_ERR_CONFIG;
}
/* Calculate the thresholds. Since speed is in Mbps, simplify
* the calculation by multiplying size/speed by 1000 for result
* to be in nsec before dividing by the scale in nsec. Set the
* scale such that the LTR threshold fits in the register.
*/
ltr_min = (1000 * size) / speed;
ltr_max = ltr_min + tw_system;
scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
IGC_LTRMINV_SCALE_32768;
scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
IGC_LTRMAXV_SCALE_32768;
ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
ltr_min -= 1;
ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
ltr_max -= 1;
/* Only write the LTR thresholds if they differ from before. */
ltrv = rd32(IGC_LTRMINV);
if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
(scale_min << IGC_LTRMINV_SCALE_SHIFT);
wr32(IGC_LTRMINV, ltrv);
}
ltrv = rd32(IGC_LTRMAXV);
if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
(scale_max << IGC_LTRMAXV_SCALE_SHIFT);
wr32(IGC_LTRMAXV, ltrv);
}
}
return IGC_SUCCESS;
}
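To make the threshold arithmetic above concrete, a small standalone sketch of the non-DMAC ltr_min computation with assumed example inputs (a 32 KB Rx packet buffer on a 1000 Mbps link); the helper is illustrative, not driver code:
/* 32 KB = 262144 bits; ltr_min = (1000 * 262144) / 1000 = 262144 ns,
 * which encodes as 262144 / 1024 - 1 = 255 in 1024 ns scale units.
 */
static unsigned int example_ltr_min_1024ns(unsigned int rxpbs_kb,
					   unsigned int speed_mbps)
{
	unsigned int size_bits = rxpbs_kb * 1024 * 8;
	unsigned int ltr_min_ns = (1000 * size_bits) / speed_mbps;

	return ltr_min_ns / 1024 - 1;
}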

16
igc_i225.h Normal file

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_I225_H_
#define _IGC_I225_H_
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask);
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask);
s32 igc_init_nvm_params_i225(struct igc_hw *hw);
bool igc_get_flash_presence_i225(struct igc_hw *hw);
s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
bool adv100M);
s32 igc_set_ltr_i225(struct igc_hw *hw, bool link);
#endif

881
igc_mac.c Normal file

@ -0,0 +1,881 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include <linux/pci.h>
#include <linux/delay.h>
#include "igc_mac.h"
#include "igc_hw.h"
/**
* igc_disable_pcie_master - Disables PCI-express master access
* @hw: pointer to the HW structure
*
* Returns 0 if successful, else returns -10
* (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
* the master requests to be disabled.
*
* Disables PCI-Express master access and verifies there are no pending
* requests.
*/
s32 igc_disable_pcie_master(struct igc_hw *hw)
{
s32 timeout = MASTER_DISABLE_TIMEOUT;
s32 ret_val = 0;
u32 ctrl;
ctrl = rd32(IGC_CTRL);
ctrl |= IGC_CTRL_GIO_MASTER_DISABLE;
wr32(IGC_CTRL, ctrl);
while (timeout) {
if (!(rd32(IGC_STATUS) &
IGC_STATUS_GIO_MASTER_ENABLE))
break;
usleep_range(2000, 3000);
timeout--;
}
if (!timeout) {
hw_dbg("Master requests are pending.\n");
ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING;
goto out;
}
out:
return ret_val;
}
/**
* igc_init_rx_addrs - Initialize receive addresses
* @hw: pointer to the HW structure
* @rar_count: receive address registers
*
* Setup the receive address registers by setting the base receive address
* register to the device's MAC address and clearing all the other receive
* address registers to 0.
*/
void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count)
{
u8 mac_addr[ETH_ALEN] = {0};
u32 i;
/* Setup the receive address */
hw_dbg("Programming MAC Address into RAR[0]\n");
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
/* Zero out the other (rar_entry_count - 1) receive addresses */
hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
for (i = 1; i < rar_count; i++)
hw->mac.ops.rar_set(hw, mac_addr, i);
}
/**
* igc_set_fc_watermarks - Set flow control high/low watermarks
* @hw: pointer to the HW structure
*
* Sets the flow control high/low threshold (watermark) registers. If
* flow control XON frame transmission is enabled, then set XON frame
* transmission as well.
*/
static s32 igc_set_fc_watermarks(struct igc_hw *hw)
{
u32 fcrtl = 0, fcrth = 0;
/* Set the flow control receive threshold registers. Normally,
* these registers will be set to a default threshold that may be
* adjusted later by the driver's runtime code. However, if the
* ability to transmit pause frames is not enabled, then these
* registers will be set to 0.
*/
if (hw->fc.current_mode & igc_fc_tx_pause) {
/* We need to set up the Receive Threshold high and low water
* marks as well as (optionally) enabling the transmission of
* XON frames.
*/
fcrtl = hw->fc.low_water;
if (hw->fc.send_xon)
fcrtl |= IGC_FCRTL_XONE;
fcrth = hw->fc.high_water;
}
wr32(IGC_FCRTL, fcrtl);
wr32(IGC_FCRTH, fcrth);
return 0;
}
/**
* igc_setup_link - Setup flow control and link settings
* @hw: pointer to the HW structure
*
* Determines which flow control settings to use, then configures flow
* control. Calls the appropriate media-specific link configuration
* function. Assuming the adapter has a valid link partner, a valid link
* should be established. Assumes the hardware has previously been reset
* and the transmitter and receiver are not enabled.
*/
s32 igc_setup_link(struct igc_hw *hw)
{
s32 ret_val = 0;
/* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
if (igc_check_reset_block(hw))
goto out;
/* If requested flow control is set to default, set flow control
* to both 'rx' and 'tx' pause frames.
*/
if (hw->fc.requested_mode == igc_fc_default)
hw->fc.requested_mode = igc_fc_full;
/* We want to save off the original Flow Control configuration just
* in case we get disconnected and then reconnected into a different
* hub or switch with different Flow Control capabilities.
*/
hw->fc.current_mode = hw->fc.requested_mode;
hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
/* Call the necessary media_type subroutine to configure the link. */
ret_val = hw->mac.ops.setup_physical_interface(hw);
if (ret_val)
goto out;
/* Initialize the flow control address, type, and PAUSE timer
* registers to their default values. This is done even if flow
* control is disabled, because it does not hurt anything to
* initialize these registers.
*/
hw_dbg("Initializing the Flow Control address, type and timer regs\n");
wr32(IGC_FCT, FLOW_CONTROL_TYPE);
wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW);
wr32(IGC_FCTTV, hw->fc.pause_time);
ret_val = igc_set_fc_watermarks(hw);
out:
return ret_val;
}
/**
* igc_force_mac_fc - Force the MAC's flow control settings
* @hw: pointer to the HW structure
*
* Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
* device control register to reflect the adapter settings. TFCE and RFCE
* need to be explicitly set by software when a copper PHY is used because
* autonegotiation is managed by the PHY rather than the MAC. Software must
* also configure these bits when link is forced on a fiber connection.
*/
s32 igc_force_mac_fc(struct igc_hw *hw)
{
s32 ret_val = 0;
u32 ctrl;
ctrl = rd32(IGC_CTRL);
/* Because we didn't get link via the internal auto-negotiation
* mechanism (we either forced link or we got link via PHY
* auto-neg), we have to manually enable/disable transmit and
* receive flow control.
*
* The "Case" statement below enables/disable flow control
* according to the "hw->fc.current_mode" parameter.
*
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
* but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
switch (hw->fc.current_mode) {
case igc_fc_none:
ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE));
break;
case igc_fc_rx_pause:
ctrl &= (~IGC_CTRL_TFCE);
ctrl |= IGC_CTRL_RFCE;
break;
case igc_fc_tx_pause:
ctrl &= (~IGC_CTRL_RFCE);
ctrl |= IGC_CTRL_TFCE;
break;
case igc_fc_full:
ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE);
break;
default:
hw_dbg("Flow control param set incorrectly\n");
ret_val = -IGC_ERR_CONFIG;
goto out;
}
wr32(IGC_CTRL, ctrl);
out:
return ret_val;
}
/**
* igc_clear_hw_cntrs_base - Clear base hardware counters
* @hw: pointer to the HW structure
*
* Clears the base hardware counters by reading the counter registers.
*/
void igc_clear_hw_cntrs_base(struct igc_hw *hw)
{
rd32(IGC_CRCERRS);
rd32(IGC_MPC);
rd32(IGC_SCC);
rd32(IGC_ECOL);
rd32(IGC_MCC);
rd32(IGC_LATECOL);
rd32(IGC_COLC);
rd32(IGC_RERC);
rd32(IGC_DC);
rd32(IGC_RLEC);
rd32(IGC_XONRXC);
rd32(IGC_XONTXC);
rd32(IGC_XOFFRXC);
rd32(IGC_XOFFTXC);
rd32(IGC_FCRUC);
rd32(IGC_GPRC);
rd32(IGC_BPRC);
rd32(IGC_MPRC);
rd32(IGC_GPTC);
rd32(IGC_GORCL);
rd32(IGC_GORCH);
rd32(IGC_GOTCL);
rd32(IGC_GOTCH);
rd32(IGC_RNBC);
rd32(IGC_RUC);
rd32(IGC_RFC);
rd32(IGC_ROC);
rd32(IGC_RJC);
rd32(IGC_TORL);
rd32(IGC_TORH);
rd32(IGC_TOTL);
rd32(IGC_TOTH);
rd32(IGC_TPR);
rd32(IGC_TPT);
rd32(IGC_MPTC);
rd32(IGC_BPTC);
rd32(IGC_PRC64);
rd32(IGC_PRC127);
rd32(IGC_PRC255);
rd32(IGC_PRC511);
rd32(IGC_PRC1023);
rd32(IGC_PRC1522);
rd32(IGC_PTC64);
rd32(IGC_PTC127);
rd32(IGC_PTC255);
rd32(IGC_PTC511);
rd32(IGC_PTC1023);
rd32(IGC_PTC1522);
rd32(IGC_ALGNERRC);
rd32(IGC_RXERRC);
rd32(IGC_TNCRS);
rd32(IGC_HTDPMC);
rd32(IGC_TSCTC);
rd32(IGC_MGTPRC);
rd32(IGC_MGTPDC);
rd32(IGC_MGTPTC);
rd32(IGC_IAC);
rd32(IGC_RPTHC);
rd32(IGC_TLPIC);
rd32(IGC_RLPIC);
rd32(IGC_HGPTC);
rd32(IGC_RXDMTC);
rd32(IGC_HGORCL);
rd32(IGC_HGORCH);
rd32(IGC_HGOTCL);
rd32(IGC_HGOTCH);
rd32(IGC_LENERRS);
}
/**
* igc_rar_set - Set receive address register
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
* @index: receive address array register
*
* Sets the receive address array register at index to the address passed
* in by addr.
*/
void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
/* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32)addr[0] |
((u32)addr[1] << 8) |
((u32)addr[2] << 16) | ((u32)addr[3] << 24));
rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
/* If MAC address zero, no need to set the AV bit */
if (rar_low || rar_high)
rar_high |= IGC_RAH_AV;
/* Some bridges will combine consecutive 32-bit writes into
* a single burst write, which will malfunction on some parts.
* The flushes avoid this.
*/
wr32(IGC_RAL(index), rar_low);
wrfl();
wr32(IGC_RAH(index), rar_high);
wrfl();
}
/**
* igc_check_for_copper_link - Check for link (Copper)
* @hw: pointer to the HW structure
*
* Checks to see if the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
*/
s32 igc_check_for_copper_link(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
bool link = false;
s32 ret_val;
/* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status) {
ret_val = 0;
goto out;
}
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
ret_val = igc_phy_has_link(hw, 1, 0, &link);
if (ret_val)
goto out;
if (!link)
goto out; /* No link detected */
mac->get_link_status = false;
/* Check if there was a downshift; this must be checked
* immediately after link-up
*/
igc_check_downshift(hw);
/* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
if (!mac->autoneg) {
ret_val = -IGC_ERR_CONFIG;
goto out;
}
/* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
igc_config_collision_dist(hw);
/* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
*/
ret_val = igc_config_fc_after_link_up(hw);
if (ret_val)
hw_dbg("Error configuring flow control\n");
out:
/* Now that we are aware of our link settings, we can set the LTR
* thresholds.
*/
ret_val = igc_set_ltr_i225(hw, link);
return ret_val;
}
/**
* igc_config_collision_dist - Configure collision distance
* @hw: pointer to the HW structure
*
* Configures the collision distance to the default value and is used
* during link setup. Currently no func pointer exists and all
* implementations are handled in the generic version of this function.
*/
void igc_config_collision_dist(struct igc_hw *hw)
{
u32 tctl;
tctl = rd32(IGC_TCTL);
tctl &= ~IGC_TCTL_COLD;
tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT;
wr32(IGC_TCTL, tctl);
wrfl();
}
/**
* igc_config_fc_after_link_up - Configures flow control after link
* @hw: pointer to the HW structure
*
* Checks the status of auto-negotiation after link up to ensure that the
* speed and duplex were not forced. If the link needed to be forced, then
* flow control needs to be forced also. If auto-negotiation is enabled
* and did not fail, then we configure flow control based on our link
* partner.
*/
s32 igc_config_fc_after_link_up(struct igc_hw *hw)
{
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
struct igc_mac_info *mac = &hw->mac;
u16 speed, duplex;
s32 ret_val = 0;
/* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
*/
if (mac->autoneg_failed)
ret_val = igc_force_mac_fc(hw);
if (ret_val) {
hw_dbg("Error forcing flow control settings\n");
goto out;
}
/* Check for the case where we have copper media and auto-neg is
* enabled. In this case, we need to check and see if Auto-Neg
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
if (mac->autoneg) {
/* Read the MII Status Register and check to see if AutoNeg
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
&mii_status_reg);
if (ret_val)
goto out;
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
&mii_status_reg);
if (ret_val)
goto out;
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
hw_dbg("Copper PHY and Auto Neg has not completed.\n");
goto out;
}
/* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement
* Register (Address 4) and the Auto-Negotiation Base
* Page Ability Register (Address 5) to determine how
* flow control was negotiated.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
&mii_nway_adv_reg);
if (ret_val)
goto out;
ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
&mii_nway_lp_ability_reg);
if (ret_val)
goto out;
/* Two bits in the Auto Negotiation Advertisement Register
* (Address 4) and two bits in the Auto Negotiation Base
* Page Ability Register (Address 5) determine flow control
* for both the PHY and the link partner. The following
* table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
* 1999, describes these PAUSE resolution bits and how flow
* control is determined based upon these settings.
* NOTE: DC = Don't Care
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
*-------|---------|-------|---------|--------------------
* 0 | 0 | DC | DC | igc_fc_none
* 0 | 1 | 0 | DC | igc_fc_none
* 0 | 1 | 1 | 0 | igc_fc_none
* 0 | 1 | 1 | 1 | igc_fc_tx_pause
* 1 | 0 | 0 | DC | igc_fc_none
* 1 | DC | 1 | DC | igc_fc_full
* 1 | 1 | 0 | 0 | igc_fc_none
* 1 | 1 | 0 | 1 | igc_fc_rx_pause
*
* Are both PAUSE bits set to 1? If so, this implies
* Symmetric Flow Control is enabled at both ends. The
* ASM_DIR bits are irrelevant per the spec.
*
* For Symmetric Flow Control:
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|--------------------
* 1 | DC | 1 | DC | IGC_fc_full
*
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
/* Now we need to check if the user selected RX ONLY
* pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise RX
* ONLY. Hence, we must now check to see if we need to
* turn OFF the TRANSMISSION of PAUSE frames.
*/
if (hw->fc.requested_mode == igc_fc_full) {
hw->fc.current_mode = igc_fc_full;
hw_dbg("Flow Control = FULL.\n");
} else {
hw->fc.current_mode = igc_fc_rx_pause;
hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
}
/* For receiving PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|--------------------
* 0 | 1 | 1 | 1 | igc_fc_tx_pause
*/
else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = igc_fc_tx_pause;
hw_dbg("Flow Control = TX PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|--------------------
* 1 | 1 | 0 | 1 | igc_fc_rx_pause
*/
else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = igc_fc_rx_pause;
hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
/* Per the IEEE spec, at this point flow control should be
* disabled. However, we want to consider that we could
* be connected to a legacy switch that doesn't advertise
* desired flow control, but can be forced on the link
* partner. So if we advertised no flow control, that is
* what we will resolve to. If we advertised some kind of
* receive capability (Rx Pause Only or Full Flow Control)
* and the link partner advertised none, we will configure
* ourselves to enable Rx Flow Control only. We can do
* this safely for two reasons: If the link partner really
* didn't want flow control enabled, and we enable Rx, no
* harm done since we won't be receiving any PAUSE frames
* anyway. If the intent on the link partner was to have
* flow control enabled, then by us enabling RX only, we
* can at least receive pause frames and process them.
* This is a good idea because in most cases, since we are
* predominantly a server NIC, more times than not we will
* be asked to delay transmission of packets than asking
* our link partner to pause transmission of frames.
*/
else if ((hw->fc.requested_mode == igc_fc_none) ||
(hw->fc.requested_mode == igc_fc_tx_pause) ||
(hw->fc.strict_ieee)) {
hw->fc.current_mode = igc_fc_none;
hw_dbg("Flow Control = NONE.\n");
} else {
hw->fc.current_mode = igc_fc_rx_pause;
hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
/* Now we need to do one last check... If we auto-
* negotiated to HALF DUPLEX, flow control should not be
* enabled per IEEE 802.3 spec.
*/
ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
hw_dbg("Error getting link speed and duplex\n");
goto out;
}
if (duplex == HALF_DUPLEX)
hw->fc.current_mode = igc_fc_none;
/* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
ret_val = igc_force_mac_fc(hw);
if (ret_val) {
hw_dbg("Error forcing flow control settings\n");
goto out;
}
}
out:
return ret_val;
}
/**
* igc_get_auto_rd_done - Check for auto read completion
* @hw: pointer to the HW structure
*
* Check EEPROM for Auto Read done bit.
*/
s32 igc_get_auto_rd_done(struct igc_hw *hw)
{
s32 ret_val = 0;
s32 i = 0;
while (i < AUTO_READ_DONE_TIMEOUT) {
if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD)
break;
usleep_range(1000, 2000);
i++;
}
if (i == AUTO_READ_DONE_TIMEOUT) {
hw_dbg("Auto read by HW from NVM has not completed.\n");
ret_val = -IGC_ERR_RESET;
goto out;
}
out:
return ret_val;
}
/**
* igc_get_speed_and_duplex_copper - Retrieve current speed/duplex
* @hw: pointer to the HW structure
* @speed: stores the current speed
* @duplex: stores the current duplex
*
* Read the status register for the current speed/duplex and store the current
* speed and duplex for copper connections.
*/
s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
u16 *duplex)
{
u32 status;
status = rd32(IGC_STATUS);
if (status & IGC_STATUS_SPEED_1000) {
/* For I225, STATUS will indicate 1G speed in both 1 Gbps
* and 2.5 Gbps link modes. An additional bit is used
* to differentiate between 1 Gbps and 2.5 Gbps.
*/
if (hw->mac.type == igc_i225 &&
(status & IGC_STATUS_SPEED_2500)) {
*speed = SPEED_2500;
hw_dbg("2500 Mbs, ");
} else {
*speed = SPEED_1000;
hw_dbg("1000 Mbs, ");
}
} else if (status & IGC_STATUS_SPEED_100) {
*speed = SPEED_100;
hw_dbg("100 Mbs, ");
} else {
*speed = SPEED_10;
hw_dbg("10 Mbs, ");
}
if (status & IGC_STATUS_FD) {
*duplex = FULL_DUPLEX;
hw_dbg("Full Duplex\n");
} else {
*duplex = HALF_DUPLEX;
hw_dbg("Half Duplex\n");
}
return 0;
}
/**
* igc_put_hw_semaphore - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
*/
void igc_put_hw_semaphore(struct igc_hw *hw)
{
u32 swsm;
swsm = rd32(IGC_SWSM);
swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI);
wr32(IGC_SWSM, swsm);
}
/**
* igc_enable_mng_pass_thru - Enable processing of ARPs
* @hw: pointer to the HW structure
*
* Verifies that the hardware needs to leave the interface enabled so that
* frames can be directed to and from the management interface.
*/
bool igc_enable_mng_pass_thru(struct igc_hw *hw)
{
bool ret_val = false;
u32 fwsm, factps;
u32 manc;
if (!hw->mac.asf_firmware_present)
goto out;
manc = rd32(IGC_MANC);
if (!(manc & IGC_MANC_RCV_TCO_EN))
goto out;
if (hw->mac.arc_subsystem_valid) {
fwsm = rd32(IGC_FWSM);
factps = rd32(IGC_FACTPS);
if (!(factps & IGC_FACTPS_MNGCG) &&
((fwsm & IGC_FWSM_MODE_MASK) ==
(igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) {
ret_val = true;
goto out;
}
} else {
if ((manc & IGC_MANC_SMBUS_EN) &&
!(manc & IGC_MANC_ASF_EN)) {
ret_val = true;
goto out;
}
}
out:
return ret_val;
}
/**
* igc_hash_mc_addr - Generate a multicast hash value
* @hw: pointer to the HW structure
* @mc_addr: pointer to a multicast address
*
* Generates a multicast address hash value which is used to determine
* the multicast filter table array address and new table value. See
* igc_mta_set()
**/
static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
u8 bit_shift = 0;
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
bit_shift++;
/* The portion of the address that is used for the hash table
* is determined by the mc_filter_type setting.
* The algorithm is such that there is a total of 8 bits of shifting.
* The bit_shift for a mc_filter_type of 0 represents the number of
* left-shifts where the MSB of mc_addr[5] would still fall within
* the hash_mask. Case 0 does this exactly. Since there are a total
* of 8 bits of shifting, then mc_addr[4] will shift right the
* remaining number of bits. Thus 8 - bit_shift. The rest of the
* cases are a variation of this algorithm...essentially raising the
* number of bits to shift mc_addr[5] left, while still keeping the
* 8-bit shifting total.
*
* For example, given the following Destination MAC Address and an
* MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask),
* we can see that the bit_shift for case 0 is 4. These are the hash
* values resulting from each mc_filter_type...
* [0] [1] [2] [3] [4] [5]
* 01 AA 00 12 34 56
* LSB MSB
*
* case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
* case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
* case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
* case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
*/
switch (hw->mac.mc_filter_type) {
default:
case 0:
break;
case 1:
bit_shift += 1;
break;
case 2:
bit_shift += 2;
break;
case 3:
bit_shift += 4;
break;
}
hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
(((u16)mc_addr[5]) << bit_shift)));
return hash_value;
}
/**
* igc_update_mc_addr_list - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
*
* Updates entire Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
**/
void igc_update_mc_addr_list(struct igc_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count)
{
u32 hash_value, hash_bit, hash_reg;
int i;
/* clear mta_shadow */
memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
/* update mta_shadow from mc_addr_list */
for (i = 0; (u32)i < mc_addr_count; i++) {
hash_value = igc_hash_mc_addr(hw, mc_addr_list);
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
mc_addr_list += ETH_ALEN;
}
/* replace the entire MTA table */
for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]);
wrfl();
}
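The worked example embedded in the igc_hash_mc_addr() comment is easy to check outside the kernel. This standalone program re-implements the mc_filter_type == 0 case and prints 0x563 for the documented address:
#include <stdint.h>
#include <stdio.h>

static uint32_t example_hash(const uint8_t *mc_addr, int mta_reg_count)
{
	uint32_t hash_mask = (mta_reg_count * 32) - 1;
	uint8_t bit_shift = 0;

	/* number of left-shifts where 0xFF still falls within the mask */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;
	return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc_addr[5] << bit_shift));
}

int main(void)
{
	const uint8_t addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };

	printf("0x%03X\n", (unsigned int)example_hash(addr, 128)); /* 0x563 */
	return 0;
}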

39
igc_mac.h Normal file

@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_MAC_H_
#define _IGC_MAC_H_
#include "igc_hw.h"
#include "igc_phy.h"
#include "igc_defines.h"
/* forward declaration */
s32 igc_disable_pcie_master(struct igc_hw *hw);
s32 igc_check_for_copper_link(struct igc_hw *hw);
s32 igc_config_fc_after_link_up(struct igc_hw *hw);
s32 igc_force_mac_fc(struct igc_hw *hw);
void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count);
s32 igc_setup_link(struct igc_hw *hw);
void igc_clear_hw_cntrs_base(struct igc_hw *hw);
s32 igc_get_auto_rd_done(struct igc_hw *hw);
void igc_put_hw_semaphore(struct igc_hw *hw);
void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index);
void igc_config_collision_dist(struct igc_hw *hw);
s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
u16 *duplex);
bool igc_enable_mng_pass_thru(struct igc_hw *hw);
void igc_update_mc_addr_list(struct igc_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count);
enum igc_mng_mode {
igc_mng_mode_none = 0,
igc_mng_mode_asf,
igc_mng_mode_pt,
igc_mng_mode_ipmi,
igc_mng_mode_host_if_only
};
#endif

5643
igc_main.c Normal file

File diff suppressed because it is too large

215
igc_nvm.c Normal file

@ -0,0 +1,215 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include "igc_mac.h"
#include "igc_nvm.h"
/**
* igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion
* @hw: pointer to the HW structure
* @ee_reg: EEPROM flag for polling
*
* Polls the EEPROM status bit for either read or write completion based
* upon the value of 'ee_reg'.
*/
static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
{
s32 ret_val = -IGC_ERR_NVM;
u32 attempts = 100000;
u32 i, reg = 0;
for (i = 0; i < attempts; i++) {
if (ee_reg == IGC_NVM_POLL_READ)
reg = rd32(IGC_EERD);
else
reg = rd32(IGC_EEWR);
if (reg & IGC_NVM_RW_REG_DONE) {
ret_val = 0;
break;
}
udelay(5);
}
return ret_val;
}
/**
* igc_acquire_nvm - Generic request for access to EEPROM
* @hw: pointer to the HW structure
*
* Set the EEPROM access request bit and wait for EEPROM access grant bit.
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -IGC_ERR_NVM (-1).
*/
s32 igc_acquire_nvm(struct igc_hw *hw)
{
s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
u32 eecd = rd32(IGC_EECD);
s32 ret_val = 0;
wr32(IGC_EECD, eecd | IGC_EECD_REQ);
eecd = rd32(IGC_EECD);
while (timeout) {
if (eecd & IGC_EECD_GNT)
break;
udelay(5);
eecd = rd32(IGC_EECD);
timeout--;
}
if (!timeout) {
eecd &= ~IGC_EECD_REQ;
wr32(IGC_EECD, eecd);
hw_dbg("Could not acquire NVM grant\n");
ret_val = -IGC_ERR_NVM;
}
return ret_val;
}
/**
* igc_release_nvm - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit.
*/
void igc_release_nvm(struct igc_hw *hw)
{
u32 eecd;
eecd = rd32(IGC_EECD);
eecd &= ~IGC_EECD_REQ;
wr32(IGC_EECD, eecd);
}
/**
* igc_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
* @words: number of words to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the EERD register.
*/
s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data)
{
struct igc_nvm_info *nvm = &hw->nvm;
u32 i, eerd = 0;
s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
ret_val = -IGC_ERR_NVM;
goto out;
}
for (i = 0; i < words; i++) {
eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) +
IGC_NVM_RW_REG_START;
wr32(IGC_EERD, eerd);
ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ);
if (ret_val)
break;
data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA);
}
out:
return ret_val;
}
/**
* igc_read_mac_addr - Read device MAC address
* @hw: pointer to the HW structure
*/
s32 igc_read_mac_addr(struct igc_hw *hw)
{
u32 rar_high;
u32 rar_low;
u16 i;
rar_high = rd32(IGC_RAH(0));
rar_low = rd32(IGC_RAL(0));
for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8));
for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8));
for (i = 0; i < ETH_ALEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
return 0;
}
/**
* igc_validate_nvm_checksum - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
*/
s32 igc_validate_nvm_checksum(struct igc_hw *hw)
{
u16 checksum = 0;
u16 i, nvm_data;
s32 ret_val = 0;
for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
checksum += nvm_data;
}
if (checksum != (u16)NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -IGC_ERR_NVM;
goto out;
}
out:
return ret_val;
}
/**
* igc_update_nvm_checksum - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM.
*/
s32 igc_update_nvm_checksum(struct igc_hw *hw)
{
u16 checksum = 0;
u16 i, nvm_data;
s32 ret_val;
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error while updating checksum.\n");
goto out;
}
checksum += nvm_data;
}
checksum = (u16)NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
out:
return ret_val;
}
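/* Worked example of the checksum invariant shared by the two functions
* above, assuming NVM_CHECKSUM_REG is 0x3F as on e1000-family parts: the
* words 0x00..0x3F must sum to NVM_SUM (0xBABA, mod 2^16). If words
* 0x00..0x3E sum to 0x1234, update writes 0xBABA - 0x1234 = 0xA886 into
* word 0x3F, and validation's running sum then comes out to 0xBABA.
*/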

14
igc_nvm.h Normal file

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_NVM_H_
#define _IGC_NVM_H_
s32 igc_acquire_nvm(struct igc_hw *hw);
void igc_release_nvm(struct igc_hw *hw);
s32 igc_read_mac_addr(struct igc_hw *hw);
s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
s32 igc_validate_nvm_checksum(struct igc_hw *hw);
s32 igc_update_nvm_checksum(struct igc_hw *hw);
#endif

811
igc_phy.c Normal file

@ -0,0 +1,811 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include "igc_phy.h"
/**
* igc_check_reset_block - Check if PHY reset is blocked
* @hw: pointer to the HW structure
*
* Read the PHY management control register and check whether a PHY reset
* is blocked. If a reset is not blocked return 0, otherwise
* return IGC_ERR_BLK_PHY_RESET (12).
*/
s32 igc_check_reset_block(struct igc_hw *hw)
{
u32 manc;
manc = rd32(IGC_MANC);
return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ?
IGC_ERR_BLK_PHY_RESET : 0;
}
/**
* igc_get_phy_id - Retrieve the PHY ID and revision
* @hw: pointer to the HW structure
*
* Reads the PHY registers and stores the PHY ID and possibly the PHY
* revision in the hardware structure.
*/
s32 igc_get_phy_id(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val = 0;
u16 phy_id;
ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
if (ret_val)
goto out;
phy->id = (u32)(phy_id << 16);
usleep_range(200, 500);
ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
if (ret_val)
goto out;
phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
out:
return ret_val;
}
/**
* igc_phy_has_link - Polls PHY for link
* @hw: pointer to the HW structure
* @iterations: number of times to poll for link
* @usec_interval: delay between polling attempts
* @success: pointer to whether polling was successful or not
*
* Polls the PHY status register for link, 'iterations' number of times.
*/
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
u32 usec_interval, bool *success)
{
u16 i, phy_status;
s32 ret_val = 0;
for (i = 0; i < iterations; i++) {
/* Some PHYs require the PHY_STATUS register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val && usec_interval > 0) {
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
if (usec_interval >= 1000)
mdelay(usec_interval / 1000);
else
udelay(usec_interval);
}
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
if (phy_status & MII_SR_LINK_STATUS)
break;
if (usec_interval >= 1000)
mdelay(usec_interval / 1000);
else
udelay(usec_interval);
}
*success = (i < iterations);
return ret_val;
}
/**
* igc_power_up_phy_copper - Restore copper link in case of PHY power down
* @hw: pointer to the HW structure
*
* In the case of a PHY power down to save power, or to turn off link during a
* driver unload, restore the link to previous settings.
*/
void igc_power_up_phy_copper(struct igc_hw *hw)
{
u16 mii_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
/**
* igc_power_down_phy_copper - Power down copper PHY
* @hw: pointer to the HW structure
*
* Power down PHY to save power when interface is down and wake on lan
* is not enabled.
*/
void igc_power_down_phy_copper(struct igc_hw *hw)
{
u16 mii_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
/* Temporary workaround - should be removed when the PHY implements
* the IEEE registers properly
*/
/* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/
usleep_range(1000, 2000);
}
/**
* igc_check_downshift - Checks whether a downshift in speed occurred
* @hw: pointer to the HW structure
*
* Success returns 0, Failure returns 1
*
* A downshift is detected by querying the PHY link health.
*/
s32 igc_check_downshift(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val;
switch (phy->type) {
case igc_phy_i225:
default:
/* speed downshift not supported */
phy->speed_downgraded = false;
ret_val = 0;
}
return ret_val;
}
/**
* igc_phy_hw_reset - PHY hardware reset
* @hw: pointer to the HW structure
*
* Verify the reset block is not blocking us from resetting. Acquire
* semaphore (if necessary) and read/set/write the device control reset
* bit in the PHY. Wait the appropriate delay time for the device to
* reset and release the semaphore (if necessary).
*/
s32 igc_phy_hw_reset(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
u32 phpm = 0, timeout = 10000;
s32 ret_val;
u32 ctrl;
ret_val = igc_check_reset_block(hw);
if (ret_val) {
ret_val = 0;
goto out;
}
ret_val = phy->ops.acquire(hw);
if (ret_val)
goto out;
phpm = rd32(IGC_I225_PHPM);
ctrl = rd32(IGC_CTRL);
wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
wrfl();
udelay(phy->reset_delay_us);
wr32(IGC_CTRL, ctrl);
wrfl();
/* SW should guarantee 100us for the completion of the PHY reset */
usleep_range(100, 150);
do {
phpm = rd32(IGC_I225_PHPM);
timeout--;
udelay(1);
} while (!(phpm & IGC_PHY_RST_COMP) && timeout);
if (!timeout)
hw_dbg("Timeout is expired after a phy reset\n");
usleep_range(100, 150);
phy->ops.release(hw);
out:
return ret_val;
}
/**
* igc_phy_setup_autoneg - Configure PHY for auto-negotiation
* @hw: pointer to the HW structure
*
* Reads the MII auto-neg advertisement register and/or the 1000T control
* register and if the PHY is already setup for auto-negotiation, then
* return successful. Otherwise, setup advertisement and flow control to
* the appropriate values for the wanted auto-negotiation.
*/
static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
u16 aneg_multigbt_an_ctrl = 0;
u16 mii_1000t_ctrl_reg = 0;
u16 mii_autoneg_adv_reg;
s32 ret_val;
phy->autoneg_advertised &= phy->autoneg_mask;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
/* Read the MII 1000Base-T Control Register (Address 9). */
ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
&mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
}
if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
hw->phy.id == I225_I_PHY_ID) {
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
ANEG_MULTIGBT_AN_CTRL,
&aneg_multigbt_an_ctrl);
if (ret_val)
return ret_val;
}
/* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
/* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
NWAY_AR_100TX_HD_CAPS |
NWAY_AR_10T_FD_CAPS |
NWAY_AR_10T_HD_CAPS);
mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
hw_dbg("Advertise 10mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
}
/* Do we want to advertise 10 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
hw_dbg("Advertise 10mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
}
/* Do we want to advertise 100 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
hw_dbg("Advertise 100mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
}
/* Do we want to advertise 100 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
hw_dbg("Advertise 100mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
hw_dbg("Advertise 1000mb Half duplex request denied!\n");
/* Do we want to advertise 1000 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
hw_dbg("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
/* We do not allow the Phy to advertise 2500 Mb Half Duplex */
if (phy->autoneg_advertised & ADVERTISE_2500_HALF)
hw_dbg("Advertise 2500mb Half duplex request denied!\n");
/* Do we want to advertise 2500 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_2500_FULL) {
hw_dbg("Advertise 2500mb Full duplex\n");
aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS;
} else {
aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS;
}
/* Check for a software override of the flow control settings, and
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
* Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
* negotiation.
*
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause frames
* but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
* but we do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
* other: No software override. The flow control configuration
* in the EEPROM is used.
*/
switch (hw->fc.current_mode) {
case igc_fc_none:
/* Flow control (Rx & Tx) is completely disabled by a
* software over-ride.
*/
mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case igc_fc_rx_pause:
/* Rx Flow control is enabled, and Tx Flow control is
* disabled, by a software over-ride.
*
* Since there really isn't a way to advertise that we are
* capable of Rx Pause ONLY, we will advertise that we
* support both symmetric and asymmetric Rx PAUSE. Later
* (in igc_config_fc_after_link_up) we will disable the
* hw's ability to send PAUSE frames.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case igc_fc_tx_pause:
/* Tx Flow control is enabled, and Rx Flow control is
* disabled, by a software over-ride.
*/
mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
break;
case igc_fc_full:
/* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
default:
hw_dbg("Flow control param set incorrectly\n");
return -IGC_ERR_CONFIG;
}
ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
if (phy->autoneg_mask & ADVERTISE_1000_FULL)
ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
mii_1000t_ctrl_reg);
if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
hw->phy.id == I225_I_PHY_ID)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
ANEG_MULTIGBT_AN_CTRL,
aneg_multigbt_an_ctrl);
return ret_val;
}
/**
* igc_wait_autoneg - Wait for auto-neg completion
* @hw: pointer to the HW structure
*
* Waits for auto-negotiation to complete or for the auto-negotiation time
* limit to expire, whichever happens first.
*/
static s32 igc_wait_autoneg(struct igc_hw *hw)
{
u16 i, phy_status;
s32 ret_val = 0;
/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
if (phy_status & MII_SR_AUTONEG_COMPLETE)
break;
msleep(100);
}
/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
* has completed.
*/
return ret_val;
}
/**
* igc_copper_link_autoneg - Setup/Enable autoneg for copper link
* @hw: pointer to the HW structure
*
* Performs initial bounds checking on autoneg advertisement parameter, then
* configure to advertise the full capability. Setup the PHY to autoneg
* and restart the negotiation process between the link partner. If
* autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
*/
static s32 igc_copper_link_autoneg(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
u16 phy_ctrl;
s32 ret_val;
/* Perform some bounds checking on the autoneg advertisement
* parameter.
*/
phy->autoneg_advertised &= phy->autoneg_mask;
/* If autoneg_advertised is zero, we assume it was not defaulted
* by the calling code so we set to advertise full capability.
*/
if (phy->autoneg_advertised == 0)
phy->autoneg_advertised = phy->autoneg_mask;
hw_dbg("Reconfiguring auto-neg advertisement params\n");
ret_val = igc_phy_setup_autoneg(hw);
if (ret_val) {
hw_dbg("Error Setting up Auto-Negotiation\n");
goto out;
}
hw_dbg("Restarting Auto-Neg\n");
/* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
if (ret_val)
goto out;
phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
if (ret_val)
goto out;
/* Does the user want to wait for Auto-Neg to complete here, or
* check at a later time (for example, callback routine).
*/
if (phy->autoneg_wait_to_complete) {
ret_val = igc_wait_autoneg(hw);
if (ret_val) {
hw_dbg("Error while waiting for autoneg to complete\n");
goto out;
}
}
hw->mac.get_link_status = true;
out:
return ret_val;
}
/**
* igc_setup_copper_link - Configure copper link settings
* @hw: pointer to the HW structure
*
* Calls the appropriate function to configure the link for auto-neg or forced
* speed and duplex. Then we check for link, once link is established calls
* to configure collision distance and flow control are called. If link is
* not established, we return -IGC_ERR_PHY (-2).
*/
s32 igc_setup_copper_link(struct igc_hw *hw)
{
s32 ret_val = 0;
bool link;
if (hw->mac.autoneg) {
/* Setup autoneg and flow control advertisement and perform
* autonegotiation.
*/
ret_val = igc_copper_link_autoneg(hw);
if (ret_val)
goto out;
} else {
/* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
hw_dbg("Forcing Speed and Duplex\n");
ret_val = hw->phy.ops.force_speed_duplex(hw);
if (ret_val) {
hw_dbg("Error Forcing Speed and Duplex\n");
goto out;
}
}
/* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
if (ret_val)
goto out;
if (link) {
hw_dbg("Valid link established!!!\n");
igc_config_collision_dist(hw);
ret_val = igc_config_fc_after_link_up(hw);
} else {
hw_dbg("Unable to establish link!!!\n");
}
out:
return ret_val;
}
/**
* igc_read_phy_reg_mdic - Read MDI control register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
*
* Reads the MDI control register in the PHY at offset and stores the
* information read to data.
*/
static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
{
struct igc_phy_info *phy = &hw->phy;
u32 i, mdic = 0;
s32 ret_val = 0;
if (offset > MAX_PHY_REG_ADDRESS) {
hw_dbg("PHY Address %d is out of range\n", offset);
ret_val = -IGC_ERR_PARAM;
goto out;
}
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
mdic = ((offset << IGC_MDIC_REG_SHIFT) |
(phy->addr << IGC_MDIC_PHY_SHIFT) |
(IGC_MDIC_OP_READ));
wr32(IGC_MDIC, mdic);
/* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
usleep_range(500, 1000);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
}
if (!(mdic & IGC_MDIC_READY)) {
hw_dbg("MDI Read did not complete\n");
ret_val = -IGC_ERR_PHY;
goto out;
}
if (mdic & IGC_MDIC_ERROR) {
hw_dbg("MDI Error\n");
ret_val = -IGC_ERR_PHY;
goto out;
}
*data = (u16)mdic;
out:
return ret_val;
}
/**
* igc_write_phy_reg_mdic - Write MDI control register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write to register at offset
*
* Writes data to MDI control register in the PHY at offset.
*/
static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
{
struct igc_phy_info *phy = &hw->phy;
u32 i, mdic = 0;
s32 ret_val = 0;
if (offset > MAX_PHY_REG_ADDRESS) {
hw_dbg("PHY Address %d is out of range\n", offset);
ret_val = -IGC_ERR_PARAM;
goto out;
}
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to write the desired data.
*/
mdic = (((u32)data) |
(offset << IGC_MDIC_REG_SHIFT) |
(phy->addr << IGC_MDIC_PHY_SHIFT) |
(IGC_MDIC_OP_WRITE));
wr32(IGC_MDIC, mdic);
/* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
usleep_range(500, 1000);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
}
if (!(mdic & IGC_MDIC_READY)) {
hw_dbg("MDI Write did not complete\n");
ret_val = -IGC_ERR_PHY;
goto out;
}
if (mdic & IGC_MDIC_ERROR) {
hw_dbg("MDI Error\n");
ret_val = -IGC_ERR_PHY;
goto out;
}
out:
return ret_val;
}
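/* Illustrative MDIC encoding (sketch): reading PHY register 2 (PHY_ID1)
* on PHY address 0 composes the command word as
*
*	mdic = (2 << IGC_MDIC_REG_SHIFT) |
*	       (0 << IGC_MDIC_PHY_SHIFT) |
*	       IGC_MDIC_OP_READ;
*
* and a completed transaction returns the data in the low 16 bits of
* MDIC, with the READY/ERROR status in the upper bits.
*/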
/**
* __igc_access_xmdio_reg - Read/write XMDIO register
* @hw: pointer to the HW structure
* @address: XMDIO address to program
* @dev_addr: device address to program
* @data: pointer to value to read/write from/to the XMDIO address
* @read: boolean flag to indicate read or write
*/
static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address,
u8 dev_addr, u16 *data, bool read)
{
s32 ret_val;
ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr);
if (ret_val)
return ret_val;
ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address);
if (ret_val)
return ret_val;
ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA |
dev_addr);
if (ret_val)
return ret_val;
if (read)
ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data);
else
ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data);
if (ret_val)
return ret_val;
/* Recalibrate the device back to 0 */
ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0);
if (ret_val)
return ret_val;
return ret_val;
}
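/* The sequence above is the standard Clause-45-over-Clause-22 indirection:
* MMDAC (register 13) selects the MMD, MMDAAD (register 14) takes the
* address, MMDAC is rewritten with the data-function bit set, and MMDAAD
* then carries the data. Illustrative read of MMD 7 (auto-negotiation),
* matching how igc_phy_setup_autoneg reaches reg 7.32:
*
*	u16 val;
*
*	ret_val = igc_read_xmdio_reg(hw, ANEG_MULTIGBT_AN_CTRL, 7, &val);
*/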
/**
* igc_read_xmdio_reg - Read XMDIO register
* @hw: pointer to the HW structure
* @addr: XMDIO address to program
* @dev_addr: device address to program
* @data: value to be read from the EMI address
*/
static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr,
u8 dev_addr, u16 *data)
{
return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true);
}
/**
* igc_write_xmdio_reg - Write XMDIO register
* @hw: pointer to the HW structure
* @addr: XMDIO address to program
* @dev_addr: device address to program
* @data: value to be written to the XMDIO address
*/
static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
u8 dev_addr, u16 data)
{
return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
/**
* igc_write_phy_reg_gpy - Write GPY PHY register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
*/
s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
{
u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
s32 ret_val;
offset = offset & GPY_REG_MASK;
if (!dev_addr) {
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val = igc_write_phy_reg_mdic(hw, offset, data);
if (ret_val)
return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
data);
}
return ret_val;
}
/**
* igc_read_phy_reg_gpy - Read GPY PHY register
* @hw: pointer to the HW structure
* @offset: lower half is register offset to read from,
* upper half is MMD to use.
* @data: data to read at register offset
*
* Acquires semaphore, if necessary, then reads the data in the PHY register
* at the offset. Release any acquired semaphores before exiting.
*/
s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
{
u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
s32 ret_val;
offset = offset & GPY_REG_MASK;
if (!dev_addr) {
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val = igc_read_phy_reg_mdic(hw, offset, data);
if (ret_val)
return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
data);
}
return ret_val;
}
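/* Worked example of the GPY offset encoding used by both accessors above:
* the MMD occupies the upper half of the offset and the register the lower
* half, so MMD 3 (PCS), register 0x20 is read with
*
*	u16 data;
*
*	igc_read_phy_reg_gpy(hw, (3 << GPY_MMD_SHIFT) | 0x20, &data);
*
* while an offset whose MMD field is zero goes through the plain MDIC path.
*/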
/**
* igc_read_phy_fw_version - Read gPHY firmware version
* @hw: pointer to the HW structure
*/
u16 igc_read_phy_fw_version(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
u16 gphy_version = 0;
u16 ret_val;
/* NVM image version is reported as firmware version for i225 device */
ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version);
if (ret_val)
hw_dbg("igc_phy: read wrong gphy version\n");
return gphy_version;
}

22
igc_phy.h Normal file

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_PHY_H_
#define _IGC_PHY_H_
#include "igc_mac.h"
s32 igc_check_reset_block(struct igc_hw *hw);
s32 igc_phy_hw_reset(struct igc_hw *hw);
s32 igc_get_phy_id(struct igc_hw *hw);
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
s32 igc_check_downshift(struct igc_hw *hw);
s32 igc_setup_copper_link(struct igc_hw *hw);
void igc_power_up_phy_copper(struct igc_hw *hw);
void igc_power_down_phy_copper(struct igc_hw *hw);
s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data);
s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data);
u16 igc_read_phy_fw_version(struct igc_hw *hw);
#endif

619
igc_ptp.c Normal file

@ -0,0 +1,619 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
#include <linux/ktime.h>
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
/* SYSTIM read access for I225 */
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts)
{
struct igc_hw *hw = &adapter->hw;
u32 sec, nsec;
/* The timestamp is latched when SYSTIML is read. */
nsec = rd32(IGC_SYSTIML);
sec = rd32(IGC_SYSTIMH);
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
static void igc_ptp_write_i225(struct igc_adapter *adapter,
const struct timespec64 *ts)
{
struct igc_hw *hw = &adapter->hw;
wr32(IGC_SYSTIML, ts->tv_nsec);
wr32(IGC_SYSTIMH, ts->tv_sec);
}
static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
ptp_caps);
struct igc_hw *hw = &igc->hw;
int neg_adj = 0;
u64 rate;
u32 inca;
if (scaled_ppm < 0) {
neg_adj = 1;
scaled_ppm = -scaled_ppm;
}
rate = scaled_ppm;
rate <<= 14;
rate = div_u64(rate, 78125);
inca = rate & INCVALUE_MASK;
if (neg_adj)
inca |= ISGN;
wr32(IGC_TIMINCA, inca);
return 0;
}
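/* A best-effort derivation of the constants above, assuming the I225
* SYSTIM clock is nominally 312.5 MHz and TIMINCA is expressed in
* 2^-32 ns units: an adjustment of p ppb changes the increment by
* p * 2^32 / 312.5e6, and with scaled_ppm = ppm * 2^16 this reduces to
*
*	inca = scaled_ppm * 2^14 / 78125
*
* which is exactly the shift-by-14 and divide-by-78125 computed here.
*/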
static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta)
{
struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
ptp_caps);
struct timespec64 now, then = ns_to_timespec64(delta);
unsigned long flags;
spin_lock_irqsave(&igc->tmreg_lock, flags);
igc_ptp_read(igc, &now);
now = timespec64_add(now, then);
igc_ptp_write_i225(igc, (const struct timespec64 *)&now);
spin_unlock_irqrestore(&igc->tmreg_lock, flags);
return 0;
}
static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
ptp_caps);
struct igc_hw *hw = &igc->hw;
unsigned long flags;
spin_lock_irqsave(&igc->tmreg_lock, flags);
ptp_read_system_prets(sts);
ts->tv_nsec = rd32(IGC_SYSTIML);
ts->tv_sec = rd32(IGC_SYSTIMH);
ptp_read_system_postts(sts);
spin_unlock_irqrestore(&igc->tmreg_lock, flags);
return 0;
}
static int igc_ptp_settime_i225(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
ptp_caps);
unsigned long flags;
spin_lock_irqsave(&igc->tmreg_lock, flags);
igc_ptp_write_i225(igc, ts);
spin_unlock_irqrestore(&igc->tmreg_lock, flags);
return 0;
}
static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
/**
* igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp
* @adapter: board private structure
* @hwtstamps: timestamp structure to update
* @systim: unsigned 64bit system time value
*
* We need to convert the system time value stored in the RX/TXSTMP registers
* into a hwtstamp which can be used by the upper level timestamping functions.
**/
static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
struct skb_shared_hwtstamps *hwtstamps,
u64 systim)
{
switch (adapter->hw.mac.type) {
case igc_i225:
memset(hwtstamps, 0, sizeof(*hwtstamps));
/* Upper 32 bits contain s, lower 32 bits contain ns. */
hwtstamps->hwtstamp = ktime_set(systim >> 32,
systim & 0xFFFFFFFF);
break;
default:
break;
}
}
/**
* igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
* @q_vector: Pointer to interrupt specific structure
* @va: Pointer to address containing Rx buffer
* @skb: Buffer containing timestamp and packet
*
* This function retrieves the timestamp saved in the beginning of packet
* buffer. While two timestamps are available, one in timer0 reference and the
* other in timer1 reference, this function considers only the timestamp in
* timer0 reference.
*/
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
struct sk_buff *skb)
{
struct igc_adapter *adapter = q_vector->adapter;
u64 regval;
int adjust;
/* Timestamps are saved in little endian at the beginning of the packet
* buffer following the layout:
*
* DWORD: | 0 | 1 | 2 | 3 |
* Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
*
* SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
* part of the timestamp.
*/
regval = le32_to_cpu(va[2]);
regval |= (u64)le32_to_cpu(va[3]) << 32;
igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
/* Adjust timestamp for the RX latency based on link speed */
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGC_I225_RX_LATENCY_10;
break;
case SPEED_100:
adjust = IGC_I225_RX_LATENCY_100;
break;
case SPEED_1000:
adjust = IGC_I225_RX_LATENCY_1000;
break;
case SPEED_2500:
adjust = IGC_I225_RX_LATENCY_2500;
break;
default:
adjust = 0;
netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");
break;
}
skb_hwtstamps(skb)->hwtstamp =
ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
}
static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 val;
int i;
wr32(IGC_TSYNCRXCTL, 0);
for (i = 0; i < adapter->num_rx_queues; i++) {
val = rd32(IGC_SRRCTL(i));
val &= ~IGC_SRRCTL_TIMESTAMP;
wr32(IGC_SRRCTL(i), val);
}
val = rd32(IGC_RXPBS);
val &= ~IGC_RXPBS_CFG_TS_EN;
wr32(IGC_RXPBS, val);
}
static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 val;
int i;
val = rd32(IGC_RXPBS);
val |= IGC_RXPBS_CFG_TS_EN;
wr32(IGC_RXPBS, val);
for (i = 0; i < adapter->num_rx_queues; i++) {
val = rd32(IGC_SRRCTL(i));
/* FIXME: For now, only support retrieving RX timestamps from
* timer 0.
*/
val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) |
IGC_SRRCTL_TIMESTAMP;
wr32(IGC_SRRCTL(i), val);
}
val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL |
IGC_TSYNCRXCTL_RXSYNSIG;
wr32(IGC_TSYNCRXCTL, val);
}
static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
wr32(IGC_TSYNCTXCTL, 0);
}
static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG);
/* Read TXSTMP registers to discard any timestamp previously stored. */
rd32(IGC_TXSTMPL);
rd32(IGC_TXSTMPH);
}
/**
* igc_ptp_set_timestamp_mode - setup hardware for timestamping
* @adapter: networking device structure
* @config: hwtstamp configuration
*
* Return: 0 in case of success, negative errno code otherwise.
*/
static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
struct hwtstamp_config *config)
{
/* reserved for future extensions */
if (config->flags)
return -EINVAL;
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
igc_ptp_disable_tx_timestamp(adapter);
break;
case HWTSTAMP_TX_ON:
igc_ptp_enable_tx_timestamp(adapter);
break;
default:
return -ERANGE;
}
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
igc_ptp_disable_rx_timestamp(adapter);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
igc_ptp_enable_rx_timestamp(adapter);
config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
return 0;
}
static void igc_ptp_tx_timeout(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
adapter->tx_hwtstamp_timeouts++;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
/* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */
rd32(IGC_TXSTMPH);
netdev_warn(adapter->netdev, "Tx timestamp timeout\n");
}
void igc_ptp_tx_hang(struct igc_adapter *adapter)
{
bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
IGC_PTP_TX_TIMEOUT);
if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
return;
/* If we haven't received a timestamp within the timeout, it is
* reasonable to assume that it will never occur, so we can unlock the
* timestamp bit when this occurs.
*/
if (timeout) {
cancel_work_sync(&adapter->ptp_tx_work);
igc_ptp_tx_timeout(adapter);
}
}
/**
* igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @adapter: Board private structure
*
* If we were asked to do hardware stamping and such a time stamp is
* available, then it must have been for this skb here because we only
* allow one such packet into the queue.
*/
static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
{
struct sk_buff *skb = adapter->ptp_tx_skb;
struct skb_shared_hwtstamps shhwtstamps;
struct igc_hw *hw = &adapter->hw;
int adjust = 0;
u64 regval;
if (WARN_ON_ONCE(!skb))
return;
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGC_I225_TX_LATENCY_10;
break;
case SPEED_100:
adjust = IGC_I225_TX_LATENCY_100;
break;
case SPEED_1000:
adjust = IGC_I225_TX_LATENCY_1000;
break;
case SPEED_2500:
adjust = IGC_I225_TX_LATENCY_2500;
break;
}
shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust);
/* Clear the lock early before calling skb_tstamp_tx so that
* applications are not woken up before the lock bit is clear. We use
* a copy of the skb pointer to ensure other threads can't change it
* while we're notifying the stack.
*/
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
/* Notify the stack and free the skb after we've unlocked */
skb_tstamp_tx(skb, &shhwtstamps);
dev_kfree_skb_any(skb);
}
/**
* igc_ptp_tx_work - poll for an available Tx timestamp
* @work: pointer to work struct
*
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
*/
static void igc_ptp_tx_work(struct work_struct *work)
{
struct igc_adapter *adapter = container_of(work, struct igc_adapter,
ptp_tx_work);
struct igc_hw *hw = &adapter->hw;
u32 tsynctxctl;
if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
return;
tsynctxctl = rd32(IGC_TSYNCTXCTL);
if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0)))
return;
igc_ptp_tx_hwtstamp(adapter);
}
/**
* igc_ptp_set_ts_config - set hardware time stamping config
* @netdev: network interface device structure
* @ifr: interface request data
*
**/
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct hwtstamp_config config;
int err;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
err = igc_ptp_set_timestamp_mode(adapter, &config);
if (err)
return err;
/* save these settings for future reference */
memcpy(&adapter->tstamp_config, &config,
sizeof(adapter->tstamp_config));
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
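/* Illustrative userspace counterpart (a sketch; error handling omitted):
* this handler backs the SIOCSHWTSTAMP ioctl, driven roughly as
*
*	struct hwtstamp_config cfg = {
*		.tx_type   = HWTSTAMP_TX_ON,
*		.rx_filter = HWTSTAMP_FILTER_ALL,
*	};
*	struct ifreq ifr = { .ifr_data = (void *)&cfg };
*
*	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
*	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
*
* after which cfg.rx_filter reads back as FILTER_ALL, since the driver
* upgrades every supported filter to it.
*/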
/**
* igc_ptp_get_ts_config - get hardware time stamping config
* @netdev: network interface device structure
* @ifr: interface request data
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
* of the last known settings.
**/
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct hwtstamp_config *config = &adapter->tstamp_config;
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
/**
* igc_ptp_init - Initialize PTP functionality
* @adapter: Board private structure
*
* This function is called at device probe to initialize the PTP
* functionality.
*/
void igc_ptp_init(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
switch (hw->mac.type) {
case igc_i225:
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225;
adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225;
adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225;
adapter->ptp_caps.settime64 = igc_ptp_settime_i225;
adapter->ptp_caps.enable = igc_ptp_feature_enable_i225;
break;
default:
adapter->ptp_clock = NULL;
return;
}
spin_lock_init(&adapter->tmreg_lock);
INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real());
adapter->ptp_reset_start = ktime_get();
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
netdev_err(netdev, "ptp_clock_register failed\n");
} else if (adapter->ptp_clock) {
netdev_info(netdev, "PHC added\n");
adapter->ptp_flags |= IGC_PTP_ENABLED;
}
}
static void igc_ptp_time_save(struct igc_adapter *adapter)
{
igc_ptp_read(adapter, &adapter->prev_ptp_time);
adapter->ptp_reset_start = ktime_get();
}
static void igc_ptp_time_restore(struct igc_adapter *adapter)
{
struct timespec64 ts = adapter->prev_ptp_time;
ktime_t delta;
delta = ktime_sub(ktime_get(), adapter->ptp_reset_start);
timespec64_add_ns(&ts, ktime_to_ns(delta));
igc_ptp_write_i225(adapter, &ts);
}
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
*
* This function stops the overflow check work and PTP Tx timestamp work, and
* will prepare the device for OS suspend.
*/
void igc_ptp_suspend(struct igc_adapter *adapter)
{
if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
return;
cancel_work_sync(&adapter->ptp_tx_work);
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
igc_ptp_time_save(adapter);
}
/**
* igc_ptp_stop - Disable PTP device and stop the overflow check.
* @adapter: Board private structure.
*
* This function stops the PTP support and cancels the delayed work.
**/
void igc_ptp_stop(struct igc_adapter *adapter)
{
igc_ptp_suspend(adapter);
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
netdev_info(adapter->netdev, "PHC removed\n");
adapter->ptp_flags &= ~IGC_PTP_ENABLED;
}
}
/**
* igc_ptp_reset - Re-enable the adapter for PTP following a reset.
* @adapter: Board private structure.
*
* This function handles the reset work required to re-enable the PTP device.
**/
void igc_ptp_reset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
unsigned long flags;
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
spin_lock_irqsave(&adapter->tmreg_lock, flags);
switch (adapter->hw.mac.type) {
case igc_i225:
wr32(IGC_TSAUXC, 0x0);
wr32(IGC_TSSDP, 0x0);
wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS);
wr32(IGC_IMS, IGC_IMS_TS);
break;
default:
/* No work to do. */
goto out;
}
/* Re-initialize the timer. */
if (hw->mac.type == igc_i225) {
igc_ptp_time_restore(adapter);
} else {
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
}
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
wrfl();
}

268
igc_regs.h Normal file

@ -0,0 +1,268 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_REGS_H_
#define _IGC_REGS_H_
/* General Register Descriptions */
#define IGC_CTRL 0x00000 /* Device Control - RW */
#define IGC_STATUS 0x00008 /* Device Status - RO */
#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */
#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define IGC_MDIC 0x00020 /* MDI Control - RW */
#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
/* NVM Register Descriptions */
#define IGC_EERD 0x12014 /* EEprom mode read - RW */
#define IGC_EEWR 0x12018 /* EEprom mode write - RW */
/* Flow Control Register Descriptions */
#define IGC_FCAL 0x00028 /* FC Address Low - RW */
#define IGC_FCAH 0x0002C /* FC Address High - RW */
#define IGC_FCT 0x00030 /* FC Type - RW */
#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */
#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */
#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */
#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */
/* Semaphore registers */
#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
#define IGC_SWSM 0x05B50 /* SW Semaphore */
#define IGC_FWSM 0x05B54 /* FW Semaphore */
/* Function Active and Power State to MNG */
#define IGC_FACTPS 0x05B30
/* Interrupt Register Description */
#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */
#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
#define IGC_EIAM 0x01530 /* Ext. Interrupt Auto Mask - RW */
#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */
#define IGC_ICS 0x01504 /* Intr Cause Set - WO */
#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */
#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */
#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */
/* Intr Throttle - RW */
#define IGC_EITR(_n) (0x01680 + (0x4 * (_n)))
/* Interrupt Vector Allocation - RW */
#define IGC_IVAR0 0x01700
#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
/* RSS registers */
#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */
/* Filtering Registers */
#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
/* ETQF register bit definitions */
#define IGC_ETQF_FILTER_ENABLE BIT(26)
#define IGC_ETQF_QUEUE_ENABLE BIT(31)
#define IGC_ETQF_QUEUE_SHIFT 16
#define IGC_ETQF_QUEUE_MASK 0x00070000
#define IGC_ETQF_ETYPE_MASK 0x0000FFFF
/* Redirection Table - RW Array */
#define IGC_RETA(_i) (0x05C00 + ((_i) * 4))
/* RSS Random Key - RW Array */
#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4))
/* Receive Register Descriptions */
#define IGC_RCTL 0x00100 /* Rx Control - RW */
#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40))
#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40))
#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40))
#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40))
#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40))
#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40))
#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40))
#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */
#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */
#define IGC_RFCTL 0x05008 /* Receive Filter Control*/
#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */
#define IGC_RA 0x05400 /* Receive Address - RW Array */
#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */
#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08))
#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08))
#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */
/* Transmit Register Descriptions */
#define IGC_TCTL 0x00400 /* Tx Control - RW */
#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */
#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40))
#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40))
#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40))
#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40))
#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40))
#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40))
/* MMD Register Descriptions */
#define IGC_MMDAC 13 /* MMD Access Control */
#define IGC_MMDAAD 14 /* MMD Access Address/Data */
/* Statistics Register Descriptions */
#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */
#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */
#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */
#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */
#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */
#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */
#define IGC_COLC 0x04028 /* Collision Count - R/clr */
#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */
#define IGC_DC 0x04030 /* Defer Count - R/clr */
#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */
#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */
#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */
#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */
#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */
#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
#define IGC_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */
#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */
#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */
#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */
#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */
#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */
#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */
#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */
#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
#define IGC_IAC 0x04100 /* Interrupt Assertion Count */
#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */
#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */
#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */
#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */
#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */
#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */
#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
#define IGC_LENERRS 0x04138 /* Length Errors Count */
/* Time sync registers */
#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */
#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */
#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
/* Transmit Scheduling Registers */
#define IGC_TQAVCTRL 0x3570
#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
#define IGC_BASET_L 0x3314
#define IGC_BASET_H 0x3318
#define IGC_QBVCYCLET 0x331C
#define IGC_QBVCYCLET_S 0x3320
#define IGC_STQT(_n) (0x3324 + 0x4 * (_n))
#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n))
#define IGC_DTXMXPKTSZ 0x355C
/* System Time Registers */
#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */
#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */
#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
/* Management registers */
#define IGC_MANC 0x05820 /* Management Control - RW */
/* Shadow Ram Write Register - RW */
#define IGC_SRWR 0x12018
/* Wake Up registers */
#define IGC_WUC 0x05800 /* Wakeup Control - RW */
#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */
#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */
#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */
/* Wake Up packet memory */
#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
/* Energy Efficient Ethernet "EEE" registers */
#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/
#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */
#define IGC_EEE_SU 0x0E34 /* EEE Setup */
/* LTR registers */
#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
#define IGC_DMACR 0x02508 /* DMA Coalescing Control Register */
#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */
#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */
/* forward declaration */
struct igc_hw;
u32 igc_rd32(struct igc_hw *hw, u32 reg);
/* write operations, indexed using DWORDS */
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
#define wrfl() ((void)rd32(IGC_STATUS))
#define array_wr32(reg, offset, value) \
wr32((reg) + ((offset) << 2), (value))
#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
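/* Illustrative usage (sketch): with a local `struct igc_hw *hw` in scope,
* as every caller in this driver has:
*
*	wr32(IGC_EITR(0), 0x4000);	program ITR for vector 0
*	val = rd32(IGC_STATUS);		read device status
*	wrfl();				flush posted writes via a STATUS read
*/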
#endif

157
igc_tsn.c Normal file

@ -0,0 +1,157 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
#include "igc_tsn.h"
static bool is_any_launchtime(struct igc_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
if (ring->launchtime_enable)
return true;
}
return false;
}
/* Returns the TSN specific registers to their default values after
* TSN offloading is disabled.
*/
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl;
int i;
if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED))
return 0;
adapter->cycle_time = 0;
wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
IGC_TQAVCTRL_ENHANCED_QAV);
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
ring->start_time = 0;
ring->end_time = 0;
ring->launchtime_enable = false;
wr32(IGC_TXQCTL(i), 0);
wr32(IGC_STQT(i), 0);
wr32(IGC_ENDQT(i), NSEC_PER_SEC);
}
wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
return 0;
}
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl, baset_l, baset_h;
u32 sec, nsec, cycle;
ktime_t base_time, systim;
int i;
if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)
return 0;
cycle = adapter->cycle_time;
base_time = adapter->base_time;
wr32(IGC_TSAUXC, 0);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
wr32(IGC_TQAVCTRL, tqavctrl);
wr32(IGC_QBVCYCLET_S, cycle);
wr32(IGC_QBVCYCLET, cycle);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
wr32(IGC_STQT(i), ring->start_time);
wr32(IGC_ENDQT(i), ring->end_time);
if (adapter->base_time) {
/* If we have a base_time we are in "taprio"
* mode and we need to be strict about the
* cycles: only transmit a packet if it can be
* completed during that cycle.
*/
txqctl |= IGC_TXQCTL_STRICT_CYCLE |
IGC_TXQCTL_STRICT_END;
}
if (ring->launchtime_enable)
txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
wr32(IGC_TXQCTL(i), txqctl);
}
nsec = rd32(IGC_SYSTIML);
sec = rd32(IGC_SYSTIMH);
systim = ktime_set(sec, nsec);
if (ktime_compare(systim, base_time) > 0) {
s64 n;
n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
}
baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
wr32(IGC_BASET_H, baset_h);
wr32(IGC_BASET_L, baset_l);
adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED;
return 0;
}
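/* Worked example of the base-time advance above: with systim = 10.0005 s,
* base_time = 0 and cycle = 1 ms, n = 10000 and base_time becomes
* 10.001 s - the first cycle boundary strictly after the current time -
* before being split into BASET_H (seconds) and BASET_L (nanoseconds).
*/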
int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter);
if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled)
return 0;
if (!is_any_enabled) {
int err = igc_tsn_disable_offload(adapter);
if (err < 0)
return err;
/* The BASET registers aren't cleared when writing
* into them, so force a reset if the interface is
* running.
*/
if (netif_running(adapter->netdev))
schedule_work(&adapter->reset_task);
return 0;
}
return igc_tsn_enable_offload(adapter);
}

9
igc_tsn.h Normal file

@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Intel Corporation */
#ifndef _IGC_TSN_H_
#define _IGC_TSN_H_
int igc_tsn_offload_apply(struct igc_adapter *adapter);
#endif /* _IGC_TSN_H_ */

146
igc_xdp.c Normal file

@ -0,0 +1,146 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>
#include "igc.h"
#include "igc_xdp.h"
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct net_device *dev = adapter->netdev;
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
* jumbo frames so we return error.
*/
NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
return -EOPNOTSUPP;
}
if (if_running)
igc_close(dev);
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
if (if_running)
igc_open(dev);
return 0;
}
static int igc_xdp_enable_pool(struct igc_adapter *adapter,
struct xsk_buff_pool *pool, u16 queue_id)
{
struct net_device *ndev = adapter->netdev;
struct device *dev = &adapter->pdev->dev;
struct igc_ring *rx_ring, *tx_ring;
struct napi_struct *napi;
bool needs_reset;
u32 frame_size;
int err;
if (queue_id >= adapter->num_rx_queues ||
queue_id >= adapter->num_tx_queues)
return -EINVAL;
frame_size = xsk_pool_get_rx_frame_size(pool);
if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
/* When XDP is enabled, the driver doesn't support frames that
* span over multiple buffers. To avoid that, we check if xsk
* frame size is big enough to fit the max ethernet frame size
* + vlan double tagging.
*/
return -EOPNOTSUPP;
}
err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
if (err) {
netdev_err(ndev, "Failed to map xsk pool\n");
return err;
}
needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
rx_ring = adapter->rx_ring[queue_id];
tx_ring = adapter->tx_ring[queue_id];
/* Rx and Tx rings share the same napi context. */
napi = &rx_ring->q_vector->napi;
if (needs_reset) {
igc_disable_rx_ring(rx_ring);
igc_disable_tx_ring(tx_ring);
napi_disable(napi);
}
set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
if (needs_reset) {
napi_enable(napi);
igc_enable_rx_ring(rx_ring);
igc_enable_tx_ring(tx_ring);
err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
if (err) {
xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
return err;
}
}
return 0;
}
static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
{
struct igc_ring *rx_ring, *tx_ring;
struct xsk_buff_pool *pool;
struct napi_struct *napi;
bool needs_reset;
if (queue_id >= adapter->num_rx_queues ||
queue_id >= adapter->num_tx_queues)
return -EINVAL;
pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
if (!pool)
return -EINVAL;
needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
rx_ring = adapter->rx_ring[queue_id];
tx_ring = adapter->tx_ring[queue_id];
/* Rx and Tx rings share the same napi context. */
napi = &rx_ring->q_vector->napi;
if (needs_reset) {
igc_disable_rx_ring(rx_ring);
igc_disable_tx_ring(tx_ring);
napi_disable(napi);
}
xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
if (needs_reset) {
napi_enable(napi);
igc_enable_rx_ring(rx_ring);
igc_enable_tx_ring(tx_ring);
}
return 0;
}
int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
u16 queue_id)
{
return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
igc_xdp_disable_pool(adapter, queue_id);
}

17
igc_xdp.h Normal file

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020, Intel Corporation. */
#ifndef _IGC_XDP_H_
#define _IGC_XDP_H_
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct netlink_ext_ack *extack);
int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
u16 queue_id);
static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter)
{
return !!adapter->xdp_prog;
}
#endif /* _IGC_XDP_H_ */