Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2018-03-26

This patch series adds the ice driver, which will support the Intel(R)
E800 Series of network devices.

This is the first phase in the release of this driver where we implement
basic transmit and receive. The idea behind the multi-phase release is to
aid in code review as well as testing. Subsequent phases will implement
advanced features (like SR-IOV, tunnelling, flow director, QoS, etc.) that
build upon the previous phase(s). Each phase will be submitted as a patch
series.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2018-03-26 18:20:02 -04:00
commit 34fd03b9e6
26 changed files with 18863 additions and 0 deletions


@@ -0,0 +1,39 @@
Intel(R) Ethernet Connection E800 Series Linux Driver
===================================================================
Intel ice Linux driver.
Copyright(c) 2018 Intel Corporation.
Contents
========
- Enabling the driver
- Support
The driver in this release supports Intel's E800 Series of products. For
more information, visit Intel's support page at http://support.intel.com.
Enabling the driver
===================
The driver is enabled via the standard kernel configuration system,
using the make command:
make oldconfig/silentoldconfig/menuconfig/etc.
The driver is located in the menu structure at:
-> Device Drivers
-> Network device support (NETDEVICES [=y])
-> Ethernet driver support
-> Intel devices
-> Intel(R) Ethernet Connection E800 Series Support
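For example, one typical sequence to build and load the driver as a module
(a rough sketch only; the exact steps depend on your kernel tree and
distribution) is:

    make menuconfig        # set the option above to <M> (CONFIG_ICE=m)
    make modules
    make modules_install
    modprobe ice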
Support
=======
For general information, go to the Intel support website at:
http://support.intel.com
If an issue is identified with the released source code, please email
the maintainer listed in the MAINTAINERS file.


@@ -7063,6 +7063,7 @@ F: Documentation/networking/ixgbe.txt
F: Documentation/networking/ixgbevf.txt
F: Documentation/networking/i40e.txt
F: Documentation/networking/i40evf.txt
F: Documentation/networking/ice.txt
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
F: include/linux/avf/virtchnl.h


@@ -251,6 +251,20 @@ config I40EVF
will be called i40evf. MSI-X interrupt support is required
for this driver to work correctly.
config ICE
tristate "Intel(R) Ethernet Connection E800 Series Support"
default n
depends on PCI_MSI
---help---
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called ice.
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n


@@ -14,3 +14,4 @@ obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_I40EVF) += i40evf/
obj-$(CONFIG_FM10K) += fm10k/
obj-$(CONFIG_ICE) += ice/


@@ -0,0 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018, Intel Corporation.
#
# Makefile for the Intel(R) Ethernet Connection E800 Series Linux Driver
#
obj-$(CONFIG_ICE) += ice.o
ice-y := ice_main.o \
ice_controlq.o \
ice_common.o \
ice_nvm.o \
ice_switch.o \
ice_sched.o \
ice_txrx.o \
ice_ethtool.o


@@ -0,0 +1,312 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_H_
#define _ICE_H_
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/cpumask.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#include <net/ipv6.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
#define ICE_DFLT_NUM_DESC 128
#define ICE_MIN_NUM_DESC 8
#define ICE_MAX_NUM_DESC 8160
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_MAX_VSI_ALLOC 130
#define ICE_MAX_TXQS 2048
#define ICE_MAX_RXQS 2048
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
#define ICE_MAX_SCATTER_TXQS 16
#define ICE_MAX_SCATTER_RXQS 16
#define ICE_Q_WAIT_RETRY_LIMIT 10
#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS 256
#define ICE_MAX_SMALL_RSS_QS 8
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN))
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
ICE_AQ_VSI_UP_TABLE_UP##i##_M)
#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
/* Macros for each tx/rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
struct ice_tc_info {
u16 qoffset;
u16 qcount;
};
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
u8 ena_tc; /* TX map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
struct ice_res_tracker {
u16 num_entries;
u16 search_hint;
u16 list[1];
};
struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
};
enum ice_state {
__ICE_DOWN,
__ICE_NEEDS_RESTART,
__ICE_RESET_RECOVERY_PENDING, /* set by driver when reset starts */
__ICE_PFR_REQ, /* set by driver and peers */
__ICE_CORER_REQ, /* set by driver and peers */
__ICE_GLOBR_REQ, /* set by driver and peers */
__ICE_CORER_RECV, /* set by OICR handler */
__ICE_GLOBR_RECV, /* set by OICR handler */
__ICE_EMPR_RECV, /* set by OICR handler */
__ICE_SUSPENDED, /* set on module remove path */
__ICE_RESET_FAILED, /* set by reset/rebuild */
__ICE_ADMINQ_EVENT_PENDING,
__ICE_FLTR_OVERFLOW_PROMISC,
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_STATE_NBITS /* must be last */
};
enum ice_vsi_flags {
ICE_VSI_FLAG_UMAC_FLTR_CHANGED,
ICE_VSI_FLAG_MMAC_FLTR_CHANGED,
ICE_VSI_FLAG_VLAN_FLTR_CHANGED,
ICE_VSI_FLAG_PROMISC_CHANGED,
ICE_VSI_FLAG_NBITS /* must be last */
};
/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
struct net_device *netdev;
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_ring **rx_rings; /* rx ring array */
struct ice_ring **tx_rings; /* tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
irqreturn_t (*irq_handler)(int irq, void *data);
u64 tx_linearize;
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
unsigned int current_netdev_flags;
u32 tx_restart;
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
int num_q_vectors;
int base_vector;
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
/* Interrupt thresholds */
u16 work_lmt;
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
u8 *rss_hkey_user; /* User configured hash keys */
u8 *rss_lut_user; /* User configured lookup table entries */
u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */
u16 max_frame;
u16 rx_buf_len;
struct ice_aqc_vsi_props info; /* VSI properties */
/* VSI stats */
struct rtnl_link_stats64 net_stats;
struct ice_eth_stats eth_stats;
struct ice_eth_stats eth_stats_prev;
struct list_head tmp_sync_list; /* MAC filters to be synced */
struct list_head tmp_unsync_list; /* MAC filters to be unsynced */
bool irqs_ready;
bool current_isup; /* Sync 'link up' logging */
bool stat_offsets_loaded;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
u16 txq_map[ICE_MAX_TXQS]; /* index in pf->avail_txqs */
u16 rxq_map[ICE_MAX_RXQS]; /* index in pf->avail_rxqs */
u16 alloc_txq; /* Allocated Tx queues */
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
u16 num_rxq; /* Used Rx queues */
u16 num_desc;
struct ice_tc_cfg tc_cfg;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
struct ice_q_vector {
struct ice_vsi *vsi;
cpumask_t affinity_mask;
struct napi_struct napi;
struct ice_ring_container rx;
struct ice_ring_container tx;
struct irq_affinity_notify affinity_notify;
u16 v_idx; /* index in the vsi->q_vector array. */
u8 num_ring_tx; /* total number of tx rings in vector */
u8 num_ring_rx; /* total number of rx rings in vector */
char name[ICE_INT_NAME_STR_LEN];
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
ICE_FLAG_MSIX_ENA,
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RSS_ENA,
ICE_PF_FLAGS_NBITS /* must be last */
};
struct ice_pf {
struct pci_dev *pdev;
struct msix_entry *msix_entries;
struct ice_res_tracker *irq_tracker;
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long serv_tmr_period;
unsigned long serv_tmr_prev;
struct timer_list serv_tmr;
struct work_struct serv_task;
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
u32 msg_enable;
u32 hw_csum_rx_error;
u32 oicr_idx; /* Other interrupt cause vector index */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
u32 num_avail_msix; /* remaining MSIX vectors left unclaimed */
u16 num_lan_tx; /* num lan tx queues setup */
u16 num_lan_rx; /* num lan rx queues setup */
u16 q_left_tx; /* remaining num tx queues left unclaimed */
u16 q_left_rx; /* remaining num rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
u16 num_alloc_vsi;
u16 corer_count; /* Core reset count */
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
struct ice_hw_port_stats stats;
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
bool stat_prev_loaded; /* has previous stats been loaded */
char int_name[ICE_INT_NAME_STR_LEN];
};
struct ice_netdev_priv {
struct ice_vsi *vsi;
};
/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
* @hw: pointer to hw struct
* @vsi: pointer to vsi struct, can be NULL
* @q_vector: pointer to q_vector, can be NULL
*/
static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
struct ice_q_vector *q_vector)
{
u32 vector = (vsi && q_vector) ? vsi->base_vector + q_vector->v_idx :
((struct ice_pf *)hw->back)->oicr_idx;
int itr = ICE_ITR_NONE;
u32 val;
/* clear the PBA here, as this function is meant to clean out all
* previous interrupts and enable the interrupt
*/
val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
(itr << GLINT_DYN_CTL_ITR_INDX_S);
if (vsi && test_bit(__ICE_DOWN, vsi->state))
return;
wr32(hw, GLINT_DYN_CTL(vector), val);
}
static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
vsi->tc_cfg.numtc = 1;
}
void ice_set_ethtool_ops(struct net_device *netdev);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
#endif /* _ICE_H_ */
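To make the relationship between the macros above and the VSI/ring data
structures concrete, the following is a minimal, hypothetical sketch
(illustration only, not part of this patch) that walks a VSI's Tx rings with
ice_for_each_txq() and looks up the first descriptor of each ring with
ICE_TX_DESC():

/* Illustration only, not part of this patch: log the descriptor count of
 * every Tx ring on a VSI using the iteration and descriptor macros from
 * ice.h above.
 */
static void ice_dbg_dump_txqs(struct ice_vsi *vsi)
{
	int q;

	ice_for_each_txq(vsi, q) {
		struct ice_ring *ring = vsi->tx_rings[q];
		struct ice_tx_desc *first = ICE_TX_DESC(ring, 0);

		netdev_dbg(vsi->netdev, "txq %d: %d descriptors, first desc at %p\n",
			   q, ring->count, first);
	}
}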

(File diff suppressed because it is too large.)

(File diff suppressed because it is too large.)


@@ -0,0 +1,86 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_COMMON_H_
#define _ICE_COMMON_H_
#include "ice.h"
#include "ice_type.h"
#include "ice_switch.h"
void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
enum ice_status
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
enum ice_status ice_get_caps(struct ice_hw *hw);
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
u16 lut_size);
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
u16 lut_size);
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
struct ice_aqc_get_set_rss_keys *keys);
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
struct ice_aqc_get_set_rss_keys *keys);
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart);
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
u32 *q_teids, struct ice_sq_cd *cmd_details);
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
u16 *max_lanqs);
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
#endif /* _ICE_COMMON_H_ */

(File diff suppressed because it is too large.)


@@ -0,0 +1,94 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_CONTROLQ_H_
#define _ICE_CONTROLQ_H_
#include "ice_adminq_cmd.h"
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
#define ICE_CTL_Q_DESC_UNUSED(R) \
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
*/
#define EXP_FW_API_VER_BRANCH 0x00
#define EXP_FW_API_VER_MAJOR 0x00
#define EXP_FW_API_VER_MINOR 0x01
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
};
/* Control Queue default settings */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to dma head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */
void *cmd_buf; /* command buffer memory */
union {
struct ice_dma_mem *sq_bi;
struct ice_dma_mem *rq_bi;
} r;
u16 count; /* Number of descriptors */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
/* used for queue tracking */
u32 head;
u32 tail;
u32 len;
u32 bah;
u32 bal;
u32 len_mask;
u32 len_ena_mask;
u32 head_mask;
};
/* sq transaction details */
struct ice_sq_cd {
struct ice_aq_desc *wb_desc;
};
#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
/* rq event information */
struct ice_rq_event_info {
struct ice_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
};
/* Control Queue information */
struct ice_ctl_q_info {
enum ice_ctl_q qtype;
struct ice_ctl_q_ring rq; /* receive queue */
struct ice_ctl_q_ring sq; /* send queue */
u32 sq_cmd_timeout; /* send queue cmd write back timeout */
u16 num_rq_entries; /* receive queue depth */
u16 num_sq_entries; /* send queue depth */
u16 rq_buf_size; /* receive queue buffer size */
u16 sq_buf_size; /* send queue buffer size */
struct mutex sq_lock; /* Send queue lock */
struct mutex rq_lock; /* Receive queue lock */
enum ice_aq_err sq_last_status; /* last status on send queue */
enum ice_aq_err rq_last_status; /* last status on receive queue */
};
#endif /* _ICE_CONTROLQ_H_ */


@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_DEVIDS_H_
#define _ICE_DEVIDS_H_
/* Device IDs */
/* Intel(R) Ethernet Controller C810 for backplane */
#define ICE_DEV_ID_C810_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller C810 for QSFP */
#define ICE_DEV_ID_C810_QSFP 0x1592
/* Intel(R) Ethernet Controller C810 for SFP */
#define ICE_DEV_ID_C810_SFP 0x1593
/* Intel(R) Ethernet Controller C810/X557-AT 10GBASE-T */
#define ICE_DEV_ID_C810_10G_BASE_T 0x1594
/* Intel(R) Ethernet Controller C810 1GbE */
#define ICE_DEV_ID_C810_SGMII 0x1595
#endif /* _ICE_DEVIDS_H_ */


@@ -0,0 +1,940 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
/* ethtool support for ice */
#include "ice.h"
struct ice_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
#define ICE_STAT(_type, _name, _stat) { \
.stat_string = _name, \
.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
.stat_offset = offsetof(_type, _stat) \
}
#define ICE_VSI_STAT(_name, _stat) \
ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
ICE_STAT(struct ice_pf, _name, _stat)
static int ice_q_stats_len(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
return ((np->vsi->num_txq + np->vsi->num_rxq) *
(sizeof(struct ice_q_stats) / sizeof(u64)));
}
#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)
#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
ice_q_stats_len(n))
static const struct ice_stats ice_gstrings_vsi_stats[] = {
ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
ICE_VSI_STAT("tx_linearize", tx_linearize),
ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
* but they aren't. This device is capable of supporting multiple
* VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
* netdevs whereas the PF_STATs are for the physical function that's
* hosting these netdevs.
*
* The PF_STATs are appended to the netdev stats only when ethtool -S
* is queried on the base PF netdev.
*/
static struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast),
ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast),
ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast),
ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
ICE_PF_STAT("tx_errors", stats.eth.tx_errors),
ICE_PF_STAT("tx_size_64", stats.tx_size_64),
ICE_PF_STAT("rx_size_64", stats.rx_size_64),
ICE_PF_STAT("tx_size_127", stats.tx_size_127),
ICE_PF_STAT("rx_size_127", stats.rx_size_127),
ICE_PF_STAT("tx_size_255", stats.tx_size_255),
ICE_PF_STAT("rx_size_255", stats.rx_size_255),
ICE_PF_STAT("tx_size_511", stats.tx_size_511),
ICE_PF_STAT("rx_size_511", stats.rx_size_511),
ICE_PF_STAT("tx_size_1023", stats.tx_size_1023),
ICE_PF_STAT("rx_size_1023", stats.rx_size_1023),
ICE_PF_STAT("tx_size_1522", stats.tx_size_1522),
ICE_PF_STAT("rx_size_1522", stats.rx_size_1522),
ICE_PF_STAT("tx_size_big", stats.tx_size_big),
ICE_PF_STAT("rx_size_big", stats.rx_size_big),
ICE_PF_STAT("link_xon_tx", stats.link_xon_tx),
ICE_PF_STAT("link_xon_rx", stats.link_xon_rx),
ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
ICE_PF_STAT("rx_undersize", stats.rx_undersize),
ICE_PF_STAT("rx_fragments", stats.rx_fragments),
ICE_PF_STAT("rx_oversize", stats.rx_oversize),
ICE_PF_STAT("rx_jabber", stats.rx_jabber),
ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error),
ICE_PF_STAT("rx_length_errors", stats.rx_len_errors),
ICE_PF_STAT("rx_dropped", stats.eth.rx_discards),
ICE_PF_STAT("rx_crc_errors", stats.crc_errors),
ICE_PF_STAT("illegal_bytes", stats.illegal_bytes),
ICE_PF_STAT("mac_local_faults", stats.mac_local_faults),
ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
};
static u32 ice_regs_dump_list[] = {
PFGEN_STATE,
PRTGEN_STATUS,
QRX_CTRL(0),
QINT_TQCTL(0),
QINT_RQCTL(0),
PFINT_OICR_ENA,
QRX_ITR(0),
};
/**
* ice_nvm_version_str - format the NVM version strings
* @hw: ptr to the hardware info
*/
static char *ice_nvm_version_str(struct ice_hw *hw)
{
static char buf[ICE_ETHTOOL_FWVER_LEN];
u8 ver, patch;
u32 full_ver;
u16 build;
full_ver = hw->nvm.oem_ver;
ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
ICE_OEM_VER_BUILD_SHIFT);
patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);
snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
(hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
(hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
hw->nvm.eetrack, ver, build, patch);
return buf;
}
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
}
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
return sizeof(ice_regs_dump_list);
}
static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
struct ice_hw *hw = &pf->hw;
u32 *regs_buf = (u32 *)p;
int i;
regs->version = 1;
for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
}
static u32 ice_get_msglevel(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
#ifndef CONFIG_DYNAMIC_DEBUG
if (pf->hw.debug_mask)
netdev_info(netdev, "hw debug_mask: 0x%llX\n",
pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */
return pf->msg_enable;
}
static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
#ifndef CONFIG_DYNAMIC_DEBUG
if (ICE_DBG_USER & data)
pf->hw.debug_mask = data;
else
pf->msg_enable = data;
#else
pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
char *p = (char *)data;
unsigned int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "%s",
ice_gstrings_vsi_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
ice_for_each_txq(vsi, i) {
snprintf(p, ETH_GSTRING_LEN,
"tx-queue-%u.tx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
p += ETH_GSTRING_LEN;
}
ice_for_each_rxq(vsi, i) {
snprintf(p, ETH_GSTRING_LEN,
"rx-queue-%u.rx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
p += ETH_GSTRING_LEN;
}
if (vsi->type != ICE_VSI_PF)
return;
for (i = 0; i < ICE_PF_STATS_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "port.%s",
ice_gstrings_pf_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
break;
default:
break;
}
}
static int ice_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ICE_ALL_STATS_LEN(netdev);
default:
return -EOPNOTSUPP;
}
}
static void
ice_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats __always_unused *stats, u64 *data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_ring *ring;
unsigned int j = 0;
int i = 0;
char *p;
for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
/* populate per queue stats */
rcu_read_lock();
ice_for_each_txq(vsi, j) {
ring = READ_ONCE(vsi->tx_rings[j]);
if (!ring)
continue;
data[i++] = ring->stats.pkts;
data[i++] = ring->stats.bytes;
}
ice_for_each_rxq(vsi, j) {
ring = READ_ONCE(vsi->rx_rings[j]);
data[i++] = ring->stats.pkts;
data[i++] = ring->stats.bytes;
}
rcu_read_unlock();
if (vsi->type != ICE_VSI_PF)
return;
for (j = 0; j < ICE_PF_STATS_LEN; j++) {
p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
static int
ice_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
bool link_up;
hw_link_info = &vsi->port_info->phy.link_info;
link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseT_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
/* set speed and duplex */
if (link_up) {
switch (hw_link_info->link_speed) {
case ICE_AQ_LINK_SPEED_100MB:
ks->base.speed = SPEED_100;
break;
case ICE_AQ_LINK_SPEED_2500MB:
ks->base.speed = SPEED_2500;
break;
case ICE_AQ_LINK_SPEED_5GB:
ks->base.speed = SPEED_5000;
break;
case ICE_AQ_LINK_SPEED_10GB:
ks->base.speed = SPEED_10000;
break;
case ICE_AQ_LINK_SPEED_25GB:
ks->base.speed = SPEED_25000;
break;
case ICE_AQ_LINK_SPEED_40GB:
ks->base.speed = SPEED_40000;
break;
default:
ks->base.speed = SPEED_UNKNOWN;
break;
}
ks->base.duplex = DUPLEX_FULL;
} else {
ks->base.speed = SPEED_UNKNOWN;
ks->base.duplex = DUPLEX_UNKNOWN;
}
/* set autoneg settings */
ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
/* set media type settings */
switch (vsi->port_info->phy.media_type) {
case ICE_MEDIA_FIBER:
ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
ks->base.port = PORT_FIBRE;
break;
case ICE_MEDIA_BASET:
ethtool_link_ksettings_add_link_mode(ks, supported, TP);
ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
ks->base.port = PORT_TP;
break;
case ICE_MEDIA_BACKPLANE:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Backplane);
ks->base.port = PORT_NONE;
break;
case ICE_MEDIA_DA:
ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
ks->base.port = PORT_DA;
break;
default:
ks->base.port = PORT_OTHER;
break;
}
/* flow control is symmetric and always supported */
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
switch (vsi->port_info->fc.req_mode) {
case ICE_FC_FULL:
ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
break;
case ICE_FC_TX_PAUSE:
ethtool_link_ksettings_add_link_mode(ks, advertising,
Asym_Pause);
break;
case ICE_FC_RX_PAUSE:
ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Asym_Pause);
break;
case ICE_FC_PFC:
default:
ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
ethtool_link_ksettings_del_link_mode(ks, advertising,
Asym_Pause);
break;
}
return 0;
}
/**
* ice_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
* @rule_locs: buffer to return Rx flow classification rules
*
* Returns Success if the command is supported.
*/
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
u32 __always_unused *rule_locs)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = vsi->rss_size;
ret = 0;
break;
default:
break;
}
return ret;
}
static void
ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
ring->rx_max_pending = ICE_MAX_NUM_DESC;
ring->tx_max_pending = ICE_MAX_NUM_DESC;
ring->rx_pending = vsi->rx_rings[0]->count;
ring->tx_pending = vsi->tx_rings[0]->count;
ring->rx_mini_pending = ICE_MIN_NUM_DESC;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_jumbo_pending = 0;
}
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
u32 new_rx_cnt, new_tx_cnt;
if (ring->tx_pending > ICE_MAX_NUM_DESC ||
ring->tx_pending < ICE_MIN_NUM_DESC ||
ring->rx_pending > ICE_MAX_NUM_DESC ||
ring->rx_pending < ICE_MIN_NUM_DESC) {
netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
ring->tx_pending, ring->rx_pending,
ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
return -EINVAL;
}
new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
/* if nothing to do return success */
if (new_tx_cnt == vsi->tx_rings[0]->count &&
new_rx_cnt == vsi->rx_rings[0]->count) {
netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
return 0;
}
while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
timeout--;
if (!timeout)
return -EBUSY;
usleep_range(1000, 2000);
}
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
for (i = 0; i < vsi->alloc_txq; i++)
vsi->tx_rings[i]->count = new_tx_cnt;
for (i = 0; i < vsi->alloc_rxq; i++)
vsi->rx_rings[i]->count = new_rx_cnt;
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
if (new_tx_cnt == vsi->tx_rings[0]->count)
goto process_rx;
/* alloc updated Tx resources */
netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
vsi->tx_rings[0]->count, new_tx_cnt);
tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
sizeof(struct ice_ring), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
}
for (i = 0; i < vsi->num_txq; i++) {
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
while (i) {
i--;
ice_clean_tx_ring(&tx_rings[i]);
}
devm_kfree(&pf->pdev->dev, tx_rings);
goto done;
}
}
process_rx:
if (new_rx_cnt == vsi->rx_rings[0]->count)
goto process_link;
/* alloc updated Rx resources */
netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
vsi->rx_rings[0]->count, new_rx_cnt);
rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
sizeof(struct ice_ring), GFP_KERNEL);
if (!rx_rings) {
err = -ENOMEM;
goto done;
}
for (i = 0; i < vsi->num_rxq; i++) {
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
rx_rings[i].desc = NULL;
rx_rings[i].rx_buf = NULL;
/* this is to allow wr32 to have something to write to
* during early allocation of Rx buffers
*/
rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;
err = ice_setup_rx_ring(&rx_rings[i]);
if (err)
goto rx_unwind;
/* allocate Rx buffers */
err = ice_alloc_rx_bufs(&rx_rings[i],
ICE_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
if (err) {
while (i) {
i--;
ice_free_rx_ring(&rx_rings[i]);
}
devm_kfree(&pf->pdev->dev, rx_rings);
err = -ENOMEM;
goto free_tx;
}
}
process_link:
/* Bring interface down, copy in the new ring info, then restore the
* interface. if VSI is up, bring it down and then back up
*/
if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
ice_down(vsi);
if (tx_rings) {
for (i = 0; i < vsi->alloc_txq; i++) {
ice_free_tx_ring(vsi->tx_rings[i]);
*vsi->tx_rings[i] = tx_rings[i];
}
devm_kfree(&pf->pdev->dev, tx_rings);
}
if (rx_rings) {
for (i = 0; i < vsi->alloc_rxq; i++) {
ice_free_rx_ring(vsi->rx_rings[i]);
/* copy the real tail offset */
rx_rings[i].tail = vsi->rx_rings[i]->tail;
/* this is to fake out the allocation routine
* into thinking it has to realloc everything
* but the recycling logic will let us re-use
* the buffers allocated above
*/
rx_rings[i].next_to_use = 0;
rx_rings[i].next_to_clean = 0;
rx_rings[i].next_to_alloc = 0;
*vsi->rx_rings[i] = rx_rings[i];
}
devm_kfree(&pf->pdev->dev, rx_rings);
}
ice_up(vsi);
}
goto done;
free_tx:
/* error cleanup if the Rx allocations failed after getting Tx */
if (tx_rings) {
for (i = 0; i < vsi->alloc_txq; i++)
ice_free_tx_ring(&tx_rings[i]);
devm_kfree(&pf->pdev->dev, tx_rings);
}
done:
clear_bit(__ICE_CFG_BUSY, pf->state);
return err;
}
static int ice_nway_reset(struct net_device *netdev)
{
/* restart autonegotiation */
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
enum ice_status status;
bool link_up;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
status = ice_aq_set_link_restart_an(pi, link_up, NULL);
if (status) {
netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
status, pi->hw->adminq.sq_last_status);
return -EIO;
}
return 0;
}
/**
* ice_get_pauseparam - Get Flow Control status
* @netdev: network interface device structure
* @pause: ethernet pause (flow control) parameters
*/
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_info *pi;
pi = np->vsi->port_info;
pause->autoneg =
((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
pause->rx_pause = 1;
} else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
pause->tx_pause = 1;
} else if (pi->fc.current_mode == ICE_FC_FULL) {
pause->rx_pause = 1;
pause->tx_pause = 1;
}
}
/**
* ice_set_pauseparam - Set Flow Control parameter
* @netdev: network interface device structure
* @pause: requested Tx/Rx flow control parameters
*/
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_pf *pf = np->vsi->back;
struct ice_vsi *vsi = np->vsi;
struct ice_hw *hw = &pf->hw;
struct ice_port_info *pi;
enum ice_status status;
u8 aq_failures;
bool link_up;
int err = 0;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
/* Changing the port's flow control is not supported if this isn't the
* PF VSI
*/
if (vsi->type != ICE_VSI_PF) {
netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
return -EOPNOTSUPP;
}
if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
return -EOPNOTSUPP;
}
/* If we have link and don't have autoneg */
if (!test_bit(__ICE_DOWN, pf->state) &&
!(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
/* Send message that it might not necessarily work */
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
if (pause->rx_pause && pause->tx_pause)
pi->fc.req_mode = ICE_FC_FULL;
else if (pause->rx_pause && !pause->tx_pause)
pi->fc.req_mode = ICE_FC_RX_PAUSE;
else if (!pause->rx_pause && pause->tx_pause)
pi->fc.req_mode = ICE_FC_TX_PAUSE;
else if (!pause->rx_pause && !pause->tx_pause)
pi->fc.req_mode = ICE_FC_NONE;
else
return -EINVAL;
/* Tell the OS link is going down, the link will go back up when fw
* says it is ready asynchronously
*/
ice_print_link_msg(vsi, false);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
/* Set the FC mode and only restart AN if link is up */
status = ice_set_fc(pi, &aq_failures, link_up);
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
err = -EAGAIN;
}
if (!test_bit(__ICE_DOWN, pf->state)) {
/* Give it a little more time to try to come back */
msleep(75);
if (!test_bit(__ICE_DOWN, pf->state))
return ice_nway_reset(netdev);
}
return err;
}
/**
* ice_get_rxfh_key_size - get the RSS hash key size
* @netdev: network interface device structure
*
* Returns the RSS hash key size.
*/
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
{
return ICE_VSIQF_HKEY_ARRAY_SIZE;
}
/**
* ice_get_rxfh_indir_size - get the rx flow hash indirection table size
* @netdev: network interface device structure
*
* Returns the table size.
*/
static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
return np->vsi->rss_table_size;
}
/**
* ice_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
* @hfunc: hash function
*
* Reads the indirection table directly from the hardware.
*/
static int
ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int ret = 0, i;
u8 *lut;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!indir)
return 0;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
/* RSS not supported return error here */
netdev_warn(netdev, "RSS is not configured on this VSI!\n");
return -EIO;
}
lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {
ret = -EIO;
goto out;
}
for (i = 0; i < vsi->rss_table_size; i++)
indir[i] = (u32)(lut[i]);
out:
devm_kfree(&pf->pdev->dev, lut);
return ret;
}
/**
* ice_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
* @hfunc: hash function
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
*/
static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u8 *seed = NULL;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
/* RSS not supported return error here */
netdev_warn(netdev, "RSS is not configured on this VSI!\n");
return -EIO;
}
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
devm_kzalloc(&pf->pdev->dev,
ICE_VSIQF_HKEY_ARRAY_SIZE,
GFP_KERNEL);
if (!vsi->rss_hkey_user)
return -ENOMEM;
}
memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
seed = vsi->rss_hkey_user;
}
if (!vsi->rss_lut_user) {
vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
vsi->rss_table_size,
GFP_KERNEL);
if (!vsi->rss_lut_user)
return -ENOMEM;
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
if (indir) {
int i;
for (i = 0; i < vsi->rss_table_size; i++)
vsi->rss_lut_user[i] = (u8)(indir[i]);
} else {
ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
vsi->rss_size);
}
if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
return -EIO;
return 0;
}
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.get_link = ethtool_op_get_link,
.get_strings = ice_get_strings,
.get_ethtool_stats = ice_get_ethtool_stats,
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
.get_pauseparam = ice_get_pauseparam,
.set_pauseparam = ice_set_pauseparam,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
};
/**
* ice_set_ethtool_ops - setup netdev ethtool ops
* @netdev: network interface device structure
*
* setup netdev ethtool ops with ice specific ops
*/
void ice_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &ice_ethtool_ops;
}


@@ -0,0 +1,266 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
/* Machine-generated file */
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
#define PF_FW_ARQH_ARQH_S 0
#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, PF_FW_ARQH_ARQH_S)
#define PF_FW_ARQLEN 0x00080280
#define PF_FW_ARQLEN_ARQLEN_S 0
#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, PF_FW_ARQLEN_ARQLEN_S)
#define PF_FW_ARQLEN_ARQVFE_S 28
#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S)
#define PF_FW_ARQLEN_ARQOVFL_S 29
#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S)
#define PF_FW_ARQLEN_ARQCRIT_S 30
#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S)
#define PF_FW_ARQLEN_ARQENABLE_S 31
#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S)
#define PF_FW_ARQT 0x00080480
#define PF_FW_ATQBAH 0x00080100
#define PF_FW_ATQBAL 0x00080000
#define PF_FW_ATQH 0x00080300
#define PF_FW_ATQH_ATQH_S 0
#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, PF_FW_ATQH_ATQH_S)
#define PF_FW_ATQLEN 0x00080200
#define PF_FW_ATQLEN_ATQLEN_S 0
#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
#define PF_FW_ATQLEN_ATQVFE_S 28
#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S)
#define PF_FW_ATQLEN_ATQOVFL_S 29
#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S)
#define PF_FW_ATQLEN_ATQCRIT_S 30
#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S)
#define PF_FW_ATQLEN_ATQENABLE_S 31
#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
#define PF_FW_ATQT 0x00080400
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S)
#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30
#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S)
#define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045c900 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S)
#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30
#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S)
#define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045ca00 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S)
#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30
#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S)
#define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045cb00 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S)
#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30
#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S)
#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4))
#define QRXFLXP_CNTXT_RXDID_IDX_S 0
#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S)
#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S)
#define QRXFLXP_CNTXT_TS_S 11
#define QRXFLXP_CNTXT_TS_M BIT(QRXFLXP_CNTXT_TS_S)
#define GLGEN_RSTAT 0x000B8188
#define GLGEN_RSTAT_DEVSTATE_S 0
#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S)
#define GLGEN_RSTCTL 0x000B8180
#define GLGEN_RSTCTL_GRSTDEL_S 0
#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S)
#define GLGEN_RSTAT_RESET_TYPE_S 2
#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, GLGEN_RSTAT_RESET_TYPE_S)
#define GLGEN_RTRIG 0x000B8190
#define GLGEN_RTRIG_CORER_S 0
#define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S)
#define GLGEN_RTRIG_GLOBR_S 1
#define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S)
#define GLGEN_STAT 0x000B612C
#define PFGEN_CTRL 0x00091000
#define PFGEN_CTRL_PFSWR_S 0
#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S)
#define PFGEN_STATE 0x00088000
#define PRTGEN_STATUS 0x000B8100
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_S 0
#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S)
#define GLINT_DYN_CTL_CLEARPBA_S 1
#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S)
#define GLINT_DYN_CTL_SWINT_TRIG_S 2
#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(GLINT_DYN_CTL_SWINT_TRIG_S)
#define GLINT_DYN_CTL_ITR_INDX_S 3
#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S)
#define GLINT_DYN_CTL_INTENA_MSK_S 31
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(GLINT_DYN_CTL_INTENA_MSK_S)
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
#define PFINT_FW_CTL 0x0016C800
#define PFINT_FW_CTL_MSIX_INDX_S 0
#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_FW_CTL_MSIX_INDX_S)
#define PFINT_FW_CTL_ITR_INDX_S 11
#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, PFINT_FW_CTL_ITR_INDX_S)
#define PFINT_FW_CTL_CAUSE_ENA_S 30
#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
#define PFINT_OICR 0x0016CA00
#define PFINT_OICR_INTEVENT_S 0
#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S)
#define PFINT_OICR_HLP_RDY_S 14
#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
#define PFINT_OICR_CPM_RDY_S 15
#define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S)
#define PFINT_OICR_ECC_ERR_S 16
#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
#define PFINT_OICR_MAL_DETECT_S 19
#define PFINT_OICR_MAL_DETECT_M BIT(PFINT_OICR_MAL_DETECT_S)
#define PFINT_OICR_GRST_S 20
#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
#define PFINT_OICR_PCI_EXCEPTION_S 21
#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
#define PFINT_OICR_GPIO_S 22
#define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S)
#define PFINT_OICR_STORM_DETECT_S 24
#define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S)
#define PFINT_OICR_HMC_ERR_S 26
#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
#define PFINT_OICR_PE_CRITERR_S 28
#define PFINT_OICR_PE_CRITERR_M BIT(PFINT_OICR_PE_CRITERR_S)
#define PFINT_OICR_CTL 0x0016CA80
#define PFINT_OICR_CTL_MSIX_INDX_S 0
#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_OICR_CTL_MSIX_INDX_S)
#define PFINT_OICR_CTL_ITR_INDX_S 11
#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, PFINT_OICR_CTL_ITR_INDX_S)
#define PFINT_OICR_CTL_CAUSE_ENA_S 30
#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S)
#define PFINT_OICR_ENA 0x0016C900
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
#define QINT_RQCTL_ITR_INDX_S 11
#define QINT_RQCTL_CAUSE_ENA_S 30
#define QINT_RQCTL_CAUSE_ENA_M BIT(QINT_RQCTL_CAUSE_ENA_S)
#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
#define QINT_TQCTL_MSIX_INDX_S 0
#define QINT_TQCTL_ITR_INDX_S 11
#define QINT_TQCTL_CAUSE_ENA_S 30
#define QINT_TQCTL_CAUSE_ENA_M BIT(QINT_TQCTL_CAUSE_ENA_S)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
#define QRX_CTRL_MAX_INDEX 2047
#define QRX_CTRL_QENA_REQ_S 0
#define QRX_CTRL_QENA_REQ_M BIT(QRX_CTRL_QENA_REQ_S)
#define QRX_CTRL_QENA_STAT_S 2
#define QRX_CTRL_QENA_STAT_M BIT(QRX_CTRL_QENA_STAT_S)
#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4))
#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4))
#define GLNVM_FLA 0x000B6108
#define GLNVM_FLA_LOCKED_S 6
#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S)
#define GLNVM_GENS 0x000B6100
#define GLNVM_GENS_SR_SIZE_S 5
#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S)
#define GLNVM_ULD 0x000B6008
#define GLNVM_ULD_CORER_DONE_S 3
#define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S)
#define GLNVM_ULD_GLOBR_DONE_S 4
#define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S)
#define PF_FUNC_RID 0x0009E880
#define PF_FUNC_RID_FUNC_NUM_S 0
#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S)
#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8))
#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define VSIQF_HKEY_MAX_INDEX 12
#endif /* _ICE_HW_AUTOGEN_H_ */

View File

@ -0,0 +1,473 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_LAN_TX_RX_H_
#define _ICE_LAN_TX_RX_H_
union ice_32byte_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_addr is DD bit */
__le64 rsvd1;
__le64 rsvd2;
} read;
struct {
struct {
struct {
__le16 mirroring_status;
__le16 l2tag1;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
__le32 fd_id; /* Flow Director filter id */
} hi_dword;
} qword0;
struct {
/* status/error/PTYPE/length */
__le64 status_error_len;
} qword1;
struct {
__le16 ext_status; /* extended status */
__le16 rsvd;
__le16 l2tag2_1;
__le16 l2tag2_2;
} qword2;
struct {
__le32 reserved;
__le32 fd_id;
} qword3;
} wb; /* writeback */
};
struct ice_rx_ptype_decoded {
u32 ptype:10;
u32 known:1;
u32 outer_ip:1;
u32 outer_ip_ver:2;
u32 outer_frag:1;
u32 tunnel_type:3;
u32 tunnel_end_prot:2;
u32 tunnel_end_frag:1;
u32 inner_prot:4;
u32 payload_layer:3;
};
enum ice_rx_ptype_outer_ip {
ICE_RX_PTYPE_OUTER_L2 = 0,
ICE_RX_PTYPE_OUTER_IP = 1,
};
enum ice_rx_ptype_outer_ip_ver {
ICE_RX_PTYPE_OUTER_NONE = 0,
ICE_RX_PTYPE_OUTER_IPV4 = 1,
ICE_RX_PTYPE_OUTER_IPV6 = 2,
};
enum ice_rx_ptype_outer_fragmented {
ICE_RX_PTYPE_NOT_FRAG = 0,
ICE_RX_PTYPE_FRAG = 1,
};
enum ice_rx_ptype_tunnel_type {
ICE_RX_PTYPE_TUNNEL_NONE = 0,
ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
};
enum ice_rx_ptype_tunnel_end_prot {
ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
};
enum ice_rx_ptype_inner_prot {
ICE_RX_PTYPE_INNER_PROT_NONE = 0,
ICE_RX_PTYPE_INNER_PROT_UDP = 1,
ICE_RX_PTYPE_INNER_PROT_TCP = 2,
ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
};
enum ice_rx_ptype_payload_layer {
ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
/* RX Flex Descriptor
* This descriptor is used instead of the legacy version descriptor when
* ice_rlan_ctx.adv_desc is set
*/
union ice_32b_rx_flex_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_addr is DD bit */
__le64 rsvd1;
__le64 rsvd2;
} read;
struct {
/* Qword 0 */
u8 rxdid; /* descriptor builder profile id */
u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
__le16 pkt_len; /* [15:14] are reserved */
__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
/* sph=[11:11] */
/* ff1/ext=[15:12] */
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le16 flex_meta0;
__le16 flex_meta1;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 time_stamp_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le16 flex_meta2;
__le16 flex_meta3;
union {
struct {
__le16 flex_meta4;
__le16 flex_meta5;
} flex;
__le32 ts_high;
} flex_ts;
} wb; /* writeback */
};
/* Rx Flex Descriptor NIC Profile
* This descriptor corresponds to RxDID 2 which contains
* metadata fields for RSS, flow id and timestamp info
*/
struct ice_32b_rx_flex_desc_nic {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flexi_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le32 rss_hash;
/* Qword 2 */
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le32 flow_id;
union {
struct {
__le16 vlan_id;
__le16 flow_id_ipv6;
} flex;
__le32 ts_high;
} flex_ts;
};
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
* with a specific metadata (profile 7 reserved for HW)
*/
enum ice_rxdid {
ICE_RXDID_START = 0,
ICE_RXDID_LEGACY_0 = ICE_RXDID_START,
ICE_RXDID_LEGACY_1,
ICE_RXDID_FLX_START,
ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START,
ICE_RXDID_FLX_LAST = 63,
ICE_RXDID_LAST = ICE_RXDID_FLX_LAST
};
/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01
/* Receive Descriptor MDID values */
#define ICE_RX_MDID_FLOW_ID_LOWER 5
#define ICE_RX_MDID_FLOW_ID_HIGH 6
#define ICE_RX_MDID_HASH_LOW 56
#define ICE_RX_MDID_HASH_HIGH 57
/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {
ICE_RXFLG_PKT_DSI = 0,
ICE_RXFLG_EVLAN_x8100 = 15,
ICE_RXFLG_EVLAN_x9100,
ICE_RXFLG_VLAN_x8100,
ICE_RXFLG_TNL_MAC = 22,
ICE_RXFLG_TNL_VLAN,
ICE_RXFLG_PKT_FRG,
ICE_RXFLG_FIN = 32,
ICE_RXFLG_SYN,
ICE_RXFLG_RST,
ICE_RXFLG_TNL0 = 38,
ICE_RXFLG_TNL1,
ICE_RXFLG_TNL2,
ICE_RXFLG_UDP_GRE,
ICE_RXFLG_RSVD = 63
};
/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
#define ICE_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */
/* for ice_32byte_rx_flex_desc.pkt_length member */
#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */
enum ice_rx_flex_desc_status_error_0_bits {
/* Note: These are predefined bit offsets */
ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
ICE_RX_FLEX_DESC_STATUS0_EOF_S,
ICE_RX_FLEX_DESC_STATUS0_HBO_S,
ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
ICE_RX_FLEX_DESC_STATUS0_RXE_S,
ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
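The status_error0 bits and the length mask above are what an Rx clean-up routine polls and parses for every descriptor writeback. A minimal hedged sketch of that check, assuming a flex descriptor already fetched from the ring (the helper name and locals are illustrative, not part of this patch):

/* Sketch only: test DD and pull the byte count out of a flex
 * descriptor writeback. Assumes rx_desc points into the Rx ring.
 */
static bool ice_rx_flex_desc_done_example(union ice_32b_rx_flex_desc *rx_desc,
					  unsigned int *pkt_len)
{
	u16 stat_err0 = le16_to_cpu(rx_desc->wb.status_error0);

	if (!(stat_err0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)))
		return false;	/* hardware has not written this entry back */

	/* only the low 14 bits of pkt_len carry the packet length */
	*pkt_len = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M;
	return true;
}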
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
/* RLAN Rx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct ice_rlan_ctx {
u16 head;
u16 cpuid; /* bigger than needed, see above for reason */
u64 base;
u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
u16 hbuf; /* bigger than needed, see above for reason */
u8 dtype;
u8 dsize;
u8 crcstrip;
u8 l2tsel;
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
u32 rxmax; /* bigger than needed, see above for reason */
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
};
struct ice_ctx_ele {
u16 offset;
u16 size_of;
u16 width;
u16 lsb;
};
#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
.offset = offsetof(struct _struct, _ele), \
.size_of = FIELD_SIZEOF(struct _struct, _ele), \
.width = _width, \
.lsb = _lsb, \
}
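ICE_CTX_STORE ties a software struct member to its packed position in the hardware context image; the driver builds tables of these elements and walks them when writing a queue context. A hedged sketch of such a table for two ice_rlan_ctx fields (the width/LSB values below are illustrative placeholders, not the hardware-defined values the driver uses):

/* Sketch only: a context-element table built with ICE_CTX_STORE.
 * Width/LSB values are placeholders for illustration.
 */
static const struct ice_ctx_ele example_rlan_ctx_info[] = {
	/*		       Field	Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,	13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,	8,	13),
	{ 0 }
};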
/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 = 1,
ICE_RLAN_RX_HSPLIT_0_SPLIT_IP = 2,
ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP = 8,
};
/* for hsplit_1 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_1 {
ICE_RLAN_RX_HSPLIT_1_NO_SPLIT = 0,
ICE_RLAN_RX_HSPLIT_1_SPLIT_L2 = 1,
ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
};
/* TX Descriptor */
struct ice_tx_desc {
__le64 buf_addr; /* Address of descriptor's data buf */
__le64 cmd_type_offset_bsz;
};
enum ice_tx_desc_dtype_value {
ICE_TX_DESC_DTYPE_DATA = 0x0,
ICE_TX_DESC_DTYPE_CTX = 0x1,
/* DESC_DONE - HW has completed write-back of descriptor */
ICE_TX_DESC_DTYPE_DESC_DONE = 0xF,
};
#define ICE_TXD_QW1_CMD_S 4
#define ICE_TXD_QW1_CMD_M (0xFFFUL << ICE_TXD_QW1_CMD_S)
enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_EOP = 0x0001,
ICE_TX_DESC_CMD_RS = 0x0002,
ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
};
#define ICE_TXD_QW1_OFFSET_S 16
#define ICE_TXD_QW1_OFFSET_M (0x3FFFFULL << ICE_TXD_QW1_OFFSET_S)
enum ice_tx_desc_len_fields {
/* Note: These are predefined bit offsets */
ICE_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */
ICE_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */
ICE_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */
};
#define ICE_TXD_QW1_MACLEN_M (0x7FUL << ICE_TX_DESC_LEN_MACLEN_S)
#define ICE_TXD_QW1_IPLEN_M (0x7FUL << ICE_TX_DESC_LEN_IPLEN_S)
#define ICE_TXD_QW1_L4LEN_M (0xFUL << ICE_TX_DESC_LEN_L4_LEN_S)
/* Tx descriptor field limits in bytes */
#define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \
ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD)
#define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \
ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD)
#define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \
ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD)
#define ICE_TXD_QW1_TX_BUF_SZ_S 34
#define ICE_TXD_QW1_L2TAG1_S 48
/* Context descriptors */
struct ice_tx_ctx_desc {
__le32 tunneling_params;
__le16 l2tag2;
__le16 rsvd;
__le64 qw1;
};
#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
#define ICE_TXD_CTX_QW1_TSO_LEN_S 30
#define ICE_TXD_CTX_QW1_TSO_LEN_M \
(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
#define ICE_TXD_CTX_QW1_MSS_S 50
enum ice_tx_ctx_desc_cmd_bits {
ICE_TX_CTX_DESC_TSO = 0x01,
ICE_TX_CTX_DESC_TSYN = 0x02,
ICE_TX_CTX_DESC_IL2TAG2 = 0x04,
ICE_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
ICE_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
ICE_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
ICE_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
ICE_TX_CTX_DESC_SWTCH_VSI = 0x30,
ICE_TX_CTX_DESC_RESERVED = 0x40
};
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
/* Tx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
u8 port_num;
u16 cgd_num; /* bigger than needed, see above for reason */
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
u8 tsyn_ena;
u8 alt_vlan;
u16 cpuid; /* bigger than needed, see above for reason */
u8 wb_mode;
u8 tphrd_desc;
u8 tphrd;
u8 tphwr_desc;
u16 cmpq_id;
u16 qnum_in_func;
u8 itr_notification_mode;
u8 adjust_prof_id;
u32 qlen; /* bigger than needed, see above for reason */
u8 quanta_prof_idx;
u8 tso_ena;
u16 tso_qnum;
u8 legacy_int;
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
u8 int_q_state; /* width not needed - internal do not write */
};
/* macro to make the table lines short */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
{ PTYPE, \
1, \
ICE_RX_PTYPE_OUTER_##OUTER_IP, \
ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
ICE_RX_PTYPE_##OUTER_FRAG, \
ICE_RX_PTYPE_TUNNEL_##T, \
ICE_RX_PTYPE_TUNNEL_END_##TE, \
ICE_RX_PTYPE_##TEF, \
ICE_RX_PTYPE_INNER_PROT_##I, \
ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
/* shorter macros makes the table fit but are terse */
#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
/* L2 Packet types */
ICE_PTT_UNUSED_ENTRY(0),
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
};
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
return ice_ptype_lkup[ptype];
}
#endif /* _ICE_LAN_TX_RX_H_ */
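ice_decode_rx_desc_ptype() above is fed the 10-bit PTYPE pulled out of a received flex descriptor; the decoded bit-field then drives checksum and RSS handling. A hedged usage sketch (the function and the pr_debug call are illustrative, and the PTYPE must index a populated entry of the lookup table):

/* Sketch only: derive offload hints from the hardware PTYPE. */
static void ice_ptype_example(union ice_32b_rx_flex_desc *rx_desc)
{
	u16 ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
		    ICE_RX_FLEX_DESC_PTYPE_M;
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return;		/* unrecognized type, no offload hints */

	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP &&
	    decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP)
		pr_debug("inner TCP over IP, checksum candidate\n");
}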

File diff suppressed because it is too large

View File

@ -0,0 +1,236 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
/**
* ice_aq_read_nvm
* @hw: pointer to the hw struct
* @module_typeid: module pointer location in words from the NVM beginning
* @offset: byte offset from the module beginning
* @length: length of the section to be read (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
* @cd: pointer to command details structure or NULL
*
* Read the NVM using the admin queue commands (0x0701)
*/
static enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length,
void *data, bool last_command, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
cmd = &desc.params.nvm;
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
cmd->module_typeid = module_typeid;
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
/**
* ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
* @hw: pointer to the HW structure
* @offset: offset in words from module start
* @words: number of words to access
*/
static enum ice_status
ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
{
if ((offset + words) > hw->nvm.sr_words) {
ice_debug(hw, ICE_DBG_NVM,
"NVM error: offset beyond SR lmt.\n");
return ICE_ERR_PARAM;
}
if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
/* We can access only up to 4KB (one sector), in one AQ write */
ice_debug(hw, ICE_DBG_NVM,
"NVM error: tried to access %d words, limit is %d.\n",
words, ICE_SR_SECTOR_SIZE_IN_WORDS);
return ICE_ERR_PARAM;
}
if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
(offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
/* A single access cannot spread over two sectors */
ice_debug(hw, ICE_DBG_NVM,
"NVM error: cannot spread over two sectors.\n");
return ICE_ERR_PARAM;
}
return 0;
}
/**
* ice_read_sr_aq - Read Shadow RAM.
* @hw: pointer to the HW structure
* @offset: offset in words from module start
* @words: number of words to read
* @data: buffer for words reads from Shadow RAM
* @last_command: tells the AdminQ that this is the last command
*
* Reads 16-bit word buffers from the Shadow RAM using the admin command.
*/
static enum ice_status
ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data,
bool last_command)
{
enum ice_status status;
status = ice_check_sr_access_params(hw, offset, words);
/* values in "offset" and "words" parameters are sized as words
* (16 bits) but ice_aq_read_nvm expects these values in bytes.
* So do this conversion while calling ice_aq_read_nvm.
*/
if (!status)
status = ice_aq_read_nvm(hw, 0, 2 * offset, 2 * words, data,
last_command, NULL);
return status;
}
/**
* ice_read_sr_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_aq method.
*/
static enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
status = ice_read_sr_aq(hw, offset, 1, data, true);
if (!status)
*data = le16_to_cpu(*(__le16 *)data);
return status;
}
/**
* ice_acquire_nvm - Generic request for acquiring the NVM ownership
* @hw: pointer to the HW structure
* @access: NVM access type (read or write)
*
* This function will request NVM ownership.
*/
static enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
if (hw->nvm.blank_nvm_mode)
return 0;
return ice_acquire_res(hw, ICE_NVM_RES_ID, access);
}
/**
* ice_release_nvm - Generic request for releasing the NVM ownership
* @hw: pointer to the HW structure
*
* This function will release NVM ownership.
*/
static void ice_release_nvm(struct ice_hw *hw)
{
if (hw->nvm.blank_nvm_mode)
return;
ice_release_res(hw, ICE_NVM_RES_ID);
}
/**
* ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq method.
*/
static enum ice_status
ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
status = ice_read_sr_word_aq(hw, offset, data);
ice_release_nvm(hw);
}
return status;
}
/**
* ice_init_nvm - initializes NVM settings
* @hw: pointer to the hw struct
*
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
struct ice_nvm_info *nvm = &hw->nvm;
u16 eetrack_lo, eetrack_hi;
enum ice_status status = 0;
u32 fla, gens_stat;
u8 sr_size;
/* The SR size is stored regardless of the nvm programming mode
* as the blank mode may be used in the factory line.
*/
gens_stat = rd32(hw, GLNVM_GENS);
sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
/* Switching to words (sr_size contains power of 2) */
nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, GLNVM_FLA);
if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
nvm->blank_nvm_mode = false;
} else { /* Blank programming mode */
nvm->blank_nvm_mode = true;
status = ICE_ERR_NVM_BLANK_MODE;
ice_debug(hw, ICE_DBG_NVM,
"NVM init error: unsupported blank mode.\n");
return status;
}
status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &hw->nvm.ver);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read DEV starter version.\n");
return status;
}
status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
return status;
}
status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
return status;
}
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
return status;
}
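ice_init_nvm() is intended to run once during HW bring-up, and a failure such as ICE_ERR_NVM_BLANK_MODE should abort initialization. A hedged sketch of a call site (the wrapper itself is illustrative, not the driver's actual init path):

/* Sketch only: invoke ice_init_nvm() during device init and log failures. */
static enum ice_status ice_init_nvm_example(struct ice_hw *hw)
{
	enum ice_status status = ice_init_nvm(hw);

	if (status)
		ice_debug(hw, ICE_DBG_INIT, "NVM init failed: %d\n", status);

	return status;
}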

View File

@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_OSDEP_H_
#define _ICE_OSDEP_H_
#include <linux/types.h>
#include <linux/io.h>
#ifndef CONFIG_64BIT
#include <linux/io-64-nonatomic-lo-hi.h>
#endif
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define ice_flush(a) rd32((a), GLGEN_STAT)
#define ICE_M(m, s) ((m) << (s))
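rd32()/wr32() are the primitives every register access in this driver reduces to, and ICE_M() is the shift-and-mask helper for building field masks. A hedged sketch that extracts a field ice_init_nvm() also reads (the helper itself is illustrative):

/* Sketch only: extract the Shadow RAM size field from GLNVM_GENS. */
static u8 ice_read_sr_size_example(struct ice_hw *hw)
{
	u32 gens_stat = rd32(hw, GLNVM_GENS);

	return (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
}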
struct ice_dma_mem {
void *va;
dma_addr_t pa;
size_t size;
};
#define ice_hw_to_dev(ptr) \
(&(container_of((ptr), struct ice_pf, hw))->pdev->dev)
#ifdef CONFIG_DYNAMIC_DEBUG
#define ice_debug(hw, type, fmt, args...) \
dev_dbg(ice_hw_to_dev(hw), fmt, ##args)
#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
print_hex_dump_debug(KBUILD_MODNAME " ", \
DUMP_PREFIX_OFFSET, rowsize, \
groupsize, buf, len, false)
#else
#define ice_debug(hw, type, fmt, args...) \
do { \
if ((type) & (hw)->debug_mask) \
dev_info(ice_hw_to_dev(hw), fmt, ##args); \
} while (0)
#ifdef DEBUG
#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
do { \
if ((type) & (hw)->debug_mask) \
print_hex_dump_debug(KBUILD_MODNAME, \
DUMP_PREFIX_OFFSET, \
rowsize, groupsize, buf, \
len, false); \
} while (0)
#else
#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
do { \
struct ice_hw *hw_l = hw; \
if ((type) & (hw_l)->debug_mask) { \
u16 len_l = len; \
u8 *buf_l = buf; \
int i; \
for (i = 0; i < (len_l - 16); i += 16) \
ice_debug(hw_l, type, "0x%04X %16ph\n",\
i, ((buf_l) + i)); \
if (i < len_l) \
ice_debug(hw_l, type, "0x%04X %*ph\n", \
i, ((len_l) - i), ((buf_l) + i));\
} \
} while (0)
#endif /* DEBUG */
#endif /* CONFIG_DYNAMIC_DEBUG */
#endif /* _ICE_OSDEP_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_SCHED_H_
#define _ICE_SCHED_H_
#include "ice_common.h"
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u16 vsi_id;
};
struct ice_sched_agg_info {
struct list_head agg_vsi_list;
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u32 agg_id;
enum ice_agg_type agg_type;
};
/* FW AQ command calls */
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_cleanup_all(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info);
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
u8 owner);
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
u8 owner, bool enable);
#endif /* _ICE_SCHED_H_ */

View File

@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_STATUS_H_
#define _ICE_STATUS_H_
/* Error Codes */
enum ice_status {
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
ICE_ERR_BAD_PTR = -5,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
ICE_ERR_RESET_FAILED = -9,
ICE_ERR_FW_API_VER = -10,
ICE_ERR_NO_MEMORY = -11,
ICE_ERR_CFG = -12,
ICE_ERR_OUT_OF_RANGE = -13,
ICE_ERR_ALREADY_EXISTS = -14,
ICE_ERR_DOES_NOT_EXIST = -15,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
ICE_ERR_AQ_ERROR = -100,
ICE_ERR_AQ_TIMEOUT = -101,
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
};
#endif /* _ICE_STATUS_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,161 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_SWITCH_H_
#define _ICE_SWITCH_H_
#include "ice_common.h"
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
#define ICE_VSI_INVAL_ID 0xffff
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
u16 vsis_allocd;
u16 vsis_unallocated;
u16 flags;
struct ice_aqc_vsi_props info;
bool alloc_from_pool;
};
enum ice_sw_fwd_act_type {
ICE_FWD_TO_VSI = 0,
ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
ICE_FWD_TO_Q,
ICE_FWD_TO_QGRP,
ICE_DROP_PACKET,
ICE_INVAL_ACT
};
/* Switch recipe ID enum values are specific to hardware */
enum ice_sw_lkup_type {
ICE_SW_LKUP_ETHERTYPE = 0,
ICE_SW_LKUP_MAC = 1,
ICE_SW_LKUP_MAC_VLAN = 2,
ICE_SW_LKUP_PROMISC = 3,
ICE_SW_LKUP_VLAN = 4,
ICE_SW_LKUP_DFLT = 5,
ICE_SW_LKUP_ETHERTYPE_MAC = 8,
ICE_SW_LKUP_PROMISC_VLAN = 9,
};
struct ice_fltr_info {
/* Look up information: how to look up packet */
enum ice_sw_lkup_type lkup_type;
/* Forward action: filter action to do after lookup */
enum ice_sw_fwd_act_type fltr_act;
/* rule ID returned by firmware once filter rule is created */
u16 fltr_rule_id;
u16 flag;
#define ICE_FLTR_RX BIT(0)
#define ICE_FLTR_TX BIT(1)
#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
/* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
u16 src;
union {
struct {
u8 mac_addr[ETH_ALEN];
} mac;
struct {
u8 mac_addr[ETH_ALEN];
u16 vlan_id;
} mac_vlan;
struct {
u16 vlan_id;
} vlan;
/* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
* if just using ethertype as filter. Set lkup_type as
* ICE_SW_LKUP_ETHERTYPE_MAC if MAC also needs to be
* passed in as filter.
*/
struct {
u16 ethertype;
u8 mac_addr[ETH_ALEN]; /* optional */
} ethertype_mac;
} l_data;
/* Depending on filter action */
union {
/* queue id in case of ICE_FWD_TO_Q and starting
* queue id in case of ICE_FWD_TO_QGRP.
*/
u16 q_id:11;
u16 vsi_id:10;
u16 vsi_list_id:10;
} fwd_id;
/* Set to num_queues if action is ICE_FWD_TO_QGRP. This field
* determines the range of queues the packet needs to be forwarded to
*/
u8 qgrp_size;
/* Rule creation populates these indicators based on the switch type */
bool lb_en; /* Indicate if packet can be looped back */
bool lan_en; /* Indicate if packet can be forwarded to the uplink */
};
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
struct ice_vsi_list_map_info {
struct list_head list_entry;
DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
u16 vsi_list_id;
};
enum ice_sw_fltr_status {
ICE_FLTR_STATUS_NEW = 0,
ICE_FLTR_STATUS_FW_SUCCESS,
ICE_FLTR_STATUS_FW_FAIL,
};
struct ice_fltr_list_entry {
struct list_head list_entry;
enum ice_sw_fltr_status status;
struct ice_fltr_info fltr_info;
};
/* This defines an entry in the list that maintains MAC or VLAN membership
* to HW list mapping, since multiple VSIs can subscribe to the same MAC or
* VLAN. As an optimization the VSI list should be created only when a
* second VSI becomes a subscriber to the same MAC or VLAN address.
*/
struct ice_fltr_mgmt_list_entry {
/* back pointer to VSI list id to VSI list mapping */
struct ice_vsi_list_map_info *vsi_list_info;
u16 vsi_count;
#define ICE_INVAL_LG_ACT_INDEX 0xffff
u16 lg_act_idx;
#define ICE_INVAL_SW_MARKER_ID 0xffff
u16 sw_marker_id;
struct list_head list_entry;
struct ice_fltr_info fltr_info;
#define ICE_INVAL_COUNTER_ID 0xff
u8 counter_index;
};
/* VSI related commands */
enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd);
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
/* Switch/bridge related commands */
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id);
enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction);
#endif /* _ICE_SWITCH_H_ */
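The intended flow for the filter structures and the list-based API above: build one ice_fltr_list_entry per address, chain the entries on a list_head, and hand the list to ice_add_mac(). A hedged sketch of that flow for a single unicast address (memory handling and field choices are illustrative):

/* Sketch only: forward one unicast MAC to a VSI via ice_add_mac(). */
static enum ice_status ice_add_one_mac_example(struct ice_hw *hw, u16 vsi_id,
					       const u8 *mac)
{
	struct ice_fltr_list_entry *entry;
	enum ice_status status;
	LIST_HEAD(tmp_list);

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ICE_ERR_NO_MEMORY;

	entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	entry->fltr_info.flag = ICE_FLTR_TX;
	entry->fltr_info.src = vsi_id;
	entry->fltr_info.fwd_id.vsi_id = vsi_id;
	ether_addr_copy(entry->fltr_info.l_data.mac.mac_addr, mac);

	list_add(&entry->list_entry, &tmp_list);
	status = ice_add_mac(hw, &tmp_list);

	kfree(entry);
	return status;
}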

File diff suppressed because it is too large

View File

@ -0,0 +1,192 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_
#define ICE_DFLT_IRQ_WORK 256
#define ICE_RXBUF_2048 2048
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
* the nearest 4K which represents our maximum read request size.
*/
#define ICE_MAX_READ_REQ_SIZE 4096
#define ICE_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
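Worked out: ICE_MAX_DATA_PER_TXD is 16 * 1024 - 1 = 0x3FFF, and masking with ~(4096 - 1) leaves ICE_MAX_DATA_PER_TXD_ALIGNED = 0x3000 (12288 bytes), the largest 4 KB-aligned chunk a single descriptor is handed.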
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define ICE_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
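For example, on a 512-entry ring with next_to_clean = 10 and next_to_use = 500, ICE_DESC_UNUSED evaluates to 512 + 10 - 500 - 1 = 21 descriptors still free for the driver to use.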
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_S 16
struct ice_tx_buf {
struct ice_tx_desc *next_to_watch;
struct sk_buff *skb;
unsigned int bytecount;
unsigned short gso_segs;
u32 tx_flags;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
};
struct ice_tx_offload_params {
u8 header_len;
u32 td_cmd;
u32 td_offset;
u32 td_l2tag1;
u16 cd_l2tag2;
u32 cd_tunnel_params;
u64 cd_qw1;
struct ice_ring *tx_ring;
};
struct ice_rx_buf {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
};
struct ice_q_stats {
u64 pkts;
u64 bytes;
};
struct ice_txq_stats {
u64 restart_q;
u64 tx_busy;
u64 tx_linearize;
};
struct ice_rxq_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buf_failed;
u64 page_reuse_count;
};
/* this enum matches hardware bits and is meant to be used by DYN_CTLN
* registers and QINT registers or more generally anywhere in the manual
* mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
* register but is instead a special value meaning "don't update" ITR0/1/2.
*/
enum ice_dyn_idx_t {
ICE_IDX_ITR0 = 0,
ICE_IDX_ITR1 = 1,
ICE_IDX_ITR2 = 2,
ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};
/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
ICE_RX_DTYPE_NO_SPLIT = 0,
ICE_RX_DTYPE_HEADER_SPLIT = 1,
ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};
/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define ICE_ITR_8K 0x003E
/* apply ITR HW granularity translation to program the HW registers */
#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
struct ice_ring {
struct ice_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
union {
struct ice_tx_buf *tx_buf;
struct ice_rx_buf *rx_buf;
};
u16 q_index; /* Queue number of ring */
u32 txq_teid; /* Added Tx queue TEID */
/* high bit set means dynamic, use accessor routines to read/write.
* hardware supports 2us/1us resolution for the ITR registers.
* these values always store the USER setting, and must be converted
* before programming to a register.
*/
u16 rx_itr_setting;
u16 tx_itr_setting;
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
bool ring_active; /* is ring online or not */
/* stats structs */
struct ice_q_stats stats;
struct u64_stats_sync syncp;
union {
struct ice_txq_stats tx_stats;
struct ice_rxq_stats rx_stats;
};
unsigned int size; /* length of descriptor ring in bytes */
dma_addr_t dma; /* physical address of ring */
struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
enum ice_latency_range {
ICE_LOWEST_LATENCY = 0,
ICE_LOW_LATENCY = 1,
ICE_BULK_LATENCY = 2,
ICE_ULTRA_LATENCY = 3,
};
struct ice_ring_container {
/* array of pointers to rings */
struct ice_ring *ring;
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
enum ice_latency_range latency_range;
u16 itr;
};
/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
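ice_for_each_ring() walks the singly linked chain hanging off an ice_ring_container, which is how a poll routine visits every ring sharing the interrupt vector. A hedged sketch (the accounting helper is illustrative):

/* Sketch only: sum the byte counts of all rings in one container. */
static u64 ice_container_bytes_example(struct ice_ring_container *rc)
{
	struct ice_ring *ring;
	u64 bytes = 0;

	ice_for_each_ring(ring, *rc)
		bytes += ring->stats.bytes;

	return bytes;
}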
bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
#endif /* _ICE_TXRX_H_ */

View File

@ -0,0 +1,394 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_TYPE_H_
#define _ICE_TYPE_H_
#include "ice_status.h"
#include "ice_hw_autogen.h"
#include "ice_osdep.h"
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
{
return test_bit(tc, (unsigned long *)&bitmap);
}
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_LINK BIT_ULL(4)
#define ICE_DBG_QCTX BIT_ULL(6)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_LAN BIT_ULL(8)
#define ICE_DBG_SW BIT_ULL(13)
#define ICE_DBG_SCHED BIT_ULL(14)
#define ICE_DBG_RES BIT_ULL(17)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
#define ICE_DBG_USER BIT_ULL(31)
enum ice_aq_res_ids {
ICE_NVM_RES_ID = 1,
ICE_SPD_RES_ID,
ICE_GLOBAL_CFG_LOCK_RES_ID,
ICE_CHANGE_LOCK_RES_ID
};
enum ice_aq_res_access_type {
ICE_RES_READ = 1,
ICE_RES_WRITE
};
enum ice_fc_mode {
ICE_FC_NONE = 0,
ICE_FC_RX_PAUSE,
ICE_FC_TX_PAUSE,
ICE_FC_FULL,
ICE_FC_PFC,
ICE_FC_DFLT
};
enum ice_set_fc_aq_failures {
ICE_SET_FC_AQ_FAIL_NONE = 0,
ICE_SET_FC_AQ_FAIL_GET,
ICE_SET_FC_AQ_FAIL_SET,
ICE_SET_FC_AQ_FAIL_UPDATE
};
/* Various MAC types */
enum ice_mac_type {
ICE_MAC_UNKNOWN = 0,
ICE_MAC_GENERIC,
};
/* Media Types */
enum ice_media_type {
ICE_MEDIA_UNKNOWN = 0,
ICE_MEDIA_FIBER,
ICE_MEDIA_BASET,
ICE_MEDIA_BACKPLANE,
ICE_MEDIA_DA,
};
enum ice_vsi_type {
ICE_VSI_PF = 0,
};
struct ice_link_status {
/* Refer to ice_aq_phy_type for bits definition */
u64 phy_type_low;
u16 max_frame_size;
u16 link_speed;
bool lse_ena; /* Link Status Event notification */
u8 link_info;
u8 an_info;
u8 ext_info;
u8 pacing;
u8 req_speeds;
/* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
* ice_aqc_get_phy_caps structure
*/
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
};
/* PHY info such as phy_type, etc... */
struct ice_phy_info {
struct ice_link_status link_info;
struct ice_link_status link_info_old;
u64 phy_type_low;
enum ice_media_type media_type;
bool get_link_info;
};
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
/* TX/RX queues */
u16 num_rxq; /* Number/Total RX queues */
u16 rxq_first_id; /* First queue ID for RX queues */
u16 num_txq; /* Number/Total TX queues */
u16 txq_first_id; /* First queue ID for TX queues */
/* MSI-X vectors */
u16 num_msix_vectors;
u16 msix_vector_first_id;
/* Max MTU for function or device */
u16 max_mtu;
/* RSS related capabilities */
u16 rss_table_size; /* 512 for PFs and 64 for VFs */
u8 rss_table_entry_width; /* RSS Entry width in bits */
};
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
u32 guaranteed_num_vsi;
};
/* Device wide capabilities */
struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
};
/* MAC info */
struct ice_mac_info {
u8 lan_addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
};
/* Various RESET requests. These are not tied to HW reset types */
enum ice_reset_req {
ICE_RESET_PFR = 0,
ICE_RESET_CORER = 1,
ICE_RESET_GLOBR = 2,
};
/* Bus parameters */
struct ice_bus_info {
u16 device;
u8 func;
};
/* Flow control (FC) parameters */
struct ice_fc_info {
enum ice_fc_mode current_mode; /* FC mode in effect */
enum ice_fc_mode req_mode; /* FC mode requested by caller */
};
/* NVM Information */
struct ice_nvm_info {
u32 eetrack; /* NVM data version */
u32 oem_ver; /* OEM version info */
u16 sr_words; /* Shadow RAM size in words */
u16 ver; /* NVM package version */
bool blank_nvm_mode; /* is NVM empty (no FW present) */
};
/* Max number of port to queue branches w.r.t topology */
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
struct ice_sched_node {
struct ice_sched_node *parent;
struct ice_sched_node *sibling; /* next sibling in the same layer */
struct ice_sched_node **children;
struct ice_aqc_txsched_elem_data info;
u32 agg_id; /* aggregator group id */
u16 vsi_id;
bool in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
u8 num_children;
u8 tc_num;
u8 owner;
#define ICE_SCHED_NODE_OWNER_LAN 0
};
/* Access Macros for Tx Sched Elements data */
#define ICE_TXSCHED_GET_NODE_TEID(x) le32_to_cpu((x)->info.node_teid)
/* The aggregator type determines if identifier is for a VSI group,
* aggregator group, aggregator of queues, or queue group.
*/
enum ice_agg_type {
ICE_AGG_TYPE_UNKNOWN = 0,
ICE_AGG_TYPE_VSI,
ICE_AGG_TYPE_AGG, /* aggregator */
ICE_AGG_TYPE_Q,
ICE_AGG_TYPE_QG
};
#define ICE_SCHED_DFLT_RL_PROF_ID 0
/* vsi type list entry to locate corresponding vsi/ag nodes */
struct ice_sched_vsi_info {
struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
u16 vsi_id;
};
/* driver defines the policy */
struct ice_sched_tx_policy {
u16 max_num_vsis;
u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
bool rdma_ena;
};
struct ice_port_info {
struct ice_sched_node *root; /* Root Node per Port */
struct ice_hw *hw; /* back pointer to hw instance */
u32 last_node_teid; /* scheduler last node info */
u16 sw_id; /* Initial switch ID belongs to port */
u16 pf_vf_num;
u8 port_state;
#define ICE_SCHED_PORT_STATE_INIT 0x0
#define ICE_SCHED_PORT_STATE_READY 0x1
u16 dflt_tx_vsi_rule_id;
u16 dflt_tx_vsi_num;
u16 dflt_rx_vsi_rule_id;
u16 dflt_rx_vsi_num;
struct ice_fc_info fc;
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
struct ice_sched_tx_policy sched_policy;
struct list_head vsi_info_list;
struct list_head agg_list; /* lists all aggregators */
u8 lport;
#define ICE_LPORT_MASK 0xff
bool is_vf;
};
struct ice_switch_info {
/* Switch VSI lists to MAC/VLAN translation */
struct mutex mac_list_lock; /* protect MAC list */
struct list_head mac_list_head;
struct mutex vlan_list_lock; /* protect VLAN list */
struct list_head vlan_list_head;
struct mutex eth_m_list_lock; /* protect ethtype list */
struct list_head eth_m_list_head;
struct mutex promisc_list_lock; /* protect promisc mode list */
struct list_head promisc_list_head;
struct mutex mac_vlan_list_lock; /* protect MAC-VLAN list */
struct list_head mac_vlan_list_head;
struct list_head vsi_list_map_head;
};
/* Port hardware description */
struct ice_hw {
u8 __iomem *hw_addr;
void *back;
struct ice_aqc_layer_props *layer_info;
struct ice_port_info *port_info;
u64 debug_mask; /* bitmap for debug mask */
enum ice_mac_type mac_type;
/* pci info */
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
u16 subsystem_vendor_id;
u8 revision_id;
u8 pf_id; /* device profile info */
/* TX Scheduler values */
u16 num_tx_sched_layers;
u16 num_tx_sched_phys_layers;
u8 flattened_layers;
u8 max_cgds;
u8 sw_entry_point_layer;
bool evb_veb; /* true for VEB, false for VEPA */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
struct ice_hw_func_caps func_caps; /* function capabilities */
struct ice_switch_info *switch_info; /* switch filter lists */
/* Control Queue info */
struct ice_ctl_q_info adminq;
u8 api_branch; /* API branch version */
u8 api_maj_ver; /* API major version */
u8 api_min_ver; /* API minor version */
u8 api_patch; /* API patch version */
u8 fw_branch; /* firmware branch version */
u8 fw_maj_ver; /* firmware major version */
u8 fw_min_ver; /* firmware minor version */
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
/* minimum allowed value for different speeds */
#define ICE_ITR_GRAN_MIN_200 1
#define ICE_ITR_GRAN_MIN_100 1
#define ICE_ITR_GRAN_MIN_50 2
#define ICE_ITR_GRAN_MIN_25 4
/* ITR granularity in 1 us */
u8 itr_gran_200;
u8 itr_gran_100;
u8 itr_gran_50;
u8 itr_gran_25;
bool ucast_shared; /* true if VSIs can share unicast addr */
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct ice_eth_stats {
u64 rx_bytes; /* gorc */
u64 rx_unicast; /* uprc */
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
u64 tx_multicast; /* mptc */
u64 tx_broadcast; /* bptc */
u64 tx_discards; /* tdpc */
u64 tx_errors; /* tepc */
};
/* Statistics collected by the MAC */
struct ice_hw_port_stats {
/* eth stats collected by the port */
struct ice_eth_stats eth;
/* additional port specific stats */
u64 tx_dropped_link_down; /* tdold */
u64 crc_errors; /* crcerrs */
u64 illegal_bytes; /* illerrc */
u64 error_bytes; /* errbc */
u64 mac_local_faults; /* mlfc */
u64 mac_remote_faults; /* mrfc */
u64 rx_len_errors; /* rlec */
u64 link_xon_rx; /* lxonrxc */
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
u64 link_xoff_tx; /* lxofftxc */
u64 rx_size_64; /* prc64 */
u64 rx_size_127; /* prc127 */
u64 rx_size_255; /* prc255 */
u64 rx_size_511; /* prc511 */
u64 rx_size_1023; /* prc1023 */
u64 rx_size_1522; /* prc1522 */
u64 rx_size_big; /* prc9522 */
u64 rx_undersize; /* ruc */
u64 rx_fragments; /* rfc */
u64 rx_oversize; /* roc */
u64 rx_jabber; /* rjc */
u64 tx_size_64; /* ptc64 */
u64 tx_size_127; /* ptc127 */
u64 tx_size_255; /* ptc255 */
u64 tx_size_511; /* ptc511 */
u64 tx_size_1023; /* ptc1023 */
u64 tx_size_1522; /* ptc1522 */
u64 tx_size_big; /* ptc9522 */
};
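The register abbreviations in the comments above (gorc, prc64, and so on) map back to the GLPRT_* counters defined earlier in this patch; wide counters are split across a low/high register pair. A hedged sketch of reading one such pair into the stats struct (real stat collection must also snapshot previous values to handle counter wrap, which is omitted here):

/* Sketch only: accumulate the "good octets received" counter for a port. */
static void ice_read_gorc_example(struct ice_hw *hw, u8 port,
				  struct ice_hw_port_stats *stats)
{
	u64 lo = rd32(hw, GLPRT_GORCL(port));
	u64 hi = rd32(hw, GLPRT_GORCH(port));

	stats->eth.rx_bytes += (hi << 32) | lo;
}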
/* Checksum and Shadow RAM pointers */
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
#define ICE_SR_NVM_EETRACK_LO 0x2D
#define ICE_SR_NVM_EETRACK_HI 0x2E
#define ICE_NVM_VER_LO_SHIFT 0
#define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT)
#define ICE_NVM_VER_HI_SHIFT 12
#define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT)
#define ICE_OEM_VER_PATCH_SHIFT 0
#define ICE_OEM_VER_PATCH_MASK (0xff << ICE_OEM_VER_PATCH_SHIFT)
#define ICE_OEM_VER_BUILD_SHIFT 8
#define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT)
#define ICE_OEM_VER_SHIFT 24
#define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT)
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
#endif /* _ICE_TYPE_H_ */
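The Shadow RAM pointers and version masks above are what turn the words read by ice_init_nvm() into a readable version string. A hedged sketch of unpacking them (the formatting is illustrative, not the driver's actual ethtool output):

/* Sketch only: split nvm->ver and nvm->oem_ver into their fields. */
static void ice_nvm_ver_example(struct ice_nvm_info *nvm, char *buf, size_t len)
{
	u8 ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	u8 ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
	u8 oem_ver = (nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT;
	u16 oem_build = (nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
			ICE_OEM_VER_BUILD_SHIFT;
	u8 oem_patch = (nvm->oem_ver & ICE_OEM_VER_PATCH_MASK) >>
		       ICE_OEM_VER_PATCH_SHIFT;

	snprintf(buf, len, "%x.%02x 0x%08x %d.%d.%d", ver_hi, ver_lo,
		 nvm->eetrack, oem_ver, oem_build, oem_patch);
}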