Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-04-18

This series contains updates to the ice driver only.

Anirudh fixes up code comments that had typos.  Adds support for DCB
to the ice driver, which required a bit of refactoring of the existing
code.  Also fixes a potential race condition between closing and opening
the VSI on a MIB change event by grabbing the rtnl_lock prior to
closing.  Adds support to process LLDP MIB change notifications and to
report DCB stats via ethtool.

Brett updates the ITR increment calculation to use a direct calculation
instead of estimations, which provides a more accurate value.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2019-04-18 11:25:33 -07:00
commit 16111991db
27 changed files with 3203 additions and 459 deletions


@ -17,3 +17,4 @@ ice-y := ice_main.o \
ice_txrx.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o


@ -34,6 +34,7 @@
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
@ -151,7 +152,7 @@ struct ice_tc_info {
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
u8 ena_tc; /* TX map */
u8 ena_tc; /* Tx map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
@ -162,7 +163,7 @@ struct ice_res_tracker {
};
struct ice_qs_cfg {
struct mutex *qs_mutex; /* will be assgined to &pf->avail_q_mutex */
struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
unsigned long *pf_map;
unsigned long pf_map_size;
unsigned int q_count;
@ -321,7 +322,11 @@ enum ice_pf_flags {
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_DISABLE_FW_LLDP,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@ -360,8 +365,8 @@ struct ice_pf {
u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num lan Tx queues setup */
u16 num_lan_rx; /* num lan Rx queues setup */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 q_left_tx; /* remaining num Tx queues left unclaimed */
u16 q_left_rx; /* remaining num Rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@ -375,6 +380,9 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded; /* has previous stats been loaded */
#ifdef CONFIG_DCB
u16 dcbx_cap;
#endif /* CONFIG_DCB */
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
@ -387,8 +395,8 @@ struct ice_netdev_priv {
/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
* @hw: pointer to hw struct
* @vsi: pointer to vsi struct, can be NULL
* @hw: pointer to HW struct
* @vsi: pointer to VSI struct, can be NULL
* @q_vector: pointer to q_vector, can be NULL
*/
static inline void
@ -411,12 +419,6 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
wr32(hw, GLINT_DYN_CTL(vector), val);
}
static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
vsi->tc_cfg.numtc = 1;
}
void ice_set_ethtool_ops(struct net_device *netdev);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
@ -425,5 +427,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
void ice_napi_del(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
#endif /* CONFIG_DCB */
#endif /* _ICE_H_ */


@ -62,7 +62,7 @@ struct ice_aqc_req_res {
#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
/* For SDP: pin id of the SDP */
/* For SDP: pin ID of the SDP */
__le32 res_number;
/* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
__le16 status;
@ -747,6 +747,32 @@ struct ice_aqc_delete_elem {
__le32 teid[1];
};
/* Query Port ETS (indirect 0x040E)
*
* This indirect command is used to query port TC node configuration.
*/
struct ice_aqc_query_port_ets {
__le32 port_teid;
__le32 reserved;
__le32 addr_high;
__le32 addr_low;
};
struct ice_aqc_port_ets_elem {
u8 tc_valid_bits;
u8 reserved[3];
/* 3 bits for UP per TC 0-7, 4th byte reserved */
__le32 up2tc;
u8 tc_bw_share[8];
__le32 port_eir_prof_id;
__le32 port_cir_prof_id;
/* 3 bits per Node priority to TC 0-7, 4th byte reserved */
__le32 tc_node_prio;
#define ICE_TC_NODE_PRIO_S 0x4
u8 reserved1[4];
__le32 tc_node_teid[8]; /* Used for response, reserved in command */
};
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@ -1024,7 +1050,7 @@ struct ice_aqc_get_link_status_data {
u8 ext_info;
#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0)
#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
/* Port TX Suspended */
/* Port Tx Suspended */
#define ICE_AQ_LINK_TX_S 2
#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S)
#define ICE_AQ_LINK_TX_ACTIVE 0
@ -1120,9 +1146,9 @@ struct ice_aqc_nvm {
};
/**
* Send to PF command (indirect 0x0801) id is only used by PF
* Send to PF command (indirect 0x0801) ID is only used by PF
*
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to VF command (indirect 0x0802) ID is only used by PF
*
*/
struct ice_aqc_pf_vf_msg {
@ -1132,6 +1158,126 @@ struct ice_aqc_pf_vf_msg {
__le32 addr_low;
};
/* Get LLDP MIB (indirect 0x0A00)
* Note: This is also used by the LLDP MIB Change Event (0x0A01)
* as the format is the same.
*/
struct ice_aqc_lldp_get_mib {
u8 type;
#define ICE_AQ_LLDP_MIB_TYPE_S 0
#define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S)
#define ICE_AQ_LLDP_MIB_LOCAL 0
#define ICE_AQ_LLDP_MIB_REMOTE 1
#define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2
#define ICE_AQ_LLDP_BRID_TYPE_S 2
#define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S)
#define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0
#define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1
/* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */
#define ICE_AQ_LLDP_TX_S 0x4
#define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S)
#define ICE_AQ_LLDP_TX_ACTIVE 0
#define ICE_AQ_LLDP_TX_SUSPENDED 1
#define ICE_AQ_LLDP_TX_FLUSHED 3
/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
* and in the LLDP MIB Change Event (0x0A01). They are valid for the
* Get LLDP MIB (0x0A00) response only.
*/
u8 reserved1;
__le16 local_len;
__le16 remote_len;
u8 reserved2[2];
__le32 addr_high;
__le32 addr_low;
};
/* Configure LLDP MIB Change Event (direct 0x0A01) */
/* For MIB Change Event use ice_aqc_lldp_get_mib structure above */
struct ice_aqc_lldp_set_mib_change {
u8 command;
#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
u8 reserved[15];
};
/* Stop LLDP (direct 0x0A05) */
struct ice_aqc_lldp_stop {
u8 command;
#define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0)
#define ICE_AQ_LLDP_AGENT_STOP 0x0
#define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK
#define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1)
u8 reserved[15];
};
/* Start LLDP (direct 0x0A06) */
struct ice_aqc_lldp_start {
u8 command;
#define ICE_AQ_LLDP_AGENT_START BIT(0)
#define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1)
u8 reserved[15];
};
/* Get CEE DCBX Oper Config (0x0A07)
* The command uses the generic descriptor struct and
* returns the struct below as an indirect response.
*/
struct ice_aqc_get_cee_dcb_cfg_resp {
u8 oper_num_tc;
u8 oper_prio_tc[4];
u8 oper_tc_bw[8];
u8 oper_pfc_en;
__le16 oper_app_prio;
#define ICE_AQC_CEE_APP_FCOE_S 0
#define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S)
#define ICE_AQC_CEE_APP_ISCSI_S 3
#define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S)
#define ICE_AQC_CEE_APP_FIP_S 8
#define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S)
__le32 tlv_status;
#define ICE_AQC_CEE_PG_STATUS_S 0
#define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S)
#define ICE_AQC_CEE_PFC_STATUS_S 3
#define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S)
#define ICE_AQC_CEE_FCOE_STATUS_S 8
#define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S)
#define ICE_AQC_CEE_ISCSI_STATUS_S 11
#define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S)
#define ICE_AQC_CEE_FIP_STATUS_S 16
#define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S)
u8 reserved[12];
};
/* Set Local LLDP MIB (indirect 0x0A08)
* Used to replace the local MIB of a given LLDP agent. e.g. DCBx
*/
struct ice_aqc_lldp_set_local_mib {
u8 type;
#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0)
#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0
#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1)
#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0
#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M
u8 reserved0;
__le16 length;
u8 reserved1[4];
__le32 addr_high;
__le32 addr_low;
};
/* Stop/Start LLDP Agent (direct 0x0A09)
* Used for stopping/starting specific LLDP agent. e.g. DCBx.
* The same structure is used for the response, with the command field
* being used as the status field.
*/
struct ice_aqc_lldp_stop_start_specific_agent {
u8 command;
#define ICE_AQC_START_STOP_AGENT_M BIT(0)
#define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0
#define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M
u8 reserved[15];
};
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@ -1186,7 +1332,7 @@ struct ice_aqc_get_set_rss_lut {
__le32 addr_low;
};
/* Add TX LAN Queues (indirect 0x0C30) */
/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
u8 num_qgrps;
u8 reserved[3];
@ -1195,7 +1341,7 @@ struct ice_aqc_add_txqs {
__le32 addr_low;
};
/* This is the descriptor of each queue entry for the Add TX LAN Queues
/* This is the descriptor of each queue entry for the Add Tx LAN Queues
* command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.
*/
struct ice_aqc_add_txqs_perq {
@ -1207,7 +1353,7 @@ struct ice_aqc_add_txqs_perq {
struct ice_aqc_txsched_elem info;
};
/* The format of the command buffer for Add TX LAN Queues (0x0C30)
/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
* each struct ice_aqc_add_tx_qgrp is variable due
* to the variable number of queues in each group!
@ -1219,7 +1365,7 @@ struct ice_aqc_add_tx_qgrp {
struct ice_aqc_add_txqs_perq txqs[1];
};
/* Disable TX LAN Queues (indirect 0x0C31) */
/* Disable Tx LAN Queues (indirect 0x0C31) */
struct ice_aqc_dis_txqs {
u8 cmd_type;
#define ICE_AQC_Q_DIS_CMD_S 0
@ -1241,7 +1387,7 @@ struct ice_aqc_dis_txqs {
__le32 addr_low;
};
/* The buffer for Disable TX LAN Queues (indirect 0x0C31)
/* The buffer for Disable Tx LAN Queues (indirect 0x0C31)
* contains the following structures, arrayed one after the
* other.
* Note: Since the q_id is 16 bits wide, if the
@ -1388,8 +1534,15 @@ struct ice_aq_desc {
struct ice_aqc_get_topo get_topo;
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_nvm nvm;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
struct ice_aqc_lldp_stop lldp_stop;
struct ice_aqc_lldp_start lldp_start;
struct ice_aqc_lldp_set_local_mib lldp_set_mib;
struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@ -1422,6 +1575,8 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
ICE_AQ_RC_OK = 0, /* Success */
ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
ICE_AQ_RC_ENOENT = 2, /* No such element */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
@ -1474,6 +1629,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
ice_aqc_opc_query_port_ets = 0x040E,
ice_aqc_opc_delete_sched_elems = 0x040F,
ice_aqc_opc_query_sched_res = 0x0412,
@ -1491,6 +1647,14 @@ enum ice_adminq_opc {
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
ice_mbx_opc_send_msg_to_vf = 0x0802,
/* LLDP commands */
ice_aqc_opc_lldp_get_mib = 0x0A00,
ice_aqc_opc_lldp_set_mib_change = 0x0A01,
ice_aqc_opc_lldp_stop = 0x0A05,
ice_aqc_opc_lldp_start = 0x0A06,
ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
@ -1498,7 +1662,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_rss_key = 0x0B04,
ice_aqc_opc_get_rss_lut = 0x0B05,
/* TX queue handling commands/events */
/* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,


@ -31,7 +31,7 @@
* @hw: pointer to the HW structure
*
* This function sets the MAC type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
* vendor ID and device ID stored in the HW structure.
*/
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
@ -77,7 +77,7 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
/**
* ice_aq_manage_mac_read - manage MAC address read command
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @buf: a virtual buffer to hold the manage MAC read response
* @buf_size: Size of the virtual buffer
* @cd: pointer to command details structure or NULL
@ -418,7 +418,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*/
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
@ -438,7 +438,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
/**
* ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*/
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
@ -477,7 +477,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
/**
* ice_cfg_fw_log - configure FW logging
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @enable: enable certain FW logging events if true, disable all if false
*
* This function enables/disables the FW logging via Rx CQ events and a UART
@ -626,7 +626,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
/**
* ice_output_fw_log
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @desc: pointer to the AQ message descriptor
* @buf: pointer to the buffer accompanying the AQ message
*
@ -642,7 +642,7 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
/**
* ice_get_itr_intrl_gran - determine int/intrl granularity
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* Determines the itr/intrl granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
@ -731,7 +731,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
/* set the back pointer to hw */
/* set the back pointer to HW */
hw->port_info->hw = hw;
/* Initialize port_info struct with switch configuration data */
@ -988,7 +988,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
* @ice_rxq_ctx: pointer to the rxq context
* @rxq_index: the index of the Rx queue
*
* Copies rxq context from dense structure to hw register space
* Copies rxq context from dense structure to HW register space
*/
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
@ -1001,7 +1001,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
if (rxq_index > QRX_CTRL_MAX_INDEX)
return ICE_ERR_PARAM;
/* Copy each dword separately to hw */
/* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
wr32(hw, QRX_CONTEXT(i, rxq_index),
*((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
@ -1045,7 +1045,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
* it to hw register space
* it to HW register space
*/
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@ -1144,7 +1144,7 @@ ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @desc: descriptor describing the command
* @buf: buffer to use for indirect commands (NULL for direct commands)
* @buf_size: size of buffer for indirect commands (0 for direct commands)
@ -1161,7 +1161,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
/**
* ice_aq_get_fw_ver
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @cd: pointer to command details structure or NULL
*
* Get the firmware version (0x0001) from the admin queue commands
@ -1195,7 +1195,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
/**
* ice_aq_q_shutdown
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @unloading: is the driver unloading itself
*
* Tell the Firmware that we're shutting down the AdminQ and whether
@ -1218,8 +1218,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
/**
* ice_aq_req_res
* @hw: pointer to the hw struct
* @res: resource id
* @hw: pointer to the HW struct
* @res: resource ID
* @access: access type
* @sdp_number: resource number
* @timeout: the maximum time in ms that the driver may hold the resource
@ -1304,8 +1304,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/**
* ice_aq_release_res
* @hw: pointer to the hw struct
* @res: resource id
* @hw: pointer to the HW struct
* @res: resource ID
* @sdp_number: resource number
* @cd: pointer to command details structure or NULL
*
@ -1331,7 +1331,7 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
/**
* ice_acquire_res
* @hw: pointer to the HW structure
* @res: resource id
* @res: resource ID
* @access: access type (read or write)
* @timeout: timeout in milliseconds
*
@ -1393,7 +1393,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/**
* ice_release_res
* @hw: pointer to the HW structure
* @res: resource id
* @res: resource ID
*
* This function will release a resource using the proper Admin Command.
*/
@ -1405,7 +1405,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
status = ice_aq_release_res(hw, res, 0, NULL);
/* there are some rare cases when trying to release the resource
* results in an admin Q timeout, so handle them correctly
* results in an admin queue timeout, so handle them correctly
*/
while ((status == ICE_ERR_AQ_TIMEOUT) &&
(total_delay < hw->adminq.sq_cmd_timeout)) {
@ -1417,7 +1417,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
/**
* ice_get_num_per_func - determine number of resources per PF
* @hw: pointer to the hw structure
* @hw: pointer to the HW structure
* @max: value to be evenly split between each PF
*
* Determine the number of valid functions by going through the bitmap returned
@ -1440,7 +1440,7 @@ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
/**
* ice_parse_caps - parse function/device capabilities
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @buf: pointer to a buffer containing function/device capability records
* @cap_count: number of capability records in the list
* @opc: type of capabilities list to parse
@ -1582,7 +1582,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
/**
* ice_aq_discover_caps - query function/device capabilities
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @buf: a virtual buffer to hold the capabilities
* @buf_size: Size of the virtual buffer
* @cap_count: cap count needed if AQ err==ENOMEM
@ -1681,7 +1681,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
/**
* ice_aq_manage_mac_write - manage MAC address write command
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
* @flags: flags to control write behavior
* @cd: pointer to command details structure or NULL
@ -1709,7 +1709,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
/**
* ice_aq_clear_pxe_mode
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
@ -1725,7 +1725,7 @@ static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
/**
* ice_clear_pxe_mode - clear pxe operations mode
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* Make sure all PXE mode settings are cleared, including things
* like descriptor fetch/write-back mode.
@ -1741,10 +1741,10 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
*
* This helper function will convert an entry in phy type structure
* This helper function will convert an entry in PHY type structure
* [phy_type_low, phy_type_high] to its corresponding link speed.
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one phy type to its
* be one bit set, as this function will convert one PHY type to its
* speed.
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
@ -1914,7 +1914,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
/**
* ice_aq_set_phy_cfg
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @lport: logical port number
* @cfg: structure with PHY configuration data to be set
* @cd: pointer to command details structure or NULL
@ -2029,7 +2029,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
if (!pcaps)
return ICE_ERR_NO_MEMORY;
/* Get the current phy config */
/* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status) {
@ -2338,7 +2338,7 @@ ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
/**
* __ice_aq_get_set_rss_key
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_id: VSI FW index
* @key: pointer to key info struct
* @set: set true to set the key, false to get the key
@ -2373,7 +2373,7 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
/**
* ice_aq_get_rss_key
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
* @key: pointer to key info struct
*
@ -2392,7 +2392,7 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
/**
* ice_aq_set_rss_key
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
* @keys: pointer to key info struct
*
@ -2477,7 +2477,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
* @num_qgrps: number of groups in the list
* @qg_list: the list of groups to disable
* @buf_size: the total size of the qg_list buffer in bytes
* @rst_src: if called due to reset, specifies the RST source
* @rst_src: if called due to reset, specifies the reset source
* @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
@ -2517,7 +2517,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
break;
case ICE_VF_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
/* In this case, FW expects vmvf_num to be absolute VF id */
/* In this case, FW expects vmvf_num to be absolute VF ID */
cmd->vmvf_and_timeout |=
cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
ICE_AQC_Q_DIS_VMVF_NUM_M);
@ -2794,13 +2794,13 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* ice_ena_vsi_txq
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: tc number
* @tc: TC number
* @num_qgrps: Number of added queue groups
* @buf: list of queue groups to be added
* @buf_size: size of buffer for indirect command
* @cd: pointer to command details structure or NULL
*
* This function adds one lan q
* This function adds one LAN queue
*/
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
@ -2844,11 +2844,11 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
* Bit 5-6.
* - Bit 7 is reserved.
* Without setting the generic section as valid in valid_sections, the
* Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
* Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
*/
buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
/* add the lan q */
/* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
if (status) {
ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n",
@ -2860,7 +2860,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
node.node_teid = buf->txqs[0].q_teid;
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
/* add a leaf node into schduler tree q layer */
/* add a leaf node into scheduler tree queue layer */
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
ena_txq_exit:
@ -2874,7 +2874,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
* @num_queues: number of queues
* @q_ids: pointer to the q_id array
* @q_teids: pointer to queue node teids
* @rst_src: if called due to reset, specifies the RST source
* @rst_src: if called due to reset, specifies the reset source
* @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
@ -2925,12 +2925,12 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
}
/**
* ice_cfg_vsi_qs - configure the new/exisiting VSI queues
* ice_cfg_vsi_qs - configure the new/existing VSI queues
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @maxqs: max queues array per TC
* @owner: lan or rdma
* @owner: LAN or RDMA
*
* This function adds/updates the VSI queues per TC.
*/
@ -2965,13 +2965,13 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
}
/**
* ice_cfg_vsi_lan - configure VSI lan queues
* ice_cfg_vsi_lan - configure VSI LAN queues
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @max_lanqs: max lan queues array per TC
* @max_lanqs: max LAN queues array per TC
*
* This function adds/updates the VSI lan queues per TC.
* This function adds/updates the VSI LAN queues per TC.
*/
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
@ -2983,7 +2983,7 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
/**
* ice_replay_pre_init - replay pre initialization
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
@ -3007,7 +3007,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
/**
* ice_replay_vsi - replay VSI configuration
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle: driver VSI handle
*
* Restore all VSI configuration after reset. It is required to call this
@ -3034,7 +3034,7 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_replay_post - post replay configuration cleanup
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* Post replay cleanup.
*/
@ -3106,3 +3106,28 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
/* to manage the potential roll-over */
*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}
/**
* ice_sched_query_elem - query element information from HW
* @hw: pointer to the HW struct
* @node_teid: node TEID to be queried
* @buf: buffer to element information
*
* This function queries HW element information
*/
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf)
{
u16 buf_size, num_elem_ret = 0;
enum ice_status status;
buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
buf->generic[0].node_teid = cpu_to_le32(node_teid);
status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
NULL);
if (status || num_elem_ret != 1)
ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
return status;
}


@ -118,4 +118,7 @@ ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf);
#endif /* _ICE_COMMON_H_ */


@ -51,7 +51,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
/**
* ice_check_sq_alive
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
* Returns true if Queue is enabled else false.
@ -287,7 +287,7 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
* Configure base address and length registers for the receive (event q)
* Configure base address and length registers for the receive (event queue)
*/
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
@ -751,7 +751,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/**
* ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
* Returns true if the firmware has processed all descriptors on the
@ -767,7 +767,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/**
* ice_sq_send_cmd - send command to Control Queue (ATQ)
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buf: buffer to use for indirect commands (or NULL for direct commands)
@ -962,7 +962,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
/**
* ice_clean_rq_elem
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process

File diff suppressed because it is too large
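The ITR change described in the cover letter lands in this suppressed diff. As a minimal standalone sketch of the idea only — assuming the new code derives the next interrupt throttle interval directly from traffic measured over the previous interval, rather than stepping it up or down by estimated amounts — the calculation looks roughly like this (the helper name and constants are illustrative, not the driver's):

#include <stdio.h>

/* Hypothetical helper, not from the driver: pick the next ITR interval
 * (in usecs) directly from bytes observed during the last interval.
 */
static unsigned int itr_from_traffic(unsigned long long bytes,
				     unsigned int interval_usecs)
{
	/* bytes per microsecond over the last interval */
	unsigned long long rate = bytes / (interval_usecs ? interval_usecs : 1);
	/* aim for roughly one interrupt per 64KB of traffic (assumption) */
	unsigned int itr = rate ? (unsigned int)((64 * 1024) / rate) : 250;

	/* clamp to a sane range; real hardware granularity is coarser */
	if (itr < 2)
		itr = 2;
	if (itr > 250)
		itr = 250;
	return itr;
}

int main(void)
{
	/* 1 MB moved in a 100 usec interval -> a short throttle interval */
	printf("itr = %u usecs\n", itr_from_traffic(1024 * 1024, 100));
	return 0;
}

A direct computation like this converges on the measured rate in one step, where a bucket-stepping estimate can lag several intervals behind.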


@ -0,0 +1,179 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_DCB_H_
#define _ICE_DCB_H_
#include "ice_type.h"
#define ICE_DCBX_STATUS_NOT_STARTED 0
#define ICE_DCBX_STATUS_IN_PROGRESS 1
#define ICE_DCBX_STATUS_DONE 2
#define ICE_DCBX_STATUS_DIS 7
#define ICE_TLV_TYPE_END 0
#define ICE_TLV_TYPE_ORG 127
#define ICE_IEEE_8021QAZ_OUI 0x0080C2
#define ICE_IEEE_SUBTYPE_ETS_CFG 9
#define ICE_IEEE_SUBTYPE_ETS_REC 10
#define ICE_IEEE_SUBTYPE_PFC_CFG 11
#define ICE_IEEE_SUBTYPE_APP_PRI 12
#define ICE_CEE_DCBX_OUI 0x001B21
#define ICE_CEE_DCBX_TYPE 2
#define ICE_CEE_SUBTYPE_PG_CFG 2
#define ICE_CEE_SUBTYPE_PFC_CFG 3
#define ICE_CEE_SUBTYPE_APP_PRI 4
#define ICE_CEE_MAX_FEAT_TYPE 3
/* Defines for LLDP TLV header */
#define ICE_LLDP_TLV_LEN_S 0
#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S)
#define ICE_LLDP_TLV_TYPE_S 9
#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S)
#define ICE_LLDP_TLV_SUBTYPE_S 0
#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S)
#define ICE_LLDP_TLV_OUI_S 8
#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S)
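As a quick standalone sketch of how the TLV header masks above are meant to be used (defines repeated from the diff; in the driver the typelen field is __be16 and would be byte-swapped before masking):

#include <stdio.h>

#define ICE_LLDP_TLV_LEN_S	0
#define ICE_LLDP_TLV_LEN_M	(0x01FF << ICE_LLDP_TLV_LEN_S)
#define ICE_LLDP_TLV_TYPE_S	9
#define ICE_LLDP_TLV_TYPE_M	(0x7F << ICE_LLDP_TLV_TYPE_S)

int main(void)
{
	/* LLDP packs a 7-bit type and a 9-bit length into one 16-bit field;
	 * type 127 is the organization-specific TLV (ICE_TLV_TYPE_ORG).
	 */
	unsigned short typelen = (127 << ICE_LLDP_TLV_TYPE_S) | 24;

	printf("type=%u len=%u\n",
	       (typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S,
	       (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
	return 0;
}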
/* Defines for IEEE ETS TLV */
#define ICE_IEEE_ETS_MAXTC_S 0
#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S)
#define ICE_IEEE_ETS_CBS_S 6
#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S)
#define ICE_IEEE_ETS_WILLING_S 7
#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S)
#define ICE_IEEE_ETS_PRIO_0_S 0
#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S)
#define ICE_IEEE_ETS_PRIO_1_S 4
#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S)
#define ICE_CEE_PGID_PRIO_0_S 0
#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S)
#define ICE_CEE_PGID_PRIO_1_S 4
#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S)
#define ICE_CEE_PGID_STRICT 15
/* Defines for IEEE TSA types */
#define ICE_IEEE_TSA_STRICT 0
#define ICE_IEEE_TSA_ETS 2
/* Defines for IEEE PFC TLV */
#define ICE_IEEE_PFC_CAP_S 0
#define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S)
#define ICE_IEEE_PFC_MBC_S 6
#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S)
#define ICE_IEEE_PFC_WILLING_S 7
#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S)
/* Defines for IEEE APP TLV */
#define ICE_IEEE_APP_SEL_S 0
#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S)
#define ICE_IEEE_APP_PRIO_S 5
#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
/* TLV definitions for preparing MIB */
#define ICE_IEEE_TLV_ID_ETS_CFG 3
#define ICE_IEEE_TLV_ID_ETS_REC 4
#define ICE_IEEE_TLV_ID_PFC_CFG 5
#define ICE_IEEE_TLV_ID_APP_PRI 6
#define ICE_TLV_ID_END_OF_LLDPPDU 7
#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
#define ICE_IEEE_ETS_TLV_LEN 25
#define ICE_IEEE_PFC_TLV_LEN 6
#define ICE_IEEE_APP_TLV_LEN 11
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
__be16 typelen;
__be32 ouisubtype;
u8 tlvinfo[1];
} __packed;
struct ice_cee_tlv_hdr {
__be16 typelen;
u8 operver;
u8 maxver;
};
struct ice_cee_ctrl_tlv {
struct ice_cee_tlv_hdr hdr;
__be32 seqno;
__be32 ackno;
};
struct ice_cee_feat_tlv {
struct ice_cee_tlv_hdr hdr;
u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
#define ICE_CEE_FEAT_TLV_ENA_M 0x80
#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
#define ICE_CEE_FEAT_TLV_ERR_M 0x20
u8 subtype;
u8 tlvinfo[1];
};
struct ice_cee_app_prio {
__be16 protocol;
u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
#define ICE_CEE_APP_SELECTOR_M 0x03
__be16 lower_oui;
u8 prio_map;
} __packed;
u8 ice_get_dcbx_status(struct ice_hw *hw);
enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_init_dcb(struct ice_hw *hw);
enum ice_status
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cmd_details);
#ifdef CONFIG_DCB
enum ice_status
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
struct ice_sq_cd *cd);
enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd);
enum ice_status
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd);
enum ice_status
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd);
#else /* CONFIG_DCB */
static inline enum ice_status
ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
bool __always_unused shutdown_lldp_agent,
struct ice_sq_cd __always_unused *cd)
{
return 0;
}
static inline enum ice_status
ice_aq_start_lldp(struct ice_hw __always_unused *hw,
struct ice_sq_cd __always_unused *cd)
{
return 0;
}
static inline enum ice_status
ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
bool __always_unused start_dcbx_agent,
bool *dcbx_agent_status,
struct ice_sq_cd __always_unused *cd)
{
*dcbx_agent_status = false;
return 0;
}
static inline enum ice_status
ice_aq_cfg_lldp_mib_change(struct ice_hw __always_unused *hw,
bool __always_unused ena_update,
struct ice_sq_cd __always_unused *cd)
{
return 0;
}
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_H_ */


@ -0,0 +1,551 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_dcb_lib.h"
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
* @dcbcfg: DCB config to evaluate for enabled TCs
*/
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
{
u8 i, num_tc, ena_tc = 1;
num_tc = ice_dcb_get_num_tc(dcbcfg);
for (i = 0; i < num_tc; i++)
ena_tc |= BIT(i);
return ena_tc;
}
/**
* ice_dcb_get_num_tc - Get the number of TCs from DCBX config
* @dcbcfg: config to retrieve number of TCs from
*/
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
{
bool tc_unused = false;
u8 num_tc = 0;
u8 ret = 0;
int i;
/* Scan the ETS Config Priority Table to find traffic classes
* enabled and create a bitmask of enabled TCs
*/
for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
num_tc |= BIT(dcbcfg->etscfg.prio_table[i]);
/* Scan bitmask for contiguous TCs starting with TC0 */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (num_tc & BIT(i)) {
if (!tc_unused) {
ret++;
} else {
pr_err("Non-contiguous TCs - Disabling DCB\n");
return 1;
}
} else {
tc_unused = true;
}
}
/* There is always at least 1 TC */
if (!ret)
ret = 1;
return ret;
}
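To make the contiguity rule above concrete, here is a standalone sketch (not driver code, with a hypothetical priority table) of the same two-pass logic:

#include <stdio.h>

int main(void)
{
	/* hypothetical ETS priority table: priorities map to TCs 0, 1, 2 */
	unsigned char prio_table[8] = { 0, 0, 1, 1, 2, 2, 0, 0 };
	unsigned int tc_bits = 0, num_tc = 0;
	int i, gap = 0;

	/* pass 1: bitmask of every TC referenced by the table */
	for (i = 0; i < 8; i++)
		tc_bits |= 1u << prio_table[i];

	/* pass 2: count TCs only while the bits are contiguous from TC0 */
	for (i = 0; i < 8; i++) {
		if (tc_bits & (1u << i)) {
			if (gap) {
				num_tc = 1; /* non-contiguous: DCB disabled */
				break;
			}
			num_tc++;
		} else {
			gap = 1;
		}
	}
	if (!num_tc)
		num_tc = 1; /* there is always at least 1 TC */

	printf("num_tc = %u\n", num_tc); /* prints 3 */
	return 0;
}

A table referencing TCs {0, 2} would leave bit 1 clear before bit 2, trip the gap check, and fall back to a single TC.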
/**
* ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
* @vsi: VSI owner of rings being updated
*/
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
{
struct ice_ring *tx_ring, *rx_ring;
u16 qoffset, qcount;
int i, n;
if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
/* Reset the TC information */
for (i = 0; i < vsi->num_txq; i++) {
tx_ring = vsi->tx_rings[i];
tx_ring->dcb_tc = 0;
}
for (i = 0; i < vsi->num_rxq; i++) {
rx_ring = vsi->rx_rings[i];
rx_ring->dcb_tc = 0;
}
return;
}
ice_for_each_traffic_class(n) {
if (!(vsi->tc_cfg.ena_tc & BIT(n)))
break;
qoffset = vsi->tc_cfg.tc_info[n].qoffset;
qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
for (i = qoffset; i < (qoffset + qcount); i++) {
tx_ring = vsi->tx_rings[i];
rx_ring = vsi->rx_rings[i];
tx_ring->dcb_tc = n;
rx_ring->dcb_tc = n;
}
}
}
/**
* ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
* @pf: pointer to the PF struct
*
* Assumed caller has already disabled all VSIs before
* calling this function. Reconfiguring DCB based on
* local_dcbx_cfg.
*/
static void ice_pf_dcb_recfg(struct ice_pf *pf)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
u8 tc_map = 0;
int v, ret;
/* Update each VSI */
ice_for_each_vsi(pf, v) {
if (!pf->vsi[v])
continue;
if (pf->vsi[v]->type == ICE_VSI_PF)
tc_map = ice_dcb_get_ena_tc(dcbcfg);
else
tc_map = ICE_DFLT_TRAFFIC_CLASS;
ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
if (ret)
dev_err(&pf->pdev->dev,
"Failed to config TC for VSI index: %d\n",
pf->vsi[v]->idx);
else
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
}
}
/**
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
* @new_cfg: DCBX config to apply
*/
static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
{
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
int ret = 0;
curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(new_cfg) > 1) {
dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
return ret;
}
/* Store old config in case FW config fails */
old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
if (!old_cfg)
return -ENOMEM;
memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
rtnl_lock();
ice_pf_dis_all_vsi(pf, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
* the new config came from the HW in the first place.
*/
if (pf->hw.port_info->is_sw_lldp) {
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
/* Restore previous settings to local config */
memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
goto out;
}
}
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
goto out;
}
ice_pf_dcb_recfg(pf);
out:
ice_pf_ena_all_vsi(pf, true);
rtnl_unlock();
devm_kfree(&pf->pdev->dev, old_cfg);
return ret;
}
/**
* ice_dcb_rebuild - rebuild DCB post reset
* @pf: physical function instance
*/
void ice_dcb_rebuild(struct ice_pf *pf)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *prev_cfg;
enum ice_status ret;
u8 willing;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
goto dcb_error;
}
/* If DCB was not enabled previously, we are done */
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
/* Save current willing state and force FW to unwilling */
willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
goto dcb_error;
}
/* Retrieve DCB config and ensure same as current in SW */
prev_cfg = devm_kmemdup(&pf->pdev->dev,
&pf->hw.port_info->local_dcbx_cfg,
sizeof(*prev_cfg), GFP_KERNEL);
if (!prev_cfg) {
dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
goto dcb_error;
}
ice_init_dcb(&pf->hw);
if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
sizeof(*prev_cfg))) {
/* difference in cfg detected - disable DCB till next MIB */
dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
devm_kfree(&pf->pdev->dev, prev_cfg);
goto dcb_error;
}
/* fetched config congruent to previous configuration */
devm_kfree(&pf->pdev->dev, prev_cfg);
/* Configuration replayed - reset willing state to previous */
pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n");
goto dcb_error;
}
dev_info(&pf->pdev->dev, "DCB restored after reset\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
goto dcb_error;
}
return;
dcb_error:
dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL);
if (!prev_cfg)
return;
prev_cfg->etscfg.willing = true;
prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
ice_pf_dcb_cfg(pf, prev_cfg);
devm_kfree(&pf->pdev->dev, prev_cfg);
}
/**
* ice_dcb_init_cfg - set the initial DCB config in SW
* @pf: pf to apply config to
*/
static int ice_dcb_init_cfg(struct ice_pf *pf)
{
struct ice_dcbx_cfg *newcfg;
struct ice_port_info *pi;
int ret = 0;
pi = pf->hw.port_info;
newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
if (!newcfg)
return -ENOMEM;
memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
if (ice_pf_dcb_cfg(pf, newcfg))
ret = -EINVAL;
devm_kfree(&pf->pdev->dev, newcfg);
return ret;
}
/**
* ice_dcb_sw_default_config - Apply a default DCB config
* @pf: pf to apply config to
*/
static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *dcbcfg;
struct ice_port_info *pi;
struct ice_hw *hw;
int ret;
hw = &pf->hw;
pi = hw->port_info;
dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL);
if (!dcbcfg)
return -ENOMEM;
memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
dcbcfg->etscfg.willing = 1;
dcbcfg->etscfg.maxtcs = 8;
dcbcfg->etscfg.tcbwtable[0] = 100;
dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
memcpy(&dcbcfg->etsrec, &dcbcfg->etscfg,
sizeof(dcbcfg->etsrec));
dcbcfg->etsrec.willing = 0;
dcbcfg->pfc.willing = 1;
dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS;
dcbcfg->numapps = 1;
dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE;
dcbcfg->app[0].priority = 3;
dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
ret = ice_pf_dcb_cfg(pf, dcbcfg);
devm_kfree(&pf->pdev->dev, dcbcfg);
if (ret)
return ret;
return ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
}
/**
* ice_init_pf_dcb - initialize DCB for a PF
* @pf: pf to initialize DCB for
*/
int ice_init_pf_dcb(struct ice_pf *pf)
{
struct device *dev = &pf->pdev->dev;
struct ice_port_info *port_info;
struct ice_hw *hw = &pf->hw;
int sw_default = 0;
int err;
port_info = hw->port_info;
/* check if device is DCB capable */
if (!hw->func_caps.common_cap.dcb) {
dev_dbg(dev, "DCB not supported\n");
return -EOPNOTSUPP;
}
/* Best effort to put DCBx and LLDP into a good state */
port_info->dcbx_status = ice_get_dcbx_status(hw);
if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
bool dcbx_status;
/* Attempt to start LLDP engine. Ignore errors
* as this will error if it is already started
*/
ice_aq_start_lldp(hw, NULL);
/* Attempt to start DCBX. Ignore errors as this
* will error if it is already started
*/
ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
}
err = ice_init_dcb(hw);
if (err) {
/* FW LLDP not in usable state, default to SW DCBx/LLDP */
dev_info(&pf->pdev->dev, "FW LLDP not in usable state\n");
hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
hw->port_info->is_sw_lldp = true;
}
if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
dev_info(&pf->pdev->dev, "DCBX disabled\n");
/* LLDP disabled in FW */
if (port_info->is_sw_lldp) {
sw_default = 1;
dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
}
if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
sw_default = 1;
dev_info(&pf->pdev->dev, "DCBX not started\n");
}
if (sw_default) {
err = ice_dcb_sw_dflt_cfg(pf);
if (err) {
dev_err(&pf->pdev->dev,
"Failed to set local DCB config %d\n", err);
err = -EIO;
goto dcb_init_err;
}
pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
return 0;
}
/* DCBX in FW and LLDP enabled in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
err = ice_dcb_init_cfg(pf);
if (err)
goto dcb_init_err;
dev_info(&pf->pdev->dev, "DCBX offload supported\n");
return err;
dcb_init_err:
dev_err(dev, "DCB init failed\n");
return err;
}
/**
* ice_update_dcb_stats - Update DCB stats counters
* @pf: PF whose stats need to be updated
*/
void ice_update_dcb_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
u8 pf_id = hw->pf_id;
int i;
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
for (i = 0; i < 8; i++) {
ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i),
pf->stat_prev_loaded,
&prev_ps->priority_xoff_rx[i],
&cur_ps->priority_xoff_rx[i]);
ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i),
pf->stat_prev_loaded,
&prev_ps->priority_xon_rx[i],
&cur_ps->priority_xon_rx[i]);
ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i),
pf->stat_prev_loaded,
&prev_ps->priority_xon_tx[i],
&cur_ps->priority_xon_tx[i]);
ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i),
pf->stat_prev_loaded,
&prev_ps->priority_xoff_tx[i],
&cur_ps->priority_xoff_tx[i]);
ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i),
pf->stat_prev_loaded,
&prev_ps->priority_xon_2_xoff[i],
&cur_ps->priority_xon_2_xoff[i]);
}
}
/**
* ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB
* @tx_ring: ring to send buffer on
* @first: pointer to struct ice_tx_buf
*/
int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
return 0;
/* Insert 802.1p priority into VLAN header */
if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) ||
skb->priority != TC_PRIO_CONTROL) {
first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
/* Mask the lower 3 bits to set the 802.1p priority */
first->tx_flags |= (skb->priority & 0x7) <<
ICE_TX_FLAGS_VLAN_PR_S;
if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) {
struct vlan_ethhdr *vhdr;
int rc;
rc = skb_cow_head(skb, 0);
if (rc < 0)
return rc;
vhdr = (struct vlan_ethhdr *)skb->data;
vhdr->h_vlan_TCI = htons(first->tx_flags >>
ICE_TX_FLAGS_VLAN_S);
} else {
first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
}
return 0;
}
/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf
* @event: pointer to the admin queue receive event
*/
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event)
{
if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
struct ice_dcbx_cfg *dcbcfg, *prev_cfg;
int err;
prev_cfg = &pf->hw.port_info->local_dcbx_cfg;
dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg,
sizeof(*dcbcfg), GFP_KERNEL);
if (!dcbcfg)
return;
err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg);
if (!err)
ice_pf_dcb_cfg(pf, dcbcfg);
devm_kfree(&pf->pdev->dev, dcbcfg);
/* Get updated DCBx data from firmware */
err = ice_get_dcb_cfg(pf->hw.port_info);
if (err)
dev_err(&pf->pdev->dev,
"Failed to get DCB config\n");
} else {
dev_dbg(&pf->pdev->dev,
"MIB Change Event in HOST mode\n");
}
}


@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_DCB_LIB_H_
#define _ICE_DCB_LIB_H_
#include "ice.h"
#include "ice_lib.h"
#ifdef CONFIG_DCB
#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf);
void ice_update_dcb_stats(struct ice_pf *pf);
int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
struct ice_tx_buf *first);
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
tlan_ctx->cgd_num = ring->dcb_tc;
}
#else
#define ice_dcb_rebuild(pf) do {} while (0)
static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
return ICE_DFLT_TRAFFIC_CLASS;
}
static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
return 1;
}
static inline int ice_init_pf_dcb(struct ice_pf *pf)
{
dev_dbg(&pf->pdev->dev, "DCB not supported\n");
return -EOPNOTSUPP;
}
static inline int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
struct ice_tx_buf __always_unused *first)
{
return 0;
}
#define ice_update_dcb_stats(pf) do {} while (0)
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */


@ -4,6 +4,8 @@
/* ethtool support for ice */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
struct ice_stats {
char stat_string[ETH_GSTRING_LEN];
@ -33,8 +35,14 @@ static int ice_q_stats_len(struct net_device *netdev)
#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)
#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
ice_q_stats_len(n))
#define ICE_PFC_STATS_LEN ( \
(FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_rx) + \
FIELD_SIZEOF(struct ice_pf, stats.priority_xon_rx) + \
FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_tx) + \
FIELD_SIZEOF(struct ice_pf, stats.priority_xon_tx)) \
/ sizeof(u64))
#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
ICE_VSI_STATS_LEN + ice_q_stats_len(n))
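With eight user priorities, each FIELD_SIZEOF() above evaluates to 8 * sizeof(u64), so ICE_PFC_STATS_LEN works out to (4 * 8 * 8) / 8 = 32 extra counters — one xon and one xoff per priority in each direction, matching the per-priority strings and values added further down.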
static const struct ice_stats ice_gstrings_vsi_stats[] = {
ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
@ -126,6 +134,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("disable-fw-lldp", ICE_FLAG_DISABLE_FW_LLDP),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@ -309,6 +318,22 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
"port.tx-priority-%u-xon", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
"port.tx-priority-%u-xoff", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
"port.rx-priority-%u-xon", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
"port.rx-priority-%u-xoff", i);
p += ETH_GSTRING_LEN;
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
@ -382,13 +407,19 @@ static u32 ice_get_priv_flags(struct net_device *netdev)
static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS);
DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int ret = 0;
u32 i;
if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
return -EINVAL;
set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
const struct ice_priv_flag *priv_flag;
@ -400,7 +431,79 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
clear_bit(priv_flag->bitno, pf->flags);
}
return 0;
bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, change_flags)) {
if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, pf->flags)) {
enum ice_status status;
status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
NULL);
/* If unregistering for LLDP events fails, this is
* not an error state, as there shouldn't be any
* events to respond to.
*/
if (status)
dev_info(&pf->pdev->dev,
"Failed to unreg for LLDP events\n");
/* The AQ call to stop the FW LLDP agent will generate
* an error if the agent is already stopped.
*/
status = ice_aq_stop_lldp(&pf->hw, true, NULL);
if (status)
dev_warn(&pf->pdev->dev,
"Fail to stop LLDP agent\n");
/* Use case for having the FW LLDP agent stopped
* will likely not need DCB, so failure to init is
* not a concern of ethtool
*/
status = ice_init_pf_dcb(pf);
if (status)
dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
} else {
enum ice_status status;
bool dcbx_agent_status;
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
*/
status = ice_aq_start_lldp(&pf->hw, NULL);
if (status)
dev_warn(&pf->pdev->dev,
"Fail to start LLDP Agent\n");
/* AQ command to start FW DCBx agent will fail if
* the agent is already started
*/
status = ice_aq_start_stop_dcbx(&pf->hw, true,
&dcbx_agent_status,
NULL);
if (status)
dev_dbg(&pf->pdev->dev,
"Failed to start FW DCBX\n");
dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n",
dcbx_agent_status ? "ACTIVE" : "DISABLED");
/* Failure to configure MIB change or init DCB is not
* relevant to ethtool. Print notification that
* registration/init failed but do not return error
* state to ethtool
*/
status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
NULL);
if (status)
dev_dbg(&pf->pdev->dev,
"Fail to reg for MIB change\n");
status = ice_init_pf_dcb(pf);
if (status)
dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
}
}
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
static int ice_get_sset_count(struct net_device *netdev, int sset)
@ -486,6 +589,16 @@ ice_get_ethtool_stats(struct net_device *netdev,
data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
data[i++] = pf->stats.priority_xon_tx[j];
data[i++] = pf->stats.priority_xoff_tx[j];
}
for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
data[i++] = pf->stats.priority_xon_rx[j];
data[i++] = pf->stats.priority_xoff_rx[j];
}
}
/**
@ -811,7 +924,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
link_info = &vsi->port_info->phy.link_info;
/* Initialize supported and advertised settings based on phy settings */
/* Initialize supported and advertised settings based on PHY settings */
switch (link_info->phy_type_low) {
case ICE_PHY_TYPE_LOW_100BASE_TX:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
@ -1140,7 +1253,7 @@ ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
struct net_device __always_unused *netdev)
{
/* link is down and the driver needs to fall back on
* supported phy types to figure out what info to display
* supported PHY types to figure out what info to display
*/
ice_phy_type_to_ethtool(netdev, ks);
@ -1350,7 +1463,7 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
} else {
/* If autoneg is currently enabled */
if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
/* If autoneg is supported 10GBASE_T is the only phy
/* If autoneg is supported 10GBASE_T is the only PHY
* that can disable it, so otherwise return error
*/
if (ethtool_link_ksettings_test_link_mode(ks,
@ -1400,7 +1513,7 @@ ice_set_link_ksettings(struct net_device *netdev,
if (!p)
return -EOPNOTSUPP;
/* Check if this is lan vsi */
/* Check if this is LAN VSI */
ice_for_each_vsi(pf, idx)
if (pf->vsi[idx]->type == ICE_VSI_PF) {
if (np->vsi != pf->vsi[idx])
@ -1464,7 +1577,7 @@ ice_set_link_ksettings(struct net_device *netdev,
if (!abilities)
return -ENOMEM;
/* Get the current phy config */
/* Get the current PHY config */
status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
NULL);
if (status) {
@ -1559,7 +1672,7 @@ ice_set_link_ksettings(struct net_device *netdev,
}
/**
* ice_get_rxnfc - command to get RX flow classification rules
* ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
* @rule_locs: buffer to return Rx flow classification rules
@ -1822,18 +1935,21 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_vsi *vsi = np->vsi;
struct ice_dcbx_cfg *dcbx_cfg;
enum ice_status status;
/* Initialize pause params */
pause->rx_pause = 0;
pause->tx_pause = 0;
dcbx_cfg = &pi->local_dcbx_cfg;
pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
return;
/* Get current phy config */
/* Get current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status)
@ -1842,6 +1958,10 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
if (dcbx_cfg->pfc.pfcena)
/* PFC enabled so report LFC as off */
goto out;
if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
pause->tx_pause = 1;
if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
@ -1862,6 +1982,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_pf *pf = np->vsi->back;
struct ice_dcbx_cfg *dcbx_cfg;
struct ice_vsi *vsi = np->vsi;
struct ice_hw *hw = &pf->hw;
struct ice_port_info *pi;
@ -1872,6 +1993,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
dcbx_cfg = &pi->local_dcbx_cfg;
link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
/* Changing the port's flow control is not supported if this isn't the
@ -1894,6 +2016,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
if (dcbx_cfg->pfc.pfcena) {
netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
return -EOPNOTSUPP;
}
if (pause->rx_pause && pause->tx_pause)
pi->fc.req_mode = ICE_FC_FULL;
else if (pause->rx_pause && !pause->tx_pause)
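The check above makes priority and link-level flow control mutually exclusive: when any priority has PFC enabled, LFC requests are rejected. A one-line sketch of the predicate, with an illustrative helper name:

/* Sketch only: LFC may be requested only when no priority has PFC
 * enabled in the local DCBX configuration.
 */
static bool ice_lfc_allowed_sketch(struct ice_port_info *pi)
{
	return !pi->local_dcbx_cfg.pfc.pfcena;
}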
@ -2022,7 +2148,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
* @key: hash key
* @hfunc: hash function
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* Returns -EINVAL if the table specifies an invalid queue ID, otherwise
* returns 0 after programming the table.
*/
static int
@ -2089,7 +2215,7 @@ enum ice_container_type {
/**
* ice_get_rc_coalesce - get ITR values for specific ring container
* @ec: ethtool structure to fill with driver's coalesce settings
* @c_type: container type, RX or TX
* @c_type: container type, Rx or Tx
* @rc: ring container that the ITR values will come from
*
* Query the device for ice_ring_container specific ITR values. This is
@ -2191,7 +2317,7 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* ice_set_rc_coalesce - set ITR values for specific ring container
* @c_type: container type, RX or TX
* @c_type: container type, Rx or Tx
* @ec: ethtool structure from user to update ITR settings
* @rc: ring container that the ITR values will come from
* @vsi: VSI associated to the ring container

View File

@ -49,6 +49,9 @@
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
@ -318,11 +321,16 @@
#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64))
#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64))
#define GLPRT_PXONRXC(_i, _j) (0x00380300 + ((_i) * 8 + (_j) * 64))
#define GLPRT_PXONTXC(_i, _j) (0x00380D40 + ((_i) * 8 + (_j) * 64))
#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64))
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
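A short sketch of how the registers added above are typically consumed: the DCBX status field through its shift/mask pair, and one per-port, per-priority GLPRT counter through the driver's existing ice_stat_update32() helper. The wrapper name is illustrative; the stat field names follow the pf->stats usage shown later in this series:

/* Illustrative only: read the DCBX agent status and accumulate one
 * per-priority Rx XOFF counter the way other GLPRT stats are kept.
 */
static u8 ice_dcb_reg_sketch(struct ice_pf *pf, u8 port, u8 prio)
{
	struct ice_hw *hw = &pf->hw;

	ice_stat_update32(hw, GLPRT_PXOFFRXC(port, prio),
			  pf->stat_prev_loaded,
			  &pf->stats_prev.priority_xoff_rx[prio],
			  &pf->stats.priority_xoff_rx[prio]);

	return (rd32(hw, PRTDCB_GENS) & PRTDCB_GENS_DCBX_STATUS_M) >>
		PRTDCB_GENS_DCBX_STATUS_S;
}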

View File

@ -20,7 +20,7 @@ union ice_32byte_rx_desc {
} lo_dword;
union {
__le32 rss; /* RSS Hash */
__le32 fd_id; /* Flow Director filter id */
__le32 fd_id; /* Flow Director filter ID */
} hi_dword;
} qword0;
struct {
@ -99,7 +99,7 @@ enum ice_rx_ptype_payload_layer {
ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
/* RX Flex Descriptor
/* Rx Flex Descriptor
* This descriptor is used instead of the legacy version descriptor when
* ice_rlan_ctx.adv_desc is set
*/
@ -113,7 +113,7 @@ union ice_32b_rx_flex_desc {
} read;
struct {
/* Qword 0 */
u8 rxdid; /* descriptor builder profile id */
u8 rxdid; /* descriptor builder profile ID */
u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
__le16 pkt_len; /* [15:14] are reserved */
@ -149,7 +149,7 @@ union ice_32b_rx_flex_desc {
/* Rx Flex Descriptor NIC Profile
* This descriptor corresponds to RxDID 2 which contains
* metadata fields for RSS, flow id and timestamp info
* metadata fields for RSS, flow ID and timestamp info
*/
struct ice_32b_rx_flex_desc_nic {
/* Qword 0 */
@ -208,7 +208,7 @@ enum ice_flex_rx_mdid {
ICE_RX_MDID_HASH_HIGH,
};
/* RX/TX Flag64 packet flag bits */
/* Rx/Tx Flag64 packet flag bits */
enum ice_flg64_bits {
ICE_FLG_PKT_DSI = 0,
ICE_FLG_EVLAN_x8100 = 15,
@ -322,7 +322,7 @@ enum ice_rlan_ctx_rx_hsplit_1 {
ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
};
/* TX Descriptor */
/* Tx Descriptor */
struct ice_tx_desc {
__le64 buf_addr; /* Address of descriptor's data buf */
__le64 cmd_type_offset_bsz;

View File

@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
* ice_setup_rx_ctx - Configure a receive ring context
@ -73,7 +74,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
QRXFLXP_CNTXT_RXDID_IDX_M;
/* increasing context priority to pick up profile id;
/* increasing context priority to pick up profile ID;
* default is 0x01; setting to 0x03 to ensure profile
* is programmed if prev context is of same priority
*/
@ -124,6 +125,8 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* Transmit Queue Length */
tlan_ctx->qlen = ring->count;
ice_set_cgd_num(tlan_ctx, ring);
/* PF number */
tlan_ctx->pf_num = hw->pf_id;
@ -138,7 +141,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF id */
/* Firmware expects vmvf_num to be absolute VF ID */
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
@ -297,7 +300,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
* @vf_id: Id of the VF being configured
* @vf_id: ID of the VF being configured
*
* Return 0 on success and a negative value on error
*/
@ -479,7 +482,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @type: type of VSI
* @vf_id: Id of the VF being configured
* @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
@ -1301,7 +1304,11 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
* through the MSI-X enabling code. On a constrained vector budget, we map Tx
* and Rx rings to the vector as "efficiently" as possible.
*/
#ifdef CONFIG_DCB
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#else
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#endif /* CONFIG_DCB */
{
int q_vectors = vsi->num_q_vectors;
int tx_rings_rem, rx_rings_rem;
@ -1445,12 +1452,12 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
}
/**
* ice_add_mac_to_list - Add a mac address filter entry to the list
* ice_add_mac_to_list - Add a MAC address filter entry to the list
* @vsi: the VSI to be forwarded to
* @add_list: pointer to the list which contains MAC filter entries
* @macaddr: the MAC address to be added.
*
* Adds mac address filter entry to the temp list
* Adds MAC address filter entry to the temp list
*
* Returns 0 on success or ENOMEM on failure.
*/
@ -1552,7 +1559,7 @@ void ice_free_fltr_list(struct device *dev, struct list_head *h)
/**
* ice_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
* @vid: VLAN id to be added
* @vid: VLAN ID to be added
*/
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
@ -1590,7 +1597,7 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
/**
* ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
* @vsi: the VSI being configured
* @vid: VLAN id to be removed
* @vid: VLAN ID to be removed
*
* Returns 0 on success and negative on failure
*/
@ -2016,7 +2023,7 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
* @rel_vmvf_num: Relative id of VF/VM
* @rel_vmvf_num: Relative ID of VF/VM
* @rings: Tx ring array to be stopped
* @offset: offset within vsi->txq_map
*/
@ -2102,7 +2109,7 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
* @rel_vmvf_num: Relative id of VF/VM
* @rel_vmvf_num: Relative ID of VF/VM
*/
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
@ -2172,12 +2179,20 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
return -EIO;
}
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
}
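For illustration, the two fields set above relate as follows: with TC0 and TC1 enabled, ena_tc = BIT(0) | BIT(1) = 0x3 and numtc = 2, i.e. numtc is the population count of the ena_tc bitmap. A hypothetical one-liner capturing that relationship:

/* Sketch only: numtc is the number of bits set in the ena_tc map */
static u8 ice_numtc_from_ena_tc_sketch(u8 ena_tc)
{
	return hweight8(ena_tc);
}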
/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
* @type: VSI type
* @vf_id: defines VF id to which this VSI connects. This field is meant to be
* @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
*
@ -2219,7 +2234,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
/* set tc configuration */
/* set TC configuration */
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
@ -2815,3 +2830,125 @@ bool ice_is_reset_in_progress(unsigned long *state)
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
}
#ifdef CONFIG_DCB
/**
* ice_vsi_update_q_map - update our copy of the VSI info with new queue map
* @vsi: VSI being configured
* @ctx: the context buffer returned from AQ VSI update command
*/
static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
vsi->info.mapping_flags = ctx->info.mapping_flags;
memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
sizeof(vsi->info.q_mapping));
memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
sizeof(vsi->info.tc_mapping));
}
/**
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
* @vsi: the VSI being configured
* @ena_tc: TC map to be enabled
*/
static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
{
struct net_device *netdev = vsi->netdev;
struct ice_pf *pf = vsi->back;
struct ice_dcbx_cfg *dcbcfg;
u8 netdev_tc;
int i;
if (!netdev)
return;
if (!ena_tc) {
netdev_reset_tc(netdev);
return;
}
if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
return;
dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
ice_for_each_traffic_class(i)
if (vsi->tc_cfg.ena_tc & BIT(i))
netdev_set_tc_queue(netdev,
vsi->tc_cfg.tc_info[i].netdev_tc,
vsi->tc_cfg.tc_info[i].qcount_tx,
vsi->tc_cfg.tc_info[i].qoffset);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
u8 ets_tc = dcbcfg->etscfg.prio_table[i];
/* Get the mapped netdev TC# for the UP */
netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
netdev_set_prio_tc_map(netdev, i, netdev_tc);
}
}
/**
* ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
* @vsi: VSI to be configured
* @ena_tc: TC bitmap
*
* VSI queues are expected to be quiesced before calling this function
*/
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_vsi_ctx *ctx;
struct ice_pf *pf = vsi->back;
enum ice_status status;
int i, ret = 0;
u8 num_tc = 0;
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
if (ena_tc & BIT(i))
num_tc++;
/* populate max_txqs per TC */
max_txqs[i] = pf->num_lan_tx;
}
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->vf_num = 0;
ctx->info = vsi->info;
ice_vsi_setup_q_map(vsi, ctx);
/* must indicate which sections of the VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
if (status) {
dev_info(&pf->pdev->dev, "Failed VSI Update\n");
ret = -EIO;
goto out;
}
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
dev_err(&pf->pdev->dev,
"VSI %d failed TC config, error %d\n",
vsi->vsi_num, status);
ret = -EIO;
goto out;
}
ice_vsi_update_q_map(vsi, ctx);
vsi->info.valid_sections = 0;
ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
devm_kfree(&pf->pdev->dev, ctx);
return ret;
}
#endif /* CONFIG_DCB */
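Since ice_vsi_cfg_tc() expects quiesced queues, a caller would pause traffic around the TC change. A hypothetical flow, assuming close/open as the quiesce mechanism (sketch only, error handling elided):

#ifdef CONFIG_DCB
/* Hypothetical caller: stop the VSI so its queues are quiesced,
 * apply the new TC bitmap, then reopen.
 */
static int ice_vsi_retc_sketch(struct ice_vsi *vsi, u8 new_ena_tc)
{
	int err;

	ice_vsi_close(vsi);
	err = ice_vsi_cfg_tc(vsi, new_ena_tc);
	if (!err)
		err = ice_vsi_open(vsi);

	return err;
}
#endif /* CONFIG_DCB */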

View File

@ -41,6 +41,10 @@ void ice_vsi_delete(struct ice_vsi *vsi);
int ice_vsi_clear(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
#endif /* CONFIG_DCB */
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id);
@ -62,6 +66,10 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
void ice_vsi_put_qs(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
#endif /* CONFIG_DCB */
void ice_vsi_dis_irq(struct ice_vsi *vsi);
void ice_vsi_free_irq(struct ice_vsi *vsi);

View File

@ -7,8 +7,9 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#define DRV_VERSION "0.7.2-k"
#define DRV_VERSION "0.7.4-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@ -30,7 +31,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;
static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
static void ice_vsi_release_all(struct ice_pf *pf);
@ -113,14 +113,14 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
}
/**
* ice_add_mac_to_sync_list - creates list of mac addresses to be synced
* ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
* @netdev: the net device on which the sync is happening
* @addr: mac address to sync
* @addr: MAC address to sync
*
* This is a callback function which is called by the in kernel device sync
* functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
* populates the tmp_sync_list, which is later used by ice_add_mac to add the
* mac filters from the hardware.
* MAC filters from the hardware.
*/
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
@ -134,14 +134,14 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
}
/**
* ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced
* ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
* @netdev: the net device on which the unsync is happening
* @addr: mac address to unsync
* @addr: MAC address to unsync
*
* This is a callback function which is called by the in kernel device unsync
* functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
* populates the tmp_unsync_list, which is later used by ice_remove_mac to
* delete the mac filters from the hardware.
* delete the MAC filters from the hardware.
*/
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
@ -245,7 +245,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
netif_addr_unlock_bh(netdev);
}
/* Remove mac addresses in the unsync list */
/* Remove MAC addresses in the unsync list */
status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
if (status) {
@ -257,7 +257,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
}
/* Add mac addresses in the sync list */
/* Add MAC addresses in the sync list */
status = ice_add_mac(hw, &vsi->tmp_sync_list);
ice_free_fltr_list(dev, &vsi->tmp_sync_list);
/* If filter is added successfully or already exists, do not go into
@ -266,7 +266,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
*/
if (status && status != ICE_ERR_ALREADY_EXISTS) {
netdev_err(netdev, "Failed to add MAC filters\n");
/* If there is no more space for new umac filters, vsi
/* If there is no more space for new umac filters, VSI
* should go into promiscuous mode. There should be some
* space reserved for promiscuous filters.
*/
@ -317,7 +317,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply TX filter rule to get traffic from VMs */
/* Apply Tx filter rule to get traffic from VMs */
status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_TX);
if (status) {
@ -327,7 +327,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
err = -EIO;
goto out_promisc;
}
/* Apply RX filter rule to get traffic from wire */
/* Apply Rx filter rule to get traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_RX);
if (status) {
@ -338,7 +338,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto out_promisc;
}
} else {
/* Clear TX filter rule to stop traffic from VMs */
/* Clear Tx filter rule to stop traffic from VMs */
status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_TX);
if (status) {
@ -348,7 +348,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
err = -EIO;
goto out_promisc;
}
/* Clear RX filter to remove traffic from wire */
/* Clear Rx filter to remove traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
@ -396,6 +396,51 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
}
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
* @locked: is the rtnl_lock already held
*/
static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
set_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
if (!locked) {
rtnl_lock();
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
rtnl_unlock();
} else {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
}
} else {
ice_vsi_close(vsi);
}
}
}
/**
* ice_pf_dis_all_vsi - Pause all VSIs on a PF
* @pf: the PF
* @locked: is the rtnl_lock already held
*/
#ifdef CONFIG_DCB
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#else
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
ice_dis_vsi(pf->vsi[v], locked);
}
/**
* ice_prepare_for_reset - prep for the core to reset
* @pf: board private structure
@ -416,7 +461,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_vc_notify_reset(pf);
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf);
ice_pf_dis_all_vsi(pf, false);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@ -504,7 +549,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
pf->hw.reset_ongoing = false;
ice_rebuild(pf);
/* clear bit to resume normal operations, but
* ICE_NEEDS_RESTART bit is set incase rebuild failed
* ICE_NEEDS_RESTART bit is set in case rebuild failed
*/
clear_bit(__ICE_RESET_OICR_RECV, pf->state);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
@ -608,9 +653,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
/**
* ice_vsi_link_event - update the vsi's netdev
* @vsi: the vsi on which the link event occurred
* @link_up: whether or not the vsi needs to be set up or down
* ice_vsi_link_event - update the VSI's netdev
* @vsi: the VSI on which the link event occurred
* @link_up: whether or not the VSI needs to be set up or down
*/
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
@ -891,6 +936,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
break;
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
default:
dev_dbg(&pf->pdev->dev,
"%s Receive Queue unknown event 0x%04x ignored\n",
@ -1236,7 +1284,7 @@ static void ice_service_task(struct work_struct *work)
/**
* ice_set_ctrlq_len - helper function to set controlq length
* @hw: pointer to the hw instance
* @hw: pointer to the HW instance
*/
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
@ -1796,12 +1844,12 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
* ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
* ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
* @vid: vlan id to be added
* @vid: VLAN ID to be added
*
* net_device_ops implementation for adding vlan ids
* net_device_ops implementation for adding VLAN IDs
*/
static int
ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
@ -1827,7 +1875,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
return ret;
}
/* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
/* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
*/
@ -1841,12 +1889,12 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
}
/**
* ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
* ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
* @vid: vlan id to be removed
* @vid: VLAN ID to be removed
*
* net_device_ops implementation for removing vlan ids
* net_device_ops implementation for removing VLAN IDs
*/
static int
ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
@ -2285,6 +2333,15 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_init_pf(pf);
err = ice_init_pf_dcb(pf);
if (err) {
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
/* do not fail overall init if DCB init fails */
err = 0;
}
ice_determine_q_usage(pf);
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
@ -2622,7 +2679,7 @@ static void __exit ice_module_exit(void)
module_exit(ice_module_exit);
/**
* ice_set_mac_address - NDO callback to set mac address
* ice_set_mac_address - NDO callback to set MAC address
* @netdev: network interface device structure
* @pi: pointer to an address structure
*
@ -2659,14 +2716,14 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
/* When we change the mac address we also have to change the mac address
* based filter rules that were created previously for the old mac
/* When we change the MAC address we also have to change the MAC address
* based filter rules that were created previously for the old MAC
* address. So first, we remove the old filter rule using ice_remove_mac
* and then create a new filter rule using ice_add_mac. Note that for
* both these operations, we first need to form a "list" of mac
* addresses (even though in this case, we have only 1 mac address to be
* both these operations, we first need to form a "list" of MAC
* addresses (even though in this case, we have only 1 MAC address to be
* added/removed) and this is done using ice_add_mac_to_list. Depending on
* the ensuing operation this "list" of mac addresses is either to be
* the ensuing operation this "list" of MAC addresses is either to be
* added or removed from the filter.
*/
err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
@ -2704,12 +2761,12 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return err;
}
/* change the netdev's mac address */
/* change the netdev's MAC address */
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
netdev->dev_addr);
/* write new mac address to the firmware */
/* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
@ -2751,7 +2808,7 @@ static void ice_set_rx_mode(struct net_device *netdev)
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN id
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
* @extack: netlink extended ack
*/
@ -2791,7 +2848,7 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN id
* @vid: VLAN ID
*/
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
@ -2850,8 +2907,8 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
}
/**
* ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
* @vsi: VSI to setup vlan properties for
* ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
* @vsi: VSI to setup VLAN properties for
*/
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
@ -2883,6 +2940,7 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
ice_vsi_cfg_dcb_rings(vsi);
err = ice_vsi_cfg_lan_txqs(vsi);
if (!err)
@ -3193,6 +3251,8 @@ static void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
&prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
ice_update_dcb_stats(pf);
ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
&prev_ps->crc_errors, &cur_ps->crc_errors);
@ -3570,47 +3630,31 @@ static void ice_vsi_release_all(struct ice_pf *pf)
}
}
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
* @locked: is the rtnl_lock already held
*/
static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
set_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
if (!locked) {
rtnl_lock();
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
rtnl_unlock();
} else {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
}
} else {
ice_vsi_close(vsi);
}
}
}
/**
* ice_ena_vsi - resume a VSI
* @vsi: the VSI being resumed
* @locked: is the rtnl_lock already held
*/
static int ice_ena_vsi(struct ice_vsi *vsi)
static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
int err = 0;
if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
vsi->netdev) {
if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
return err;
clear_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
struct net_device *netd = vsi->netdev;
if (netif_running(vsi->netdev)) {
rtnl_lock();
err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
rtnl_unlock();
if (locked) {
err = netd->netdev_ops->ndo_open(netd);
} else {
rtnl_lock();
err = netd->netdev_ops->ndo_open(netd);
rtnl_unlock();
}
} else {
err = ice_vsi_open(vsi);
}
@ -3619,30 +3663,22 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
return err;
}
/**
* ice_pf_dis_all_vsi - Pause all VSIs on a PF
* @pf: the PF
*/
static void ice_pf_dis_all_vsi(struct ice_pf *pf)
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
ice_dis_vsi(pf->vsi[v], false);
}
/**
* ice_pf_ena_all_vsi - Resume all VSIs on a PF
* @pf: the PF
* @locked: is the rtnl_lock already held
*/
static int ice_pf_ena_all_vsi(struct ice_pf *pf)
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
#else
static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
if (ice_ena_vsi(pf->vsi[v]))
if (ice_ena_vsi(pf->vsi[v], locked))
return -EIO;
return 0;
@ -3757,6 +3793,8 @@ static void ice_rebuild(struct ice_pf *pf)
if (err)
goto err_sched_init_port;
ice_dcb_rebuild(pf);
/* reset search_hint of irq_trackers to 0 since interrupts are
* reclaimed and could be allocated from beginning during VSI rebuild
*/
@ -3790,7 +3828,7 @@ static void ice_rebuild(struct ice_pf *pf)
}
/* restart the VSIs that were rebuilt and running before the reset */
err = ice_pf_ena_all_vsi(pf);
err = ice_pf_ena_all_vsi(pf, false);
if (err) {
dev_err(&pf->pdev->dev, "error enabling VSIs\n");
/* no need to disable VSIs in tear down path in ice_rebuild()
@ -3986,7 +4024,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
/**
* ice_bridge_getlink - Get the hardware bridge mode
* @skb: skb buff
* @pid: process id
* @pid: process ID
* @seq: RTNL message seq
* @dev: the netdev being configured
* @filter_mask: filter mask passed in

View File

@ -5,7 +5,7 @@
/**
* ice_aq_read_nvm
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @module_typeid: module pointer location in words from the NVM beginning
* @offset: byte offset from the module beginning
* @length: length of the section to be read (in bytes from the offset)
@ -235,7 +235,7 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
@ -248,7 +248,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
u32 fla, gens_stat;
u8 sr_size;
/* The SR size is stored regardless of the nvm programming mode
/* The SR size is stored regardless of the NVM programming mode
* as the blank mode may be used in the factory line.
*/
gens_stat = rd32(hw, GLNVM_GENS);

View File

@ -43,9 +43,9 @@ ice_sched_add_root_node(struct ice_port_info *pi,
/**
* ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
* @start_node: pointer to the starting ice_sched_node struct in a sub-tree
* @teid: node teid to search
* @teid: node TEID to search
*
* This function searches for a node matching the teid in the scheduling tree
* This function searches for a node matching the TEID in the scheduling tree
* from the SW DB. The search is recursive and is restricted by the number of
* layers it has searched through; stopping at the max supported layer.
*
@ -66,7 +66,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
return NULL;
/* Check if teid matches to any of the children nodes */
/* Check if TEID matches to any of the children nodes */
for (i = 0; i < start_node->num_children; i++)
if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
return start_node->children[i];
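For reference, a hedged sketch of a typical caller: resolving a TEID reported in an AQ event against the SW DB by starting the recursive search at the port root (wrapper name is illustrative):

/* Sketch only: resolve a little-endian TEID from an event buffer
 * against the SW DB, starting at the port root node.
 */
static struct ice_sched_node *
ice_sched_resolve_teid_sketch(struct ice_port_info *pi, __le32 raw_teid)
{
	u32 teid = le32_to_cpu(raw_teid);

	return ice_sched_find_node_by_teid(pi->root, teid);
}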
@ -86,7 +86,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
/**
* ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @cmd_opc: cmd opcode
* @elems_req: number of elements to request
* @buf: pointer to buffer
@ -118,7 +118,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
/**
* ice_aq_query_sched_elems - query scheduler elements
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @elems_req: number of elements to query
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
static enum ice_status
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@ -137,31 +137,6 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
elems_ret, cd);
}
/**
* ice_sched_query_elem - query element information from hw
* @hw: pointer to the hw struct
* @node_teid: node teid to be queried
* @buf: buffer to element information
*
* This function queries HW element information
*/
static enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf)
{
u16 buf_size, num_elem_ret = 0;
enum ice_status status;
buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
buf->generic[0].node_teid = cpu_to_le32(node_teid);
status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
NULL);
if (status || num_elem_ret != 1)
ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
return status;
}
/**
* ice_sched_add_node - Insert the Tx scheduler node in SW DB
* @pi: port information structure
@ -226,7 +201,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
/**
* ice_aq_delete_sched_elems - delete scheduler elements
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @grps_req: number of groups to delete
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@ -246,13 +221,13 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
}
/**
* ice_sched_remove_elems - remove nodes from hw
* @hw: pointer to the hw struct
* ice_sched_remove_elems - remove nodes from HW
* @hw: pointer to the HW struct
* @parent: pointer to the parent node
* @num_nodes: number of nodes
* @node_teids: array of node teids to be deleted
*
* This function remove nodes from hw
* This function removes nodes from HW
*/
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
@ -285,7 +260,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
/**
* ice_sched_get_first_node - get the first node of the given layer
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @parent: pointer the base node of the subtree
* @layer: layer number
*
@ -406,7 +381,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
/**
* ice_aq_get_dflt_topo - gets default scheduler topology
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @lport: logical port number
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@ -436,7 +411,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
/**
* ice_aq_add_sched_elems - adds scheduling element
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @grps_req: the number of groups that are requested to be added
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@ -457,7 +432,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
/**
* ice_aq_suspend_sched_elems - suspend scheduler elements
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @elems_req: number of elements to suspend
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@ -478,7 +453,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
/**
* ice_aq_resume_sched_elems - resume scheduler elements
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @elems_req: number of elements to resume
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@ -499,7 +474,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
/**
* ice_aq_query_sched_res - query scheduler resource
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @buf_size: buffer size in bytes
* @buf: pointer to buffer
* @cd: pointer to command details structure or NULL
@ -518,13 +493,13 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
}
/**
* ice_sched_suspend_resume_elems - suspend or resume hw nodes
* @hw: pointer to the hw struct
* ice_sched_suspend_resume_elems - suspend or resume HW nodes
* @hw: pointer to the HW struct
* @num_nodes: number of nodes
* @node_teids: array of node teids to be suspended or resumed
* @suspend: true means suspend / false means resume
*
* This function suspends or resumes hw nodes
* This function suspends or resumes HW nodes
*/
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
@ -558,10 +533,10 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
}
/**
* ice_sched_clear_agg - clears the agg related information
* ice_sched_clear_agg - clears the aggregator related information
* @hw: pointer to the hardware structure
*
* This function removes agg list and free up agg related memory
* This function removes the aggregator list and frees up aggregator-related memory
* previously allocated.
*/
void ice_sched_clear_agg(struct ice_hw *hw)
@ -619,7 +594,7 @@ void ice_sched_clear_port(struct ice_port_info *pi)
/**
* ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* Cleanup scheduling elements from SW DB for all the ports
*/
@ -643,16 +618,16 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
}
/**
* ice_sched_add_elems - add nodes to hw and SW DB
* ice_sched_add_elems - add nodes to HW and SW DB
* @pi: port information structure
* @tc_node: pointer to the branch node
* @parent: pointer to the parent node
* @layer: layer number to add nodes
* @num_nodes: number of nodes
* @num_nodes_added: pointer to num nodes added
* @first_node_teid: if new nodes are added then return the teid of first node
* @first_node_teid: if new nodes are added then return the TEID of first node
*
* This function add nodes to hw as well as to SW DB for a given layer
* This function adds nodes to HW as well as to SW DB for a given layer
*/
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
@ -746,7 +721,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
* @parent: pointer to parent node
* @layer: layer number to add nodes
* @num_nodes: number of nodes to be added
* @first_node_teid: pointer to the first node teid
* @first_node_teid: pointer to the first node TEID
* @num_nodes_added: pointer to number of nodes added
*
* This function adds nodes to a given layer.
@ -798,7 +773,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
*num_nodes_added += num_added;
}
/* Don't modify the first node teid memory if the first node was
/* Don't modify the first node TEID memory if the first node was
* added already in the above call. Instead send some temp
* memory for all other recursive calls.
*/
@ -830,7 +805,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
/**
* ice_sched_get_qgrp_layer - get the current queue group layer number
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* This function returns the current queue group layer number
*/
@ -842,7 +817,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
/**
* ice_sched_get_vsi_layer - get the current VSI layer number
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* This function returns the current VSI layer number
*/
@ -853,7 +828,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 7 4
* 5 or less sw_entry_point_layer
*/
/* calculate the vsi layer based on number of layers. */
/* calculate the VSI layer based on number of layers. */
if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
@ -971,7 +946,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
goto err_init_port;
}
/* If the last node is a leaf node then the index of the Q group
/* If the last node is a leaf node then the index of the queue group
* layer is two less than the number of elements.
*/
if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
@ -1080,7 +1055,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
/**
* ice_sched_find_node_in_subtree - Find node in part of base node subtree
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @base: pointer to the base node
* @node: pointer to the node to search
*
@ -1112,13 +1087,13 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
}
/**
* ice_sched_get_free_qparent - Get a free lan or rdma q group node
* ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: branch number
* @owner: lan or rdma
* @owner: LAN or RDMA
*
* This function retrieves a free lan or rdma q group node
* This function retrieves a free LAN or RDMA queue group node
*/
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
@ -1136,11 +1111,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_ctx)
return NULL;
vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate invalid VSI id */
/* validate invalid VSI ID */
if (!vsi_node)
goto lan_q_exit;
/* get the first q group node from VSI sub-tree */
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
while (qgrp_node) {
/* make sure the qgroup node is part of the VSI subtree */
@ -1156,12 +1131,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
}
/**
* ice_sched_get_vsi_node - Get a VSI node based on VSI id
* @hw: pointer to the hw struct
* ice_sched_get_vsi_node - Get a VSI node based on VSI ID
* @hw: pointer to the HW struct
* @tc_node: pointer to the TC node
* @vsi_handle: software VSI handle
*
* This function retrieves a VSI node for a given VSI id from a given
* This function retrieves a VSI node for a given VSI ID from a given
* TC branch
*/
static struct ice_sched_node *
@ -1186,7 +1161,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @num_qs: number of queues
* @num_nodes: num nodes array
*
@ -1202,7 +1177,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
/* calculate num nodes from q group to VSI layer */
/* calculate num nodes from queue group to VSI layer */
for (i = qgl; i > vsil; i--) {
/* round to the next integer if there is a remainder */
num = DIV_ROUND_UP(num, hw->max_children[i]);
@ -1218,10 +1193,10 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* @vsi_handle: software VSI handle
* @tc_node: pointer to the TC node
* @num_nodes: pointer to the num nodes that needs to be added per layer
* @owner: node owner (lan or rdma)
* @owner: node owner (LAN or RDMA)
*
* This function adds the VSI child nodes to tree. It gets called for
* lan and rdma separately.
* LAN and RDMA separately.
*/
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
@ -1270,7 +1245,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
/**
* ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
@ -1389,7 +1364,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
/* calculate number of supported nodes needed for this VSI */
ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
/* add vsi supported nodes to tc subtree */
/* add VSI supported nodes to TC subtree */
return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
num_nodes);
}
@ -1460,7 +1435,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* @vsi_handle: software VSI handle
* @tc: TC number
* @maxqs: max number of queues
* @owner: lan or rdma
* @owner: LAN or RDMA
* @enable: TC enabled or disabled
*
* This function adds/updates VSI nodes based on the number of queues. If TC is
@ -1485,7 +1460,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
return ICE_ERR_PARAM;
vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
/* suspend the VSI if tc is not enabled */
/* suspend the VSI if TC is not enabled */
if (!enable) {
if (vsi_node && vsi_node->in_use) {
u32 teid = le32_to_cpu(vsi_node->info.node_teid);
@ -1536,7 +1511,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
}
/**
* ice_sched_rm_agg_vsi_entry - remove agg related VSI info entry
* ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry
* @pi: port information structure
* @vsi_handle: software VSI handle
*
@ -1641,7 +1616,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_free_sched_node(pi, vsi_node);
vsi_ctx->sched.vsi_node[i] = NULL;
/* clean up agg related vsi info if any */
/* clean up aggregator related VSI info if any */
ice_sched_rm_agg_vsi_info(pi, vsi_handle);
}
if (owner == ICE_SCHED_NODE_OWNER_LAN)

View File

@ -24,6 +24,10 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);

View File

@ -12,6 +12,7 @@ enum ice_status {
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
ICE_ERR_NOT_SUPPORTED = -4,
ICE_ERR_BAD_PTR = -5,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,

View File

@ -19,7 +19,7 @@
* byte 6 = 0x2: to identify it as locally administered SA MAC
* byte 12 = 0x81 & byte 13 = 0x00:
* In case of VLAN filter the first two bytes define the ether type (0x8100)
* and remaining two bytes are placeholder for programming a given VLAN id
* and remaining two bytes are placeholder for programming a given VLAN ID
* In case of Ether type filter it is treated as header without VLAN tag
* and byte 12 and 13 is used to program a given Ether type instead
*/
@ -51,7 +51,7 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/**
* ice_aq_alloc_free_res - command to allocate/free resources
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @num_entries: number of resource entries in buffer
* @buf: Indirect buffer to hold data parameters and response
* @buf_size: size of buffer for indirect commands
@ -87,7 +87,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
@ -163,7 +163,7 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
/**
* ice_aq_add_vsi
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
*
@ -206,7 +206,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_free_vsi
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
* @cd: pointer to command details structure or NULL
@ -242,7 +242,7 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_update_vsi
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
*
@ -279,7 +279,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_is_vsi_valid - check whether the VSI is valid or not
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* check whether the VSI is valid or not
@ -290,11 +290,11 @@ bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
}
/**
* ice_get_hw_vsi_num - return the hw VSI number
* @hw: pointer to the hw struct
* ice_get_hw_vsi_num - return the HW VSI number
* @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* return the hw VSI number
* return the HW VSI number
* Caution: call this function only if VSI is valid (ice_is_vsi_valid)
*/
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
@ -304,7 +304,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* return the VSI context entry for a given VSI handle
@ -316,7 +316,7 @@ struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_save_vsi_ctx - save the VSI context for a given VSI handle
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle: VSI handle
* @vsi: VSI context pointer
*
@ -330,7 +330,7 @@ ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/**
* ice_clear_vsi_ctx - clear the VSI context entry
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* clear the VSI context entry
@ -348,7 +348,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_clear_all_vsi_ctx - clear all the VSI context entries
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*/
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
@ -360,7 +360,7 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/**
* ice_add_vsi - add VSI context to the hardware and VSI handle list
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle provided by drivers
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
@ -383,7 +383,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
return status;
tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!tmp_vsi_ctx) {
/* Create a new vsi context */
/* Create a new VSI context */
tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*tmp_vsi_ctx), GFP_KERNEL);
if (!tmp_vsi_ctx) {
@ -403,7 +403,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_free_vsi- free VSI context from hardware and VSI handle list
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle
* @vsi_ctx: pointer to a VSI context struct
* @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
@ -428,7 +428,7 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_update_vsi
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
@ -447,8 +447,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_alloc_free_vsi_list
* @hw: pointer to the hw struct
* @vsi_list_id: VSI list id returned or used for lookup
* @hw: pointer to the HW struct
* @vsi_list_id: VSI list ID returned or used for lookup
* @lkup_type: switch rule filter lookup type
* @opc: switch rules population command type - pass in the command opcode
*
@ -504,7 +504,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
/**
* ice_aq_sw_rules - add/update/remove switch rules
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @rule_list: pointer to switch rule population list
* @rule_list_sz: total size of the rule list in bytes
* @num_rules: number of switch rules in the rule_list
@ -653,7 +653,7 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
* 1. The switch is a VEB AND
* 2
* 2.1 The lookup is a directional lookup like ethertype,
* promiscuous, ethertype-mac, promiscuous-vlan
* promiscuous, ethertype-MAC, promiscuous-VLAN
* and default-port OR
* 2.2 The lookup is VLAN, OR
* 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
@ -821,7 +821,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
* @hw: pointer to the hardware structure
* @m_ent: the management entry for which sw marker needs to be added
* @sw_marker: sw marker to tag the Rx descriptor with
* @l_id: large action resource id
* @l_id: large action resource ID
*
* Create a large action to hold software marker and update the switch rule
* entry pointed by m_ent with newly created large action
@ -833,8 +833,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
/* For software marker we need 3 large actions
* 1. FWD action: FWD TO VSI or VSI LIST
* 2. GENERIC VALUE action to hold the profile id
* 3. GENERIC VALUE action to hold the software marker id
* 2. GENERIC VALUE action to hold the profile ID
* 3. GENERIC VALUE action to hold the software marker ID
*/
const u16 num_lg_acts = 3;
enum ice_status status;
@ -897,13 +897,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
/* Update the action to point to the large action id */
/* Update the action to point to the large action ID */
rx_tx->pdata.lkup_tx_rx.act =
cpu_to_le32(ICE_SINGLE_ACT_PTR |
((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
ICE_SINGLE_ACT_PTR_VAL_M));
/* Use the filter rule id of the previously created rule with single
/* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
* action
*/
@ -926,10 +926,10 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* @hw: pointer to the hardware structure
* @vsi_handle_arr: array of VSI handles to set in the VSI mapping
* @num_vsi: number of VSI handles in the array
* @vsi_list_id: VSI list id generated as part of allocate resource
* @vsi_list_id: VSI list ID generated as part of allocate resource
*
* Helper function to create a new entry of VSI list id to VSI mapping
* using the given VSI list id
* Helper function to create a new entry of VSI list ID to VSI mapping
* using the given VSI list ID
*/
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
@ -957,13 +957,13 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* @hw: pointer to the hardware structure
* @vsi_handle_arr: array of VSI handles to form a VSI list
* @num_vsi: number of VSI handles in the array
* @vsi_list_id: VSI list id generated as part of allocate resource
* @vsi_list_id: VSI list ID generated as part of allocate resource
* @remove: Boolean value to indicate if this is a remove action
* @opc: switch rules population command type - pass in the command opcode
* @lkup_type: lookup type of the filter
*
* Call AQ command to add a new switch rule or update existing switch rule
* using the given VSI list id
* using the given VSI list ID
*/
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
@ -1020,7 +1020,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
/**
* ice_create_vsi_list_rule - Creates and populates a VSI list rule
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
* @vsi_handle_arr: array of VSI handles to form a VSI list
* @num_vsi: number of VSI handles in the array
* @vsi_list_id: stores the ID of the VSI list to be created
@ -1114,7 +1114,7 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
* @f_info: filter information for switch rule
*
* Call AQ command to update a previously created switch rule with a
* VSI list id
* VSI list ID
*/
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
@ -1141,7 +1141,7 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
/**
* ice_update_sw_rule_bridge_mode
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* Updates unicast switch filter rules based on VEB/VEPA mode
*/
@ -1196,7 +1196,7 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Allocate a new VSI list and add two VSIs
* to this list using switch rule command
* Update the previously created switch rule with the
* newly created VSI list id
* newly created VSI list ID
* if a VSI list was previously created
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
@ -1277,7 +1277,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
return 0;
/* Update the previously created VSI list set with
* the new VSI id passed in
* the new VSI ID passed in
*/
vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
opcode = ice_aqc_opc_update_sw_rules;
@ -1285,7 +1285,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
vsi_list_id, false, opcode,
new_fltr->lkup_type);
/* update VSI list mapping info with new VSI id */
/* update VSI list mapping info with new VSI ID */
if (!status)
set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
}
@ -1327,7 +1327,7 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
* @hw: pointer to the hardware structure
* @recp_id: lookup type for which VSI lists needs to be searched
* @vsi_handle: VSI handle to be found in VSI list
* @vsi_list_id: VSI list id found containing vsi_handle
* @vsi_list_id: VSI list ID found containing vsi_handle
*
* Helper function to search a VSI list with single entry containing given VSI
* handle element. This can be extended further to search VSI list with more
@ -1358,7 +1358,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
/**
* ice_add_rule_internal - add rule for a given lookup type
* @hw: pointer to the hardware structure
* @recp_id: lookup type (recipe id) for which rule has to be added
* @recp_id: lookup type (recipe ID) for which rule has to be added
* @f_entry: structure containing MAC forwarding information
*
* Adds or updates the rule lists for a given recipe
@ -1403,7 +1403,7 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
/**
* ice_remove_vsi_list_rule
* @hw: pointer to the hardware structure
* @vsi_list_id: VSI list id generated as part of allocate resource
* @vsi_list_id: VSI list ID generated as part of allocate resource
* @lkup_type: switch rule filter lookup type
*
* The VSI list should be emptied before this function is called to remove the
@ -1528,7 +1528,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
/**
* ice_remove_rule_internal - Remove a filter rule of a given type
* @hw: pointer to the hardware structure
* @recp_id: recipe id for which the rule needs to removed
* @recp_id: recipe ID for which the rule needs to be removed
* @f_entry: rule entry containing filter information
*/
static enum ice_status
@ -1578,7 +1578,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
if (status)
goto exit;
/* if vsi count goes to zero after updating the vsi list */
/* if VSI count goes to zero after updating the VSI list */
if (list_elem->vsi_count == 0)
remove_rule = true;
}
@ -1656,7 +1656,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
return ICE_ERR_PARAM;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* update the src in case it is vsi num */
/* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM;
m_list_itr->fltr_info.src = hw_vsi_id;
@ -1732,7 +1732,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
((u8 *)r_iter + (elem_sent * s_rule_size));
}
/* Fill up rule id based on the value returned from FW */
/* Fill up rule ID based on the value returned from FW */
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
@ -1792,7 +1792,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
new_fltr = &f_entry->fltr_info;
/* VLAN id should only be 12 bits */
/* VLAN ID should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
return ICE_ERR_PARAM;
@ -1850,7 +1850,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
}
}
} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
/* Update existing VSI list to add new VSI id only if it used
/* Update existing VSI list to add new VSI ID only if it is used
* by one VLAN rule.
*/
cur_fltr = &v_list_itr->fltr_info;
@ -1860,7 +1860,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* If a VLAN rule exists and the VSI list used by this rule is
* referenced by more than one VLAN rule, create a new VSI list
* appending the previous VSI with the new VSI, and update the existing
* VLAN rule to point to new VSI list id
* VLAN rule to point to new VSI list ID
*/
struct ice_fltr_info tmp_fltr;
u16 vsi_handle_arr[2];
@ -2192,7 +2192,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_entry;
enum ice_status status = 0;
/* check to make sure VSI id is valid and within boundary */
/* check to make sure VSI ID is valid and within boundary */
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
@ -2247,7 +2247,7 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
/**
* ice_remove_promisc - Remove promisc based filter rules
* @hw: pointer to the hardware structure
* @recp_id: recipe id for which the rule needs to removed
* @recp_id: recipe ID for which the rule needs to be removed
* @v_list: list of promisc entries
*/
static enum ice_status
@ -2572,7 +2572,7 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
* @recp_id: Recipe id for which rules need to be replayed
* @recp_id: Recipe ID for which rules need to be replayed
* @list_head: list for which filters need to be replayed
*
* Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
@ -2596,7 +2596,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
f_entry.fltr_info = itr->fltr_info;
if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
itr->fltr_info.vsi_handle == vsi_handle) {
/* update the src in case it is vsi num */
/* update the src in case it is VSI num */
if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
f_entry.fltr_info.src = hw_vsi_id;
status = ice_add_rule_internal(hw, recp_id, &f_entry);
@ -2611,7 +2611,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
f_entry.fltr_info.vsi_handle = vsi_handle;
f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* update the src in case it is vsi num */
/* update the src in case it is VSI num */
if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
f_entry.fltr_info.src = hw_vsi_id;
if (recp_id == ICE_SW_LKUP_VLAN)
@ -2651,7 +2651,7 @@ enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_rm_all_sw_replay_rule_info - deletes filter replay rules
* @hw: pointer to the hw struct
* @hw: pointer to the HW struct
*
* Deletes the filter replay rules.
*/

View File

@ -44,7 +44,7 @@ enum ice_sw_lkup_type {
ICE_SW_LKUP_LAST
};
/* type of filter src id */
/* type of filter src ID */
enum ice_src_id {
ICE_SRC_ID_UNKNOWN = 0,
ICE_SRC_ID_VSI,
@ -95,8 +95,8 @@ struct ice_fltr_info {
/* Depending on filter action */
union {
/* queue id in case of ICE_FWD_TO_Q and starting
* queue id in case of ICE_FWD_TO_QGRP.
/* queue ID in case of ICE_FWD_TO_Q and starting
* queue ID in case of ICE_FWD_TO_QGRP.
*/
u16 q_id:11;
u16 hw_vsi_id:10;
@ -143,7 +143,7 @@ struct ice_sw_recipe {
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
};
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
struct ice_vsi_list_map_info {
struct list_head list_entry;
DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
@ -165,7 +165,7 @@ struct ice_fltr_list_entry {
* used for VLAN membership.
*/
struct ice_fltr_mgmt_list_entry {
/* back pointer to VSI list id to VSI list mapping */
/* back pointer to VSI list ID to VSI list mapping */
struct ice_vsi_list_map_info *vsi_list_info;
u16 vsi_count;
#define ICE_INVAL_LG_ACT_INDEX 0xffff

View File

@ -6,6 +6,7 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"
#include "ice_dcb_lib.h"
#define ICE_RX_HDR_SIZE 256
@ -456,7 +457,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (!rx_ring->netdev || !cleaned_count)
return false;
/* get the RX descriptor and buffer based on next_to_use */
/* get the Rx descriptor and buffer based on next_to_use */
rx_desc = ICE_RX_DESC(rx_ring, ntu);
bi = &rx_ring->rx_buf[ntu];
@ -959,10 +960,10 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
* ice_receive_skb - Send a completed packet up the stack
* @rx_ring: Rx ring in play
* @skb: packet to send up
* @vlan_tag: vlan tag for packet
* @vlan_tag: VLAN tag for packet
*
* This function sends the completed packet (via skb) up the stack using
* gro receive functions (with/without vlan tag)
* GRO receive functions (with/without VLAN tag)
*/
static void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
@ -991,7 +992,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
bool failure = false;
/* start the loop to process RX packets bounded by 'budget' */
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf;
@ -1008,7 +1009,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
cleaned_count = 0;
}
/* get the RX desc from RX ring based on 'next_to_clean' */
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
/* status_error_len will always be zero for unused descriptors
@ -1096,19 +1097,69 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
return failure ? budget : (int)total_rx_pkts;
}
static unsigned int ice_itr_divisor(struct ice_port_info *pi)
/**
* ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
* @port_info: port_info structure containing the current link speed
* @avg_pkt_size: average size of Tx or Rx packets based on clean routine
* @itr: ITR value to update
*
* Calculate how big of an increment should be applied to the ITR value passed
* in based on wmem_default, SKB overhead, Ethernet overhead, and the current
* link speed.
*
* The following is a calculation derived from:
* wmem_default / (size + overhead) = desired_pkts_per_int
* rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
* (desired_pkts_per_int / pkt_rate) * usecs_per_sec = ITR value
*
* Assuming wmem_default is 212992 and overhead is 640 bytes per
* packet (256 skb, 64 headroom, 320 shared info), we can reduce the
* formula down to:
*
*         wmem_default * bits_per_byte * usecs_per_sec    pkt_size + 24
* ITR = ------------------------------------------------ * --------------
*                            rate                         pkt_size + 640
*/
static unsigned int
ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
unsigned int avg_pkt_size,
unsigned int itr)
{
switch (pi->phy.link_info.link_speed) {
switch (port_info->phy.link_info.link_speed) {
case ICE_AQ_LINK_SPEED_100GB:
itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
avg_pkt_size + 640);
break;
case ICE_AQ_LINK_SPEED_50GB:
itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
avg_pkt_size + 640);
break;
case ICE_AQ_LINK_SPEED_40GB:
return ICE_ITR_ADAPTIVE_MIN_INC * 1024;
itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
avg_pkt_size + 640);
break;
case ICE_AQ_LINK_SPEED_25GB:
itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
avg_pkt_size + 640);
break;
case ICE_AQ_LINK_SPEED_20GB:
return ICE_ITR_ADAPTIVE_MIN_INC * 512;
case ICE_AQ_LINK_SPEED_100MB:
return ICE_ITR_ADAPTIVE_MIN_INC * 32;
itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
avg_pkt_size + 640);
break;
case ICE_AQ_LINK_SPEED_10GB:
/* fall through */
default:
return ICE_ITR_ADAPTIVE_MIN_INC * 256;
itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
avg_pkt_size + 640);
break;
}
if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
itr &= ICE_ITR_ADAPTIVE_LATENCY;
itr += ICE_ITR_ADAPTIVE_MAX_USECS;
}
return itr;
}
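
To make the per-speed constants concrete, the following is a standalone userspace sketch (illustrative only, not driver code) that reproduces the increment math for the 10GB and 100GB cases. The multipliers 170 and 17 come straight from the switch statement above; the DIV_ROUND_UP macro and the main() harness are local stand-ins:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Per-speed increment: (mult * (size + 24)) / (size + 640), rounded up */
static unsigned int itr_inc(unsigned int mult, unsigned int avg_pkt_size)
{
	return DIV_ROUND_UP(mult * (avg_pkt_size + 24), avg_pkt_size + 640);
}

int main(void)
{
	/* 1500-byte frames: itr_inc(170, 1500) == 122, itr_inc(17, 1500) == 13 */
	printf("10GB:  +%u\n", itr_inc(170, 1500));
	printf("100GB: +%u\n", itr_inc(17, 1500));
	return 0;
}

Larger frames push (size + 24) / (size + 640) toward 1, so the increment approaches the raw per-speed multiplier; small frames shrink it, matching the desired packets-per-interrupt behavior the derivation targets.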
/**
@ -1127,8 +1178,8 @@ static unsigned int ice_itr_divisor(struct ice_port_info *pi)
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
unsigned int avg_wire_size, packets, bytes, itr;
unsigned long next_update = jiffies;
unsigned int packets, bytes, itr;
bool container_is_rx;
if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
@ -1173,7 +1224,7 @@ ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
if (packets && packets < 4 && bytes < 9000 &&
(q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
itr = ICE_ITR_ADAPTIVE_LATENCY;
goto adjust_by_size;
goto adjust_by_size_and_speed;
}
} else if (packets < 4) {
/* If we have Tx and Rx ITR maxed and Tx ITR is running in
@ -1241,70 +1292,11 @@ ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
*/
itr = ICE_ITR_ADAPTIVE_BULK;
adjust_by_size:
/* If packet counts are 256 or greater we can assume we have a gross
* overestimation of what the rate should be. Instead of trying to fine
* tune it just use the formula below to try and dial in an exact value
* give or take the current packet size of the frame.
*/
avg_wire_size = bytes / packets;
adjust_by_size_and_speed:
/* The following is a crude approximation of:
* wmem_default / (size + overhead) = desired_pkts_per_int
* rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
* (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
*
* Assuming wmem_default is 212992 and overhead is 640 bytes per
* packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
* formula down to
*
* (170 * (size + 24)) / (size + 640) = ITR
*
* We first do some math on the packet size and then finally bitshift
* by 8 after rounding up. We also have to account for PCIe link speed
* difference as ITR scales based on this.
*/
if (avg_wire_size <= 60) {
/* Start at 250k ints/sec */
avg_wire_size = 4096;
} else if (avg_wire_size <= 380) {
/* 250K ints/sec to 60K ints/sec */
avg_wire_size *= 40;
avg_wire_size += 1696;
} else if (avg_wire_size <= 1084) {
/* 60K ints/sec to 36K ints/sec */
avg_wire_size *= 15;
avg_wire_size += 11452;
} else if (avg_wire_size <= 1980) {
/* 36K ints/sec to 30K ints/sec */
avg_wire_size *= 5;
avg_wire_size += 22420;
} else {
/* plateau at a limit of 30K ints/sec */
avg_wire_size = 32256;
}
/* If we are in low latency mode halve our delay which doubles the
* rate to somewhere between 100K to 16K ints/sec
*/
if (itr & ICE_ITR_ADAPTIVE_LATENCY)
avg_wire_size >>= 1;
/* Resultant value is 256 times larger than it needs to be. This
* gives us room to adjust the value as needed to either increase
* or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
*
* Use addition as we have already recorded the new latency flag
* for the ITR value.
*/
itr += DIV_ROUND_UP(avg_wire_size,
ice_itr_divisor(q_vector->vsi->port_info)) *
ICE_ITR_ADAPTIVE_MIN_INC;
if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
itr &= ICE_ITR_ADAPTIVE_LATENCY;
itr += ICE_ITR_ADAPTIVE_MAX_USECS;
}
/* based on checks above packets cannot be 0 so division is safe */
itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
bytes / packets, itr);
clear_counts:
/* write back value */
@ -1772,7 +1764,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
}
/**
* ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
* @tx_ring: ring to send buffer on
* @first: pointer to struct ice_tx_buf
*
@ -1798,7 +1790,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
* to the encapsulated ethertype.
*/
skb->protocol = vlan_get_protocol(skb);
goto out;
return 0;
}
/* if we have a HW VLAN tag being added, default to the HW one */
@ -1820,8 +1812,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
}
out:
return 0;
return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
/**

View File

@ -45,6 +45,8 @@
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
#define ICE_TX_FLAGS_VLAN_S 16
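
The two new defines carve a 3-bit 802.1p priority field out of the VLAN tag that tx_flags carries in its upper half: the 16-bit tag sits at bits 31:16 (ICE_TX_FLAGS_VLAN_S), so the tag's PCP bits land at 31:29. A hypothetical pair of helpers, not part of the driver, to show the pack/unpack mechanics (kernel types assumed):

/* Illustrative helpers only, not driver code */
static inline u32 pack_vlan_tx_flags(u16 vlan_id, u8 prio)
{
	/* tag in bits 31:16, 3-bit priority masked into bits 31:29 */
	return ((u32)vlan_id << ICE_TX_FLAGS_VLAN_S) |
	       (((u32)prio << ICE_TX_FLAGS_VLAN_PR_S) &
		ICE_TX_FLAGS_VLAN_PR_M);
}

static inline u8 vlan_prio_from_tx_flags(u32 tx_flags)
{
	return (tx_flags & ICE_TX_FLAGS_VLAN_PR_M) >> ICE_TX_FLAGS_VLAN_PR_S;
}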
#define ICE_RX_DMA_ATTR \
@ -160,6 +162,9 @@ struct ice_ring {
};
u16 q_index; /* Queue number of ring */
u32 txq_teid; /* Added Tx queue TEID */
#ifdef CONFIG_DCB
u8 dcb_tc; /* Traffic class of ring */
#endif /* CONFIG_DCB */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */

View File

@ -107,7 +107,7 @@ struct ice_link_status {
};
/* Different reset sources for which a disable queue AQ call has to be made in
* order to clean the TX scheduler as a part of the reset
* order to clean the Tx scheduler as a part of the reset
*/
enum ice_disq_rst_src {
ICE_NO_RESET = 0,
@ -129,11 +129,11 @@ struct ice_phy_info {
struct ice_hw_common_caps {
u32 valid_functions;
/* TX/RX queues */
u16 num_rxq; /* Number/Total RX queues */
u16 rxq_first_id; /* First queue ID for RX queues */
u16 num_txq; /* Number/Total TX queues */
u16 txq_first_id; /* First queue ID for TX queues */
/* Tx/Rx queues */
u16 num_rxq; /* Number/Total Rx queues */
u16 rxq_first_id; /* First queue ID for Rx queues */
u16 num_txq; /* Number/Total Tx queues */
u16 txq_first_id; /* First queue ID for Tx queues */
/* MSI-X vectors */
u16 num_msix_vectors;
@ -148,6 +148,8 @@ struct ice_hw_common_caps {
/* RSS related capabilities */
u16 rss_table_size; /* 512 for PFs and 64 for VFs */
u8 rss_table_entry_width; /* RSS Entry width in bits */
u8 dcb;
};
/* Function specific capabilities */
@ -213,12 +215,14 @@ struct ice_nvm_info {
#define ice_for_each_traffic_class(_i) \
for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
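
The iterator expands to a plain for loop over every possible TC, so callers are expected to filter on an enabled-TC bitmap themselves. A minimal usage sketch (the tc_cfg pointer and the print are hypothetical):

int i;

ice_for_each_traffic_class(i) {
	/* skip TCs that are not set in the enable bitmap */
	if (!(tc_cfg->ena_tc & BIT(i)))
		continue;
	pr_info("TC %d is enabled\n", i);
}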
#define ICE_INVAL_TEID 0xFFFFFFFF
struct ice_sched_node {
struct ice_sched_node *parent;
struct ice_sched_node *sibling; /* next sibling in the same layer */
struct ice_sched_node **children;
struct ice_aqc_txsched_elem_data info;
u32 agg_id; /* aggregator group id */
u32 agg_id; /* aggregator group ID */
u16 vsi_handle;
u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
@ -245,7 +249,7 @@ enum ice_agg_type {
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_DFLT_BW_WT 1
/* vsi type list entry to locate corresponding vsi/ag nodes */
/* VSI type list entry to locate corresponding VSI/ag nodes */
struct ice_sched_vsi_info {
struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
@ -260,9 +264,62 @@ struct ice_sched_tx_policy {
u8 rdma_ena;
};
/* CEE or IEEE 802.1Qaz ETS Configuration data */
struct ice_dcb_ets_cfg {
u8 willing;
u8 cbs;
u8 maxtcs;
u8 prio_table[ICE_MAX_TRAFFIC_CLASS];
u8 tcbwtable[ICE_MAX_TRAFFIC_CLASS];
u8 tsatable[ICE_MAX_TRAFFIC_CLASS];
};
/* CEE or IEEE 802.1Qaz PFC Configuration data */
struct ice_dcb_pfc_cfg {
u8 willing;
u8 mbc;
u8 pfccap;
u8 pfcena;
};
/* CEE or IEEE 802.1Qaz Application Priority data */
struct ice_dcb_app_priority_table {
u16 prot_id;
u8 priority;
u8 selector;
};
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 32
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
#define ICE_TLV_STATUS_ERR 0x4
#define ICE_APP_PROT_ID_FCOE 0x8906
#define ICE_APP_PROT_ID_ISCSI 0x0cbc
#define ICE_APP_PROT_ID_FIP 0x8914
#define ICE_APP_SEL_ETHTYPE 0x1
#define ICE_APP_SEL_TCPIP 0x2
#define ICE_CEE_APP_SEL_ETHTYPE 0x0
#define ICE_CEE_APP_SEL_TCPIP 0x1
struct ice_dcbx_cfg {
u32 numapps;
u32 tlv_status; /* CEE mode TLV status */
struct ice_dcb_ets_cfg etscfg;
struct ice_dcb_ets_cfg etsrec;
struct ice_dcb_pfc_cfg pfc;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
u8 app_mode;
#define ICE_DCBX_APPS_NON_WILLING 0x1
};
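
As an example of how the application table is meant to be populated, the entry below (hypothetical values, not a driver default) would steer iSCSI traffic to user priority 4 using the selector and protocol-ID defines above:

/* Hypothetical example entry, not a driver default */
struct ice_dcb_app_priority_table iscsi_app = {
	.prot_id  = ICE_APP_PROT_ID_ISCSI,	/* 0x0cbc == TCP port 3260 */
	.priority = 4,				/* example user priority */
	.selector = ICE_APP_SEL_TCPIP,
};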
struct ice_port_info {
struct ice_sched_node *root; /* Root Node per Port */
struct ice_hw *hw; /* back pointer to hw instance */
struct ice_hw *hw; /* back pointer to HW instance */
u32 last_node_teid; /* scheduler last node info */
u16 sw_id; /* Initial switch ID belongs to port */
u16 pf_vf_num;
@ -277,6 +334,13 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
/* DCBX info */
struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
/* LLDP/DCBX Status */
u8 dcbx_status;
u8 is_sw_lldp;
u8 lport;
#define ICE_LPORT_MASK 0xff
u8 is_vf;
@ -323,7 +387,7 @@ struct ice_hw {
u8 pf_id; /* device profile info */
/* TX Scheduler values */
/* Tx Scheduler values */
u16 num_tx_sched_layers;
u16 num_tx_sched_phys_layers;
u8 flattened_layers;
@ -334,7 +398,7 @@ struct ice_hw {
struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
u8 reset_ongoing; /* true if hw is in reset, false otherwise */
u8 reset_ongoing; /* true if HW is in reset, false otherwise */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
@ -413,6 +477,11 @@ struct ice_hw_port_stats {
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
u64 link_xoff_tx; /* lxofftxc */
u64 priority_xon_rx[8]; /* pxonrxc[8] */
u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
u64 priority_xon_tx[8]; /* pxontxc[8] */
u64 priority_xoff_tx[8]; /* pxofftxc[8] */
u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
u64 rx_size_64; /* prc64 */
u64 rx_size_127; /* prc127 */
u64 rx_size_255; /* prc255 */

View File

@ -375,9 +375,9 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
}
/**
* ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add pvid
* @ctxt: the vsi ctxt to fill
* @vid: the VLAN id to set as a PVID
* ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
* @ctxt: the VSI ctxt to fill
* @vid: the VLAN ID to set as a PVID
*/
static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
{
@ -391,7 +391,7 @@ static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
}
/**
* ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove pvid
* ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
* @ctxt: the VSI ctxt to fill
*/
static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
@ -406,8 +406,8 @@ static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
/**
* ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
* @vsi: the VSI to update
* @vid: the VLAN id to set as a PVID
* @enable: true for enable pvid false for disable
* @vid: the VLAN ID to set as a PVID
* @enable: true to enable the PVID, false to disable it
*/
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
{
@ -445,7 +445,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
* ice_vf_vsi_setup - Set up a VF VSI
* @pf: board private structure
* @pi: pointer to the port_info instance
* @vf_id: defines VF id to which this VSI connects.
* @vf_id: defines VF ID to which this VSI connects.
*
* Returns pointer to the successfully allocated VSI struct on success,
* otherwise returns NULL on failure.
@ -513,7 +513,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
* We dont want to reconfigure interrupts since AVF driver doesn't
* We don't want to reconfigure interrupts since AVF driver doesn't
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
@ -1508,9 +1508,9 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
/**
* ice_find_vsi_from_id
* @pf: the pf structure to search for the VSI
* @id: id of the VSI it is searching for
* @id: ID of the VSI it is searching for
*
* searches for the VSI with the given id
* searches for the VSI with the given ID
*/
static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
{
@ -1526,9 +1526,9 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
/**
* ice_vc_isvalid_vsi_id
* @vf: pointer to the VF info
* @vsi_id: VF relative VSI id
* @vsi_id: VF relative VSI ID
*
* check for the valid VSI id
* check for the valid VSI ID
*/
static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
@ -1543,10 +1543,10 @@ static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
/**
* ice_vc_isvalid_q_id
* @vf: pointer to the VF info
* @vsi_id: VSI id
* @qid: VSI relative queue id
* @vsi_id: VSI ID
* @qid: VSI relative queue ID
*
* check for the valid queue id
* check for the valid queue ID
*/
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
@ -2005,7 +2005,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
* ice_vc_handle_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @set: true if mac filters are being set, false otherwise
* @set: true if MAC filters are being set, false otherwise
*
* add guest MAC address filter
*/
@ -2065,7 +2065,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
maddr, vf->vf_id);
continue;
} else {
/* VF can't remove dflt_lan_addr/bcast mac */
/* VF can't remove dflt_lan_addr/bcast MAC */
dev_err(&pf->pdev->dev,
"VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
maddr, vf->vf_id);
@ -2091,7 +2091,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
goto handle_mac_exit;
}
/* get here if maddr is multicast or if VF can change mac */
/* get here if maddr is multicast or if VF can change MAC */
if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
goto handle_mac_exit;
@ -2154,7 +2154,7 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
* VFs get a default number of queues but can use this message to request a
* different number. If the request is successful, PF will reset the VF and
* return 0. If unsuccessful, PF will send message informing VF of number of
* available queue pairs via virtchnl message response to vf.
* available queue pairs via virtchnl message response to VF.
*/
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
@ -2210,11 +2210,11 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
* ice_set_vf_port_vlan
* @netdev: network interface device structure
* @vf_id: VF identifier
* @vlan_id: VLAN id being set
* @vlan_id: VLAN ID being set
* @qos: priority setting
* @vlan_proto: VLAN protocol
*
* program VF Port VLAN id and/or qos
* program VF Port VLAN ID and/or QoS
*/
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@ -2257,7 +2257,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
return ret;
}
/* If pvid, then remove all filters on the old VLAN */
/* If PVID, then remove all filters on the old VLAN */
if (vsi->info.pvid)
ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
VLAN_VID_MASK));
@ -2296,7 +2296,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
* @msg: pointer to the msg buffer
* @add_v: Add VLAN if true, otherwise delete VLAN
*
* Process virtchnl op to add or remove programmed guest VLAN id
* Process virtchnl op to add or remove programmed guest VLAN ID
*/
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
@ -2443,7 +2443,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
* Add and program guest VLAN id
* Add and program guest VLAN ID
*/
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
@ -2455,7 +2455,7 @@ static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
* remove programmed guest VLAN id
* remove programmed guest VLAN ID
*/
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
@ -2771,9 +2771,9 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
* @mac: mac address
* @mac: MAC address
*
* program VF mac address
* program VF MAC address
*/
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
@ -2800,7 +2800,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return -EINVAL;
}
/* copy mac into dflt_lan_addr and trigger a VF reset. The reset
/* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
* flow will use the updated dflt_lan_addr and add a MAC filter
* using ice_add_mac. Also set pf_set_mac to indicate that the PF has
* set the MAC address for this VF.

View File

@ -48,10 +48,10 @@ enum ice_virtchnl_cap {
struct ice_vf {
struct ice_pf *pf;
s16 vf_id; /* VF id in the PF space */
s16 vf_id; /* VF ID in the PF space */
u32 driver_caps; /* reported by VF driver */
int first_vector_idx; /* first vector index of this VF */
struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
struct virtchnl_ether_addr dflt_lan_addr;
u16 port_vlan_id;
@ -59,10 +59,10 @@ struct ice_vf {
u8 trusted;
u16 lan_vsi_idx; /* index into PF struct */
u16 lan_vsi_num; /* ID as used by firmware */
u64 num_mdd_events; /* number of mdd events detected */
u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_caps; /* VF's adv. capabilities */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
u8 link_forced;