Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-08-20

This series contains updates to ice driver only.

Brett fixes the detection of a hung transmit ring by checking the
software-based tail (next_to_use) to determine if there is pending work.
Updates the driver to assume that using more than one receive queue per
receive ring container is a rare case, so use unlikely() in the case
where we actually need to divide our budget across multiple queues.
Fixes an issue where the write-back on ITR bit was not being set when
interrupts are disabled, which caused descriptors to be written back
during polling only once a cache line was filled.  Cleans up unnecessary
wait times during VF bring-up and reset paths.  Increases the size of
the mailbox receive queue used to communicate with VFs to accommodate
the large number of VFs that the driver can support.
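
A minimal sketch of the pending-work check (illustrative only, using
plain parameters rather than the driver's struct ice_ring fields):

/* Compare the SW head (next_to_clean) with the SW tail (next_to_use)
 * instead of reading the tail register from hardware.
 */
static u16 tx_pending(u16 next_to_clean, u16 next_to_use, u16 count)
{
	if (next_to_clean == next_to_use)
		return 0;
	return (next_to_clean < next_to_use) ?
		next_to_use - next_to_clean :
		next_to_use + count - next_to_clean;
}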

Akeem restructures the initialization flows for VFs, including how VFs
are configured and how resources are allocated, so that when we clean
up resources we do not try to free resources that were never allocated.
Organizes the code to ensure that VF-specific code is located in the
SR-IOV-specific file.

Paul fixes an issue when setting the pause parameter, which was
incorrectly blocking users from changing receive or transmit pause
settings.  Ensures that register accesses for the MSIX vector index are
done only in the PF space and not in absolute device space.
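
A minimal sketch of the PF-relative vs. absolute vector index calculation
(illustrative only; names follow the fields used in the series):

/* PF-relative index: what PF-space registers such as GLINT_DYN_CTL and
 * GLINT_VECT2FUNC expect.
 */
static int vf_first_vector_idx(int sriov_base_vector, int vf_id,
			       int num_vf_msix)
{
	return sriov_base_vector + vf_id * num_vf_msix;
}

/* Absolute device-space index: the PF-relative index plus the PF's first
 * MSIX vector ID; still needed for registers such as VPINT_ALLOC.
 */
static int vf_first_abs_vector_idx(int pf_rel_idx, int msix_vector_first_id)
{
	return pf_rel_idx + msix_vector_first_id;
}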

Usha fixes a potential kernel hang in the DCB rebuild path when in CEE
mode, where the ETS recommended DCB configuration was not being set, or
was set incorrectly.

Mitch updates the driver to process all receive descriptors, regardless
of the size of the associated data.

Tony fixes an issue during the reset/rebuild path of a PF VSI where we
were assuming that the PF VSI was always to be enabled; this could
attempt to bring up a PF VSI on a downed interface, which can lead to
various crashes.
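
A minimal sketch of the guard (simplified; not the exact rebuild-path code):

/* Only re-open the PF VSI if its netdev was running before the reset;
 * unconditionally calling ice_vsi_open() here could bring up a downed
 * interface.
 */
if (vsi->netdev && netif_running(vsi->netdev)) {
	rtnl_lock();
	err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	rtnl_unlock();
}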

Pawel fixes up variable definitions to match the type of data being
stored.

v2: Dropped patch 1 of the series to add ethtool support to query/add
    channels on a VSI, while we re-work the functionality to match the
    ethtool expected behavior to report combined (Tx and Rx) numbers.
v3: Updated patch 4 to use kzalloc() and kfree() instead of devm_kzalloc()
    and devm_kfree().
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2019-08-20 17:02:44 -07:00
commit ac2eb56e75
9 changed files with 324 additions and 146 deletions


@ -69,7 +69,8 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
#define ICE_MBXQ_LEN 64
#define ICE_MBXSQ_LEN 64
#define ICE_MBXRQ_LEN 512
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_MAX_TXQS 2048
@ -86,16 +87,6 @@ extern const char ice_drv_ver[];
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
#define ICE_MAX_VF_COUNT 256
#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
#define ICE_DFLT_QS_PER_VF 4
#define ICE_NONQ_VECS_VF 1
#define ICE_MAX_SCATTER_QS_PER_VF 16
#define ICE_MAX_BASE_QS_PER_VF 16
#define ICE_MAX_INTR_PER_VF 65
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_RESET_WAIT 20
@ -220,6 +211,7 @@ enum ice_state {
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_STATE_NBITS /* must be last */
};


@ -203,16 +203,87 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
return ret;
}
/**
* ice_cfg_etsrec_defaults - Set default ETS recommended DCB config
* @pi: port information structure
*/
static void ice_cfg_etsrec_defaults(struct ice_port_info *pi)
{
struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg;
u8 i;
/* Ensure ETS recommended DCB configuration is not already set */
if (dcbcfg->etsrec.maxtcs)
return;
/* In CEE mode, set the default to 1 TC */
dcbcfg->etsrec.maxtcs = 1;
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
dcbcfg->etsrec.tcbwtable[i] = i ? 0 : 100;
dcbcfg->etsrec.tsatable[i] = i ? ICE_IEEE_TSA_STRICT :
ICE_IEEE_TSA_ETS;
}
}
/**
* ice_dcb_need_recfg - Check if DCB needs reconfig
* @pf: board private structure
* @old_cfg: current DCB config
* @new_cfg: new DCB config
*/
static bool
ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg)
{
bool need_reconfig = false;
/* Check if ETS configuration has changed */
if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
sizeof(new_cfg->etscfg))) {
/* If Priority Table has changed reconfig is needed */
if (memcmp(&new_cfg->etscfg.prio_table,
&old_cfg->etscfg.prio_table,
sizeof(new_cfg->etscfg.prio_table))) {
need_reconfig = true;
dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
}
if (memcmp(&new_cfg->etscfg.tcbwtable,
&old_cfg->etscfg.tcbwtable,
sizeof(new_cfg->etscfg.tcbwtable)))
dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
if (memcmp(&new_cfg->etscfg.tsatable,
&old_cfg->etscfg.tsatable,
sizeof(new_cfg->etscfg.tsatable)))
dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
}
/* Check if PFC configuration has changed */
if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
need_reconfig = true;
dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
}
/* Check if APP Table has changed */
if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
need_reconfig = true;
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
}
dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
return need_reconfig;
}
/**
* ice_dcb_rebuild - rebuild DCB post reset
* @pf: physical function instance
*/
void ice_dcb_rebuild(struct ice_pf *pf)
{
struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *prev_cfg;
enum ice_status ret;
u8 willing;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@ -224,9 +295,15 @@ void ice_dcb_rebuild(struct ice_pf *pf)
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg;
desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg;
/* Save current willing state and force FW to unwilling */
willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
local_dcbx_cfg->etscfg.willing = 0x0;
local_dcbx_cfg->pfc.willing = 0x0;
local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING;
ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
@ -234,8 +311,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
/* Retrieve DCB config and ensure same as current in SW */
prev_cfg = devm_kmemdup(&pf->pdev->dev,
&pf->hw.port_info->local_dcbx_cfg,
prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg,
sizeof(*prev_cfg), GFP_KERNEL);
if (!prev_cfg) {
dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
@ -243,22 +319,22 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
ice_init_dcb(&pf->hw);
if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
sizeof(*prev_cfg))) {
if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
/* difference in cfg detected - disable DCB till next MIB */
dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
devm_kfree(&pf->pdev->dev, prev_cfg);
goto dcb_error;
}
/* fetched config congruent to previous configuration */
devm_kfree(&pf->pdev->dev, prev_cfg);
/* Configuration replayed - reset willing state to previous */
pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
/* Set the local desired config */
memset(&pf->hw.port_info->local_dcbx_cfg, 0, sizeof(*local_dcbx_cfg));
memcpy(local_dcbx_cfg, desired_dcbx_cfg, sizeof(*local_dcbx_cfg));
ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n");
dev_err(&pf->pdev->dev, "Failed to set desired config\n");
goto dcb_error;
}
dev_info(&pf->pdev->dev, "DCB restored after reset\n");
@ -501,55 +577,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
return 0;
}
/**
* ice_dcb_need_recfg - Check if DCB needs reconfig
* @pf: board private structure
* @old_cfg: current DCB config
* @new_cfg: new DCB config
*/
static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg)
{
bool need_reconfig = false;
/* Check if ETS configuration has changed */
if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
sizeof(new_cfg->etscfg))) {
/* If Priority Table has changed reconfig is needed */
if (memcmp(&new_cfg->etscfg.prio_table,
&old_cfg->etscfg.prio_table,
sizeof(new_cfg->etscfg.prio_table))) {
need_reconfig = true;
dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
}
if (memcmp(&new_cfg->etscfg.tcbwtable,
&old_cfg->etscfg.tcbwtable,
sizeof(new_cfg->etscfg.tcbwtable)))
dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
if (memcmp(&new_cfg->etscfg.tsatable,
&old_cfg->etscfg.tsatable,
sizeof(new_cfg->etscfg.tsatable)))
dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
}
/* Check if PFC configuration has changed */
if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
need_reconfig = true;
dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
}
/* Check if APP Table has changed */
if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
need_reconfig = true;
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
}
dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
return need_reconfig;
}
/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf


@ -2856,6 +2856,7 @@ static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_link_status *hw_link_info;
struct ice_pf *pf = np->vsi->back;
struct ice_dcbx_cfg *dcbx_cfg;
@ -2866,6 +2867,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
u8 aq_failures;
bool link_up;
int err = 0;
u32 is_an;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
@ -2880,7 +2882,30 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -EOPNOTSUPP;
}
if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
/* Get pause param reports configured and negotiated flow control pause
* when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS is
* defined, get pause param pause->autoneg reports the SW configured setting,
* so compare pause->autoneg with the SW configured value to prevent the user
* from using set pause param to change autoneg.
*/
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
/* Get current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status) {
kfree(pcaps);
return -EIO;
}
is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
kfree(pcaps);
if (pause->autoneg != is_an) {
netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
return -EOPNOTSUPP;
}


@ -127,8 +127,11 @@
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
#define GLINT_DYN_CTL_ITR_INDX_S 3
#define GLINT_DYN_CTL_ITR_INDX_M ICE_M(0x3, 3)
#define GLINT_DYN_CTL_INTERVAL_S 5
#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5)
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4))


@ -41,12 +41,12 @@ static void ice_update_pf_stats(struct ice_pf *pf);
* ice_get_tx_pending - returns number of Tx descriptors not processed
* @ring: the ring of descriptors
*/
static u32 ice_get_tx_pending(struct ice_ring *ring)
static u16 ice_get_tx_pending(struct ice_ring *ring)
{
u32 head, tail;
u16 head, tail;
head = ring->next_to_clean;
tail = readl(ring->tail);
tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@ -1507,8 +1507,8 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}
@ -3701,8 +3701,6 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
err = netd->netdev_ops->ndo_open(netd);
rtnl_unlock();
}
} else {
err = ice_vsi_open(vsi);
}
}


@ -607,6 +607,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int truesize = ICE_RXBUF_2048;
#endif
if (!size)
return;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
rx_buf->page_offset, size, truesize);
@ -662,6 +664,8 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
prefetchw(rx_buf->page);
*skb = rx_buf->skb;
if (!size)
return rx_buf;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
rx_buf->page_offset, size,
@ -745,8 +749,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
/* hand second half of page back to the ring */
if (!rx_buf)
return;
if (ice_can_reuse_rx_page(rx_buf)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
rx_ring->rx_stats.page_reuse_count++;
} else {
@ -1031,8 +1038,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
/* allocate (if needed) and populate skb */
if (skb)
ice_add_rx_frag(rx_buf, skb, size);
else
@ -1041,7 +1049,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
rx_buf->pagecnt_bias++;
if (rx_buf)
rx_buf->pagecnt_bias++;
break;
}
@ -1355,6 +1364,23 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
struct ice_ring_container *rx = &q_vector->rx;
u32 itr_val;
/* when exiting WB_ON_ITR, set a low ITR value and trigger
* interrupts to expire right away in case we have more work ready to go
* already
*/
if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
/* set target back to last user set value */
rx->target_itr = rx->itr_setting;
/* set current to what we just wrote and dynamic if needed */
rx->current_itr = ICE_WB_ON_ITR_USECS |
(rx->itr_setting & ICE_ITR_DYNAMIC);
/* allow normal interrupt flow to start */
q_vector->itr_countdown = 0;
return;
}
/* This will do nothing if dynamic updates are not enabled */
ice_update_itr(q_vector, tx);
ice_update_itr(q_vector, rx);
@ -1399,6 +1425,41 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
itr_val);
}
/**
* ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
* @vsi: pointer to the VSI structure
* @q_vector: q_vector to set WB_ON_ITR on
*
* We need to tell hardware to write-back completed descriptors even when
* interrupts are disabled. Descriptors will be written back on cache line
* boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR,
* descriptors that don't fill a cache line may not be written back until
* the next interrupt.
*
* This sets the write-back frequency to 2 microseconds as that is the minimum
* value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
* make sure hardware knows we aren't meddling with the INTENA_M bit.
*/
static void
ice_set_wb_on_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
/* already in WB_ON_ITR mode no need to change it */
if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
return;
if (q_vector->num_ring_rx)
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
ICE_RX_ITR));
if (q_vector->num_ring_tx)
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
ICE_TX_ITR));
q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
}
/**
* ice_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
@ -1414,8 +1475,8 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
container_of(napi, struct ice_q_vector, napi);
struct ice_vsi *vsi = q_vector->vsi;
bool clean_complete = true;
int budget_per_ring = 0;
struct ice_ring *ring;
int budget_per_ring;
int work_done = 0;
/* Since the actual Tx work is minimal, we can give the Tx a larger
@ -1429,11 +1490,16 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
if (budget <= 0)
return budget;
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
*/
if (q_vector->num_ring_rx)
/* normally we have 1 Rx ring per q_vector */
if (unlikely(q_vector->num_ring_rx > 1))
/* We attempt to distribute budget to each Rx queue fairly, but
* don't allow the budget to go below 1 because that would exit
* polling early.
*/
budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
else
/* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget;
ice_for_each_ring(ring, q_vector->rx) {
int cleaned;
@ -1454,6 +1520,8 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*/
if (likely(napi_complete_done(napi, work_done)))
ice_update_ena_itr(vsi, q_vector);
else
ice_set_wb_on_itr(vsi, q_vector);
return min_t(int, work_done, budget - 1);
}


@ -144,6 +144,19 @@ enum ice_rx_dtype {
#define ICE_DFLT_INTRL 0
#define ICE_MAX_INTRL 236
#define ICE_WB_ON_ITR_USECS 2
#define ICE_IN_WB_ON_ITR_MODE 255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
* setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
* set the write-back latency to the usecs passed in.
*/
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \
((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
GLINT_DYN_CTL_INTERVAL_M) | \
(((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
GLINT_DYN_CTL_WB_ON_ITR_M)
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
#define ICE_TX_LEGACY 1


@ -382,12 +382,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
wr32(hw, PF_PCI_CIAA,
VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
for (i = 0; i < 100; i++) {
for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
reg = rd32(hw, PF_PCI_CIAD);
if ((reg & VF_TRANS_PENDING_M) != 0)
dev_err(&pf->pdev->dev,
"VF %d PCI transactions stuck\n", vf->vf_id);
udelay(1);
/* no transactions pending so stop polling */
if ((reg & VF_TRANS_PENDING_M) == 0)
break;
dev_err(&pf->pdev->dev,
"VF %d PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@ -474,19 +477,20 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
}
/**
* ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW
* ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
* @pf: pointer to PF structure
* @vf: pointer to VF that the first MSIX vector index is being calculated for
*
* This returns the first MSIX vector index in HW that is used by this VF and
* this will always be the OICR index in the AVF driver so any functionality
* This returns the first MSIX vector index in PF space that is used by this VF.
* This index is used when accessing PF relative registers such as
* GLINT_VECT2FUNC and GLINT_DYN_CTL.
* This will always be the OICR index in the AVF driver so any functionality
* using vf->first_vector_idx for queue configuration will have to increment by
* 1 to avoid meddling with the OICR index.
*/
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
return pf->hw.func_caps.common_cap.msix_vector_first_id +
pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
}
/**
@ -597,27 +601,30 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
*/
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
int abs_vf_id, abs_first, abs_last;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int first, last, v;
struct ice_hw *hw;
int abs_vf_id;
u32 reg;
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
first = vf->first_vector_idx;
last = (first + pf->num_vf_msix) - 1;
abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
abs_last = (abs_first + pf->num_vf_msix) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* VF Vector allocation */
reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
VPINT_ALLOC_VALID_M);
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
& VPINT_ALLOC_PCI_FIRST_M) |
((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
VPINT_ALLOC_PCI_VALID_M);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
/* map the interrupts to its functions */
@ -974,6 +981,47 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
return status;
}
/**
* ice_config_res_vfs - Finalize allocation of VFs resources in one go
* @pf: pointer to the PF structure
*
* This function is called as the last part of resetting all VFs, or when
* configuring VFs for the first time, where there are no resources to be freed.
* Returns true if resources were properly allocated for all VFs, and false
* otherwise.
*/
static bool ice_config_res_vfs(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
int v;
if (ice_check_avail_res(pf)) {
dev_err(&pf->pdev->dev,
"Cannot allocate VF resources, try with fewer number of VFs\n");
return false;
}
/* rearm global interrupts */
if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
ice_irq_dynamic_ena(hw, NULL, NULL);
/* Finish resetting each VF and allocate resources */
for (v = 0; v < pf->num_alloc_vfs; v++) {
struct ice_vf *vf = &pf->vf[v];
vf->num_vf_qs = pf->num_vf_qps;
dev_dbg(&pf->pdev->dev,
"VF-id %d has %d queues configured\n",
vf->vf_id, vf->num_vf_qs);
ice_cleanup_and_realloc_vf(vf);
}
ice_flush(hw);
clear_bit(__ICE_VF_DIS, pf->state);
return true;
}
/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
@ -1023,7 +1071,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
* finished resetting.
*/
for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
usleep_range(10000, 20000);
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
@ -1031,8 +1078,11 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
vf = &pf->vf[v];
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & VPGEN_VFRSTAT_VFRD_M))
if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
/* only delay if the check failed */
usleep_range(10, 20);
break;
}
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@ -1046,7 +1096,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
*/
if (v < pf->num_alloc_vfs)
dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
usleep_range(10000, 20000);
/* free VF resources to begin resetting the VSI state */
for (v = 0; v < pf->num_alloc_vfs; v++) {
@ -1066,25 +1115,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
dev_err(&pf->pdev->dev,
"Failed to free MSIX resources used by SR-IOV\n");
if (ice_check_avail_res(pf)) {
dev_err(&pf->pdev->dev,
"Cannot allocate VF resources, try with fewer number of VFs\n");
if (!ice_config_res_vfs(pf))
return false;
}
/* Finish the reset on each VF */
for (v = 0; v < pf->num_alloc_vfs; v++) {
vf = &pf->vf[v];
vf->num_vf_qs = pf->num_vf_qps;
dev_dbg(&pf->pdev->dev,
"VF-id %d has %d queues configured\n",
vf->vf_id, vf->num_vf_qs);
ice_cleanup_and_realloc_vf(vf);
}
ice_flush(hw);
clear_bit(__ICE_VF_DIS, pf->state);
return true;
}
@ -1137,12 +1169,14 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
* poll the status register to make sure that the reset
* completed successfully.
*/
usleep_range(10000, 20000);
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
if (reg & VPGEN_VFRSTAT_VFRD_M) {
rsd = true;
break;
}
/* only sleep if the reset is not done */
usleep_range(10, 20);
}
/* Display a warning if VF didn't manage to reset in time, but need to
@ -1152,8 +1186,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
vf->vf_id);
usleep_range(10000, 20000);
/* disable promiscuous modes in case they were enabled
* ignore any error if disabling process failed
*/
@ -1249,7 +1281,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
set_bit(__ICE_OICR_INTR_DIS, pf->state);
ice_flush(hw);
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
@ -1278,13 +1310,13 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
}
pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated during reset */
if (!ice_reset_all_vfs(pf, true)) {
/* VF resources get allocated with initialization */
if (!ice_config_res_vfs(pf)) {
ret = -EIO;
goto err_unroll_sriov;
}
goto err_unroll_intr;
return ret;
err_unroll_sriov:
pf->vf = NULL;
@ -1296,6 +1328,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
clear_bit(__ICE_OICR_INTR_DIS, pf->state);
return ret;
}
@ -2250,8 +2283,8 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
if (v_ret) {
dev_err(&pf->pdev->dev,
"can't update MAC filters for VF %d, error %d\n",
vf->vf_id, v_ret);
"can't %s MAC filters for VF %d, error %d\n",
set ? "add" : "remove", vf->vf_id, v_ret);
} else {
if (set)
vf->num_mac += mac_count;
@ -2304,11 +2337,11 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
int req_queues = vfres->num_queue_pairs;
u16 req_queues = vfres->num_queue_pairs;
struct ice_pf *pf = vf->pf;
int max_allowed_vf_queues;
int tx_rx_queue_left;
int cur_queues;
u16 max_allowed_vf_queues;
u16 tx_rx_queue_left;
u16 cur_queues;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@ -2316,29 +2349,30 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
}
cur_queues = vf->num_vf_qs;
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
tx_rx_queue_left = min_t(u16, pf->q_left_tx, pf->q_left_rx);
max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
if (req_queues <= 0) {
if (!req_queues) {
dev_err(&pf->pdev->dev,
"VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
"VF %d tried to request 0 queues. Ignoring.\n",
vf->vf_id);
} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues - cur_queues > tx_rx_queue_left) {
} else if (req_queues > cur_queues &&
req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(&pf->pdev->dev,
"VF %d requested %d more queues, but only %d left.\n",
"VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
ICE_MAX_BASE_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
ice_vc_dis_vf(vf);
dev_info(&pf->pdev->dev,
"VF %d granted request of %d queues.\n",
"VF %d granted request of %u queues.\n",
vf->vf_id, req_queues);
return 0;
}


@ -22,6 +22,23 @@
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_M 0x20
/* wait defines for polling PF_PCI_CIAD register status */
#define ICE_PCI_CIAD_WAIT_COUNT 100
#define ICE_PCI_CIAD_WAIT_DELAY_US 1
/* VF resources default values and limitation */
#define ICE_MAX_VF_COUNT 256
#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
#define ICE_DFLT_QS_PER_VF 4
#define ICE_NONQ_VECS_VF 1
#define ICE_MAX_SCATTER_QS_PER_VF 16
#define ICE_MAX_BASE_QS_PER_VF 16
#define ICE_MAX_INTR_PER_VF 65
#define ICE_MAX_POLICY_INTR_PER_VF 33
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
/* Specific VF states */
enum ice_vf_states {
ICE_VF_STATE_INIT = 0,
@ -45,7 +62,8 @@ struct ice_vf {
s16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
int first_vector_idx; /* first vector index of this VF */
/* first vector index of this VF in the PF space */
int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */