Merge branch 'qed-fixes'

Yuval Mintz says:

====================
qed*: Bug fixes

This series contains several small fixes, most of which deal with
100G support, SR-IOV, or bandwidth configuration.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed on 2016-05-26 12:27:33 -07:00
commit b7e7ad611e
4 changed files with 78 additions and 23 deletions

View File

@@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev)
}
}
static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
u16 num_pqs, multi_cos_tcs = 1;
u8 pf_wfq = qm_info->pf_wfq;
u32 pf_rl = qm_info->pf_rl;
u16 num_vfs = 0;
#ifdef CONFIG_QED_SRIOV
@@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
*/
qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
num_pqs, GFP_KERNEL);
qm_info->qm_pq_params = kcalloc(num_pqs,
sizeof(struct init_qm_pq_params),
b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->qm_pq_params)
goto alloc_err;
qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
num_vports, GFP_KERNEL);
qm_info->qm_vport_params = kcalloc(num_vports,
sizeof(struct init_qm_vport_params),
b_sleepable ? GFP_KERNEL
: GFP_ATOMIC);
if (!qm_info->qm_vport_params)
goto alloc_err;
qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
MAX_NUM_PORTS, GFP_KERNEL);
qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
sizeof(struct init_qm_port_params),
b_sleepable ? GFP_KERNEL
: GFP_ATOMIC);
if (!qm_info->qm_port_params)
goto alloc_err;
qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
GFP_KERNEL);
qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->wfq_data)
goto alloc_err;
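
The new b_sleepable flag decides which allocation flags are used for the QM arrays: GFP_KERNEL may sleep and is only safe in process context, while GFP_ATOMIC never sleeps and is required when the caller may hold a spinlock or otherwise must not schedule. The callers further down pass true from the probe-time qed_resc_alloc() path and false from qed_qm_reconf(), presumably because the reconfiguration can be reached from a context that is not allowed to sleep. A minimal sketch of the pattern, with a made-up helper name (kcalloc(), GFP_KERNEL and GFP_ATOMIC are the real kernel APIs used in the diff):

/* Illustrative only, not part of the driver. Allocate a zeroed array,
 * picking the GFP flags from whether the caller is allowed to sleep.
 * Needs <linux/slab.h>. */
static void *example_calloc(size_t n, size_t size, bool b_sleepable)
{
	return kcalloc(n, size, b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
}
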
@@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
for (i = 0; i < qm_info->num_vports; i++)
qm_info->qm_vport_params[i].vport_wfq = 1;
qm_info->pf_wfq = 0;
qm_info->pf_rl = 0;
qm_info->vport_rl_en = 1;
qm_info->vport_wfq_en = 1;
qm_info->pf_rl = pf_rl;
qm_info->pf_wfq = pf_wfq;
return 0;
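
Previously the function ended by zeroing pf_wfq and pf_rl, so re-running the QM initialization (as qed_qm_reconf() does in the next hunk) silently dropped any PF WFQ weight or rate limit that had already been configured. Capturing the values in locals at entry and writing them back at the end preserves that configuration across a re-init. A sketch of the idea, assuming a cut-down qm_info with only these two fields (kernel types and headers assumed; only the field names come from the diff):

/* Illustrative only: preserve user-visible settings across a rebuild of
 * the structure. */
struct example_qm_info {
	u8 pf_wfq;	/* WFQ weight assigned to the PF */
	u32 pf_rl;	/* rate limit assigned to the PF */
};

static void example_reinit_qm(struct example_qm_info *qm)
{
	u8 pf_wfq = qm->pf_wfq;		/* capture current configuration */
	u32 pf_rl = qm->pf_rl;

	memset(qm, 0, sizeof(*qm));	/* stand-in for the re-initialization */
	/* ... recompute queue/vport parameters ... */

	qm->pf_wfq = pf_wfq;		/* restore instead of leaving zero */
	qm->pf_rl = pf_rl;
}
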
@@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_qm_info_free(p_hwfn);
/* initialize qed's qm data structure */
rc = qed_init_qm_info(p_hwfn);
rc = qed_init_qm_info(p_hwfn, false);
if (rc)
return rc;
@@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
/* Prepare and process QM requirements */
rc = qed_init_qm_info(p_hwfn);
rc = qed_init_qm_info(p_hwfn, true);
if (rc)
goto alloc_err;
@@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
hw_mode |= 1 << MODE_ASIC;
if (p_hwfn->cdev->num_hwfns > 1)
hw_mode |= 1 << MODE_100G;
p_hwfn->hw_info.hw_mode = hw_mode;
DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
"Configuring function for hw_mode: 0x%08x\n",
p_hwfn->hw_info.hw_mode);
}
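
Several fixes in this series key off the same condition: a 100G adapter is a CMT device exposing two hardware functions, so cdev->num_hwfns > 1 identifies it, and MODE_100G is set in hw_mode above accordingly. The MSI restriction and the WFQ bail-out further down test the same thing. A hypothetical helper, shown only to give that condition a name (it does not exist in the driver, which open-codes the num_hwfns check):

/* Hypothetical, for illustration: true on CMT (100G, dual hw-function)
 * devices. */
static inline bool example_is_cmt(const struct qed_dev *cdev)
{
	return cdev->num_hwfns > 1;
}
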
/* Init run time data for all PFs on an engine. */
@@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev,
u32 load_code, param;
int rc, mfw_rc, i;
if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
return -EINVAL;
}
if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0)
@@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{
int i;
if (cdev->num_hwfns > 1) {
DP_VERBOSE(cdev,
NETIF_MSG_LINK,
"WFQ configuration is not supported for this device\n");
return;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

View File

@@ -413,6 +413,7 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
/* Fallthrough */
case QED_INT_MODE_MSI:
if (cdev->num_hwfns == 1) {
rc = pci_enable_msi(cdev->pdev);
if (!rc) {
int_params->out.int_mode = QED_INT_MODE_MSI;
@@ -422,6 +423,7 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
DP_NOTICE(cdev, "Failed to enable MSI\n");
if (force_mode)
goto out;
}
/* Fallthrough */
case QED_INT_MODE_INTA:
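
Together with the qed_hw_init() check in the previous file, these hunks rule out plain MSI on CMT (100G) devices: a forced MSI mode is rejected with -EINVAL at init, and the interrupt-mode selection only attempts pci_enable_msi() when a single hw-function is present, otherwise falling through from MSI-X directly to INTA, presumably because plain MSI cannot supply the per-hwfn vectors such a device needs. A condensed sketch of the resulting selection order (QED_INT_MODE_* and pci_enable_msi() appear in the diff; the helper name is made up):

/* Sketch only, not the driver function. */
static int example_pick_int_mode(struct qed_dev *cdev)
{
	/* MSI-X is tried first (elided here) ... */

	if (cdev->num_hwfns == 1 && !pci_enable_msi(cdev->pdev))
		return QED_INT_MODE_MSI;

	return QED_INT_MODE_INTA;	/* legacy interrupt fallback */
}
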

View File

@@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST:
if (!IS_VF(edev))
return QEDE_ETHTOOL_TEST_MAX;
else
return 0;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);

View File

@@ -1824,7 +1824,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx,
{
struct qede_dev *edev = netdev_priv(dev);
return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
max_tx_rate);
}
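
This one is a straightforward copy-and-paste fix: the minimum TX rate requested from userspace (for example via ip link set <pf> vf <n> min_tx_rate <Mbps> max_tx_rate <Mbps>) was being discarded, with max_tx_rate forwarded for both arguments of the internal set_rate op. The .ndo_set_vf_rate prototype makes the intended mapping clear:

/* From struct net_device_ops: min and max TX rates (in Mb/s) arrive as
 * separate arguments and must be forwarded separately. */
int (*ndo_set_vf_rate)(struct net_device *dev, int vf,
		       int min_tx_rate, int max_tx_rate);
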
@@ -2091,6 +2091,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
int qede_set_features(struct net_device *dev, netdev_features_t features)
{
struct qede_dev *edev = netdev_priv(dev);
netdev_features_t changes = features ^ dev->features;
bool need_reload = false;
/* No action needed if hardware GRO is disabled during driver load */
if (changes & NETIF_F_GRO) {
if (dev->features & NETIF_F_GRO)
need_reload = !edev->gro_disable;
else
need_reload = edev->gro_disable;
}
if (need_reload && netif_running(edev->ndev)) {
dev->features = features;
qede_reload(edev, NULL, NULL);
return 1;
}
return 0;
}
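
The new qede_set_features() hooks the feature-toggle path; the networking core calls .ndo_set_features when the requested feature set changes, for instance after ethtool -K <iface> gro on|off. Hardware GRO in qede is configured as part of bringing the datapath up, so flipping NETIF_F_GRO on a running interface needs qede_reload() to tear the datapath down and rebuild it with the new setting; otherwise the change simply takes effect the next time the interface is opened. A minimal sketch of the toggle detection used above (the helper name is made up):

/* Sketch only: XOR-ing the requested and current feature sets yields the
 * bits that are changing; only a change in NETIF_F_GRO matters here. */
static bool example_gro_toggled(const struct net_device *dev,
				netdev_features_t features)
{
	return (features ^ dev->features) & NETIF_F_GRO;
}
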
#ifdef CONFIG_QEDE_VXLAN
static void qede_add_vxlan_port(struct net_device *dev,
sa_family_t sa_family, __be16 port)
@@ -2175,6 +2198,7 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_link_state = qede_set_vf_link_state,