Merge branch 'tc-hw-offload'
John Fastabend says:

====================
tc offload for cls_u32 on ixgbe

This extends the setup_tc framework so it can support more than just the
mqprio offload and can push other classifiers and qdiscs into the hardware.
The series here targets the u32 classifier and the ixgbe driver. I worked
on the u32 classifier because it is protocol oblivious and aligns with
multiple hardware devices I have access to. I did an initial implementation
on ixgbe because (a) I have one in my box, (b) it is a stable driver, and
(c) it is relatively simple compared to the other devices I have here but
still has enough flexibility to exercise the features of cls_u32.

I intentionally limited the scope of this series to the basic feature set.
Specifically, this uses a 'big hammer' feature bit to decide whether or not
to offload: if the bit is set you get offloaded rules, if it is not then
rules are not offloaded. If we can agree on this patch series, there are
more patches in my queue we can talk about that make the offload decision
per rule using flags, similar to how we do L2 MAC updates. Additionally,
the error strategy can be improved to be hard aborting, log and continue,
etc. I think these are nice-to-have improvements, but they shouldn't block
this series.

Also, by adding get_parse_graph and set_parse_graph attributes as in my
previous flow_api work, we can build programmable devices and
programmatically learn when rules can or cannot be loaded into the
hardware. Again, future work.

...

v3 includes a couple of style fixups suggested by Jiri and a quick fix to
ixgbe to check the features flag and not the dev_features flag, the latter
being always set.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 86f447783f
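For orientation before the diff: the core of the series is that ndo_setup_tc no longer takes a bare u8 traffic-class count but a qdisc handle, a protocol, and a general struct tc_to_netdev operand, so one callback can serve both mqprio and cls_u32 offload requests. Below is a minimal sketch of that driver-side shape, modeled on the __bnx2x_setup_tc and __ixgbe_setup_tc wrappers in the diff that follows; the foo_* names are placeholders for illustration, not functions added by this series.

static int __foo_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
                          struct tc_to_netdev *tc)
{
    /* cls_u32 offload requests arrive on the ingress qdisc and are only
     * honoured when the 'big hammer' NETIF_F_HW_TC feature bit is set.
     */
    if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
        tc->type == TC_SETUP_CLSU32) {
        if (!(dev->features & NETIF_F_HW_TC))
            return -EINVAL;
        /* hypothetical helper: program or remove the hardware rule */
        return foo_offload_cls_u32(dev, proto, tc->cls_u32);
    }

    /* everything else keeps the old mqprio behaviour */
    if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
        return -EINVAL;

    return foo_setup_mqprio(dev, tc->tc);
}

The feature bit is exposed to userspace as the "hw-tc-offload" string added to ethtool.c in this series, so it can be toggled with ethtool -K; without it, cls_u32 requests are rejected and only the mqprio path remains.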
@@ -1626,11 +1626,17 @@ static void xgbe_poll_controller(struct net_device *netdev)
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
                         struct tc_to_netdev *tc_to_netdev)
{
    struct xgbe_prv_data *pdata = netdev_priv(netdev);
    unsigned int offset, queue;
    u8 i;
    u8 i, tc;

    if (handle != TC_H_ROOT || tc_to_netdev->type != TC_SETUP_MQPRIO)
        return -EINVAL;

    tc = tc_to_netdev->tc;

    if (tc && (tc != pdata->hw_feat.tc_cnt))
        return -EINVAL;
@@ -4272,6 +4272,14 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
    return 0;
}

int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
                     struct tc_to_netdev *tc)
{
    if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
        return -EINVAL;
    return bnx2x_setup_tc(dev, tc->tc);
}

/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{

@@ -486,6 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
                     struct tc_to_netdev *tc);

int bnx2x_get_vf_config(struct net_device *dev, int vf,
                        struct ifla_vf_info *ivi);

@@ -13061,7 +13061,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = poll_bnx2x,
#endif
    .ndo_setup_tc = bnx2x_setup_tc,
    .ndo_setup_tc = __bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
    .ndo_set_vf_mac = bnx2x_set_vf_mac,
    .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
@@ -5370,9 +5370,16 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
    return 0;
}

static int bnxt_setup_tc(struct net_device *dev, u8 tc)
static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
                         struct tc_to_netdev *ntc)
{
    struct bnxt *bp = netdev_priv(dev);
    u8 tc;

    if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO)
        return -EINVAL;

    tc = ntc->tc;

    if (tc > bp->max_tc) {
        netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
@@ -1204,6 +1204,15 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc)
    return err;
}

static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
                            struct tc_to_netdev *tc)
{
    if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
        return -EINVAL;

    return fm10k_setup_tc(dev, tc->tc);
}

static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
    switch (cmd) {

@@ -1386,7 +1395,7 @@ static const struct net_device_ops fm10k_netdev_ops = {
    .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid,
    .ndo_set_rx_mode = fm10k_set_rx_mode,
    .ndo_get_stats64 = fm10k_get_stats64,
    .ndo_setup_tc = fm10k_setup_tc,
    .ndo_setup_tc = __fm10k_setup_tc,
    .ndo_set_vf_mac = fm10k_ndo_set_vf_mac,
    .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
    .ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
@@ -788,7 +788,8 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
                                      bool is_vf, bool is_netdev);
#ifdef I40E_FCOE
int i40e_close(struct net_device *netdev);
int i40e_setup_tc(struct net_device *netdev, u8 tc);
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
                    struct tc_to_netdev *tc);
void i40e_netpoll(struct net_device *netdev);
int i40e_fcoe_enable(struct net_device *netdev);
int i40e_fcoe_disable(struct net_device *netdev);

@@ -1457,7 +1457,7 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = {
    .ndo_tx_timeout = i40e_tx_timeout,
    .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
    .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
    .ndo_setup_tc = i40e_setup_tc,
    .ndo_setup_tc = __i40e_setup_tc,

#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = i40e_netpoll,

@@ -5253,11 +5253,7 @@ void i40e_down(struct i40e_vsi *vsi)
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 **/
#ifdef I40E_FCOE
int i40e_setup_tc(struct net_device *netdev, u8 tc)
#else
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
#endif
{
    struct i40e_netdev_priv *np = netdev_priv(netdev);
    struct i40e_vsi *vsi = np->vsi;

@@ -5310,6 +5306,19 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
    return ret;
}

#ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
                    struct tc_to_netdev *tc)
#else
static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
                           struct tc_to_netdev *tc)
#endif
{
    if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
        return -EINVAL;
    return i40e_setup_tc(netdev, tc->tc);
}

/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure

@@ -8951,7 +8960,7 @@ static const struct net_device_ops i40e_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = i40e_netpoll,
#endif
    .ndo_setup_tc = i40e_setup_tc,
    .ndo_setup_tc = __i40e_setup_tc,
#ifdef I40E_FCOE
    .ndo_fcoe_enable = i40e_fcoe_enable,
    .ndo_fcoe_disable = i40e_fcoe_disable,
@@ -796,6 +796,10 @@ struct ixgbe_adapter {
    u8 default_up;
    unsigned long fwd_bitmask; /* Bitmask indicating in use pools */

#define IXGBE_MAX_LINK_HANDLE 10
    struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE];
    unsigned long tables;

    /* maximum number of RETA entries among all devices supported by ixgbe
     * driver: currently it's x550 device in non-SRIOV mode
     */

@@ -925,6 +929,9 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
                                          u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                                          union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                                    struct ixgbe_fdir_filter *input,
                                    u16 sw_idx);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);

@@ -2520,9 +2520,9 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
    return ret;
}

static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                                           struct ixgbe_fdir_filter *input,
                                           u16 sw_idx)
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                                    struct ixgbe_fdir_filter *input,
                                    u16 sw_idx)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct hlist_node *node2;
@@ -51,6 +51,8 @@
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/vxlan.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>

#ifdef CONFIG_OF
#include <linux/of_net.h>

@@ -65,6 +67,7 @@
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =

@@ -5545,6 +5548,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */

    /* initialize static ixgbe jump table entries */
    adapter->jump_tables[0] = ixgbe_ipv4_fields;

    adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
                                 hw->mac.num_rar_entries,
                                 GFP_ATOMIC);

@@ -8200,6 +8206,228 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
    return 0;
}

static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
                               struct tc_cls_u32_offload *cls)
{
    int err;

    spin_lock(&adapter->fdir_perfect_lock);
    err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle);
    spin_unlock(&adapter->fdir_perfect_lock);
    return err;
}

static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
                                            __be16 protocol,
                                            struct tc_cls_u32_offload *cls)
{
    /* This ixgbe devices do not support hash tables at the moment
     * so abort when given hash tables.
     */
    if (cls->hnode.divisor > 0)
        return -EINVAL;

    set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables);
    return 0;
}

static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
                                            struct tc_cls_u32_offload *cls)
{
    clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables);
    return 0;
}

static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
                                  __be16 protocol,
                                  struct tc_cls_u32_offload *cls)
{
    u32 loc = cls->knode.handle & 0xfffff;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ixgbe_mat_field *field_ptr;
    struct ixgbe_fdir_filter *input;
    union ixgbe_atr_input mask;
#ifdef CONFIG_NET_CLS_ACT
    const struct tc_action *a;
#endif
    int i, err = 0;
    u8 queue;
    u32 handle;

    memset(&mask, 0, sizeof(union ixgbe_atr_input));
    handle = cls->knode.handle;

    /* At the moment cls_u32 jumps to transport layer and skips past
     * L2 headers. The canonical method to match L2 frames is to use
     * negative values. However this is error prone at best but really
     * just broken because there is no way to "know" what sort of hdr
     * is in front of the transport layer. Fix cls_u32 to support L2
     * headers when needed.
     */
    if (protocol != htons(ETH_P_IP))
        return -EINVAL;

    if (cls->knode.link_handle ||
        cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) {
        struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
        u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle);

        if (!test_bit(uhtid, &adapter->tables))
            return -EINVAL;

        for (i = 0; nexthdr[i].jump; i++) {
            if (nexthdr->o != cls->knode.sel->offoff ||
                nexthdr->s != cls->knode.sel->offshift ||
                nexthdr->m != cls->knode.sel->offmask ||
                /* do not support multiple key jumps its just mad */
                cls->knode.sel->nkeys > 1)
                return -EINVAL;

            if (nexthdr->off != cls->knode.sel->keys[0].off ||
                nexthdr->val != cls->knode.sel->keys[0].val ||
                nexthdr->mask != cls->knode.sel->keys[0].mask)
                return -EINVAL;

            if (uhtid >= IXGBE_MAX_LINK_HANDLE)
                return -EINVAL;

            adapter->jump_tables[uhtid] = nexthdr->jump;
        }
        return 0;
    }

    if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
        e_err(drv, "Location out of range\n");
        return -EINVAL;
    }

    /* cls u32 is a graph starting at root node 0x800. The driver tracks
     * links and also the fields used to advance the parser across each
     * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
     * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h
     * To add support for new nodes update ixgbe_model.h parse structures
     * this function _should_ be generic try not to hardcode values here.
     */
    if (TC_U32_USERHTID(handle) == 0x800) {
        field_ptr = adapter->jump_tables[0];
    } else {
        if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables))
            return -EINVAL;

        field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)];
    }

    if (!field_ptr)
        return -EINVAL;

    input = kzalloc(sizeof(*input), GFP_KERNEL);
    if (!input)
        return -ENOMEM;

    for (i = 0; i < cls->knode.sel->nkeys; i++) {
        int off = cls->knode.sel->keys[i].off;
        __be32 val = cls->knode.sel->keys[i].val;
        __be32 m = cls->knode.sel->keys[i].mask;
        bool found_entry = false;
        int j;

        for (j = 0; field_ptr[j].val; j++) {
            if (field_ptr[j].off == off &&
                field_ptr[j].mask == m) {
                field_ptr[j].val(input, &mask, val, m);
                input->filter.formatted.flow_type |=
                    field_ptr[j].type;
                found_entry = true;
                break;
            }
        }

        if (!found_entry)
            goto err_out;
    }

    mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
                               IXGBE_ATR_L4TYPE_MASK;

    if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
        mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

#ifdef CONFIG_NET_CLS_ACT
    if (list_empty(&cls->knode.exts->actions))
        goto err_out;

    list_for_each_entry(a, &cls->knode.exts->actions, list) {
        if (!is_tcf_gact_shot(a))
            goto err_out;
    }
#endif

    input->action = IXGBE_FDIR_DROP_QUEUE;
    queue = IXGBE_FDIR_DROP_QUEUE;
    input->sw_idx = loc;

    spin_lock(&adapter->fdir_perfect_lock);

    if (hlist_empty(&adapter->fdir_filter_list)) {
        memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
        err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
        if (err)
            goto err_out_w_lock;
    } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
        err = -EINVAL;
        goto err_out_w_lock;
    }

    ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
    err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
                                                input->sw_idx, queue);
    if (!err)
        ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
    spin_unlock(&adapter->fdir_perfect_lock);

    return err;
err_out_w_lock:
    spin_unlock(&adapter->fdir_perfect_lock);
err_out:
    kfree(input);
    return -EINVAL;
}

int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
                     struct tc_to_netdev *tc)
{
    struct ixgbe_adapter *adapter = netdev_priv(dev);

    if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
        tc->type == TC_SETUP_CLSU32) {
        if (!(dev->features & NETIF_F_HW_TC))
            return -EINVAL;

        switch (tc->cls_u32->command) {
        case TC_CLSU32_NEW_KNODE:
        case TC_CLSU32_REPLACE_KNODE:
            return ixgbe_configure_clsu32(adapter,
                                          proto, tc->cls_u32);
        case TC_CLSU32_DELETE_KNODE:
            return ixgbe_delete_clsu32(adapter, tc->cls_u32);
        case TC_CLSU32_NEW_HNODE:
        case TC_CLSU32_REPLACE_HNODE:
            return ixgbe_configure_clsu32_add_hnode(adapter, proto,
                                                    tc->cls_u32);
        case TC_CLSU32_DELETE_HNODE:
            return ixgbe_configure_clsu32_del_hnode(adapter,
                                                    tc->cls_u32);
        default:
            return -EINVAL;
        }
    }

    if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
        return -EINVAL;

    return ixgbe_setup_tc(dev, tc->tc);
}

#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{

@@ -8262,19 +8490,17 @@ static int ixgbe_set_features(struct net_device *netdev,
    }

    /*
     * Check if Flow Director n-tuple support was enabled or disabled. If
     * the state changed, we need to reset.
     * Check if Flow Director n-tuple support or hw_tc support was
     * enabled or disabled. If the state changed, we need to reset.
     */
    switch (features & NETIF_F_NTUPLE) {
    case NETIF_F_NTUPLE:
    if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
        /* turn off ATR, enable perfect filters and reset */
        if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
            need_reset = true;

        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
        adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
        break;
    default:
    } else {
        /* turn off perfect filters, enable ATR and reset */
        if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
            need_reset = true;

@@ -8282,23 +8508,16 @@ static int ixgbe_set_features(struct net_device *netdev,
        adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;

        /* We cannot enable ATR if SR-IOV is enabled */
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
            break;

        /* We cannot enable ATR if we have 2 or more traffic classes */
        if (netdev_get_num_tc(netdev) > 1)
            break;

        /* We cannot enable ATR if RSS is disabled */
        if (adapter->ring_feature[RING_F_RSS].limit <= 1)
            break;

        /* A sample rate of 0 indicates ATR disabled */
        if (!adapter->atr_sample_rate)
            break;

        adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
        break;
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
            /* We cannot enable ATR if we have 2 or more tcs */
            (netdev_get_num_tc(netdev) > 1) ||
            /* We cannot enable ATR if RSS is disabled */
            (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
            /* A sample rate of 0 indicates ATR disabled */
            (!adapter->atr_sample_rate))
            ; /* do nothing not supported */
        else /* otherwise supported and set the flag */
            adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
    }

    if (features & NETIF_F_HW_VLAN_CTAG_RX)

@@ -8657,9 +8876,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
    .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
    .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
    .ndo_get_stats64 = ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
    .ndo_setup_tc = ixgbe_setup_tc,
#endif
    .ndo_setup_tc = __ixgbe_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = ixgbe_netpoll,
#endif

@@ -9030,7 +9247,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    case ixgbe_mac_X550EM_x:
        netdev->features |= NETIF_F_SCTP_CRC;
        netdev->hw_features |= NETIF_F_SCTP_CRC |
                               NETIF_F_NTUPLE;
                               NETIF_F_NTUPLE |
                               NETIF_F_HW_TC;
        break;
    default:
        break;
drivers/net/ethernet/intel/ixgbe/ixgbe_model.h (new file, 112 lines)

@@ -0,0 +1,112 @@
/*******************************************************************************
 *
 * Intel 10 Gigabit PCI Express Linux drive
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _IXGBE_MODEL_H_
#define _IXGBE_MODEL_H_

#include "ixgbe.h"
#include "ixgbe_type.h"

struct ixgbe_mat_field {
    unsigned int off;
    unsigned int mask;
    int (*val)(struct ixgbe_fdir_filter *input,
               union ixgbe_atr_input *mask,
               __u32 val, __u32 m);
    unsigned int type;
};

static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
                                     union ixgbe_atr_input *mask,
                                     __u32 val, __u32 m)
{
    input->filter.formatted.src_ip[0] = val;
    mask->formatted.src_ip[0] = m;
    return 0;
}

static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
                                     union ixgbe_atr_input *mask,
                                     __u32 val, __u32 m)
{
    input->filter.formatted.dst_ip[0] = val;
    mask->formatted.dst_ip[0] = m;
    return 0;
}

static struct ixgbe_mat_field ixgbe_ipv4_fields[] = {
    { .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip,
      .type = IXGBE_ATR_FLOW_TYPE_IPV4},
    { .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip,
      .type = IXGBE_ATR_FLOW_TYPE_IPV4},
    { .val = NULL } /* terminal node */
};

static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input,
                                       union ixgbe_atr_input *mask,
                                       __u32 val, __u32 m)
{
    input->filter.formatted.src_port = val & 0xffff;
    mask->formatted.src_port = m & 0xffff;
    return 0;
};

static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input,
                                       union ixgbe_atr_input *mask,
                                       __u32 val, __u32 m)
{
    input->filter.formatted.dst_port = val & 0xffff;
    mask->formatted.dst_port = m & 0xffff;
    return 0;
};

static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
    {.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport,
     .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
    {.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport,
     .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
    { .val = NULL } /* terminal node */
};

struct ixgbe_nexthdr {
    /* offset, shift, and mask of position to next header */
    unsigned int o;
    __u32 s;
    __u32 m;
    /* match criteria to make this jump*/
    unsigned int off;
    __u32 val;
    __u32 mask;
    /* location of jump to make */
    struct ixgbe_mat_field *jump;
};

static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = {
    { .o = 0, .s = 6, .m = 0xf,
      .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields},
    { .jump = NULL } /* terminal node */
};
#endif /* _IXGBE_MODEL_H_ */
@@ -69,6 +69,15 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
    return 0;
}

static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
                              struct tc_to_netdev *tc)
{
    if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
        return -EINVAL;

    return mlx4_en_setup_tc(dev, tc->tc);
}

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {

@@ -2466,7 +2475,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#endif
    .ndo_set_features = mlx4_en_set_features,
    .ndo_fix_features = mlx4_en_fix_features,
    .ndo_setup_tc = mlx4_en_setup_tc,
    .ndo_setup_tc = __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
    .ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif

@@ -2504,7 +2513,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#endif
    .ndo_set_features = mlx4_en_set_features,
    .ndo_fix_features = mlx4_en_fix_features,
    .ndo_setup_tc = mlx4_en_setup_tc,
    .ndo_setup_tc = __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
    .ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
@@ -32,7 +32,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
                 struct tc_to_netdev *tc);
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;

@@ -562,14 +562,20 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
                         efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
                 struct tc_to_netdev *ntc)
{
    struct efx_nic *efx = netdev_priv(net_dev);
    struct efx_channel *channel;
    struct efx_tx_queue *tx_queue;
    unsigned tc;
    unsigned tc, num_tc;
    int rc;

    if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO)
        return -EINVAL;

    num_tc = ntc->tc;

    if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
        return -EINVAL;
@@ -1835,22 +1835,26 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
    return 0;
}

static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
                          struct tc_to_netdev *tc)
{
    int i;

    /* setup tc must be called under rtnl lock */
    ASSERT_RTNL();

    if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
        return -EINVAL;

    /* Sanity-check the number of traffic classes requested */
    if ((dev->real_num_tx_queues <= 1) ||
        (dev->real_num_tx_queues < num_tc))
        (dev->real_num_tx_queues < tc->tc))
        return -EINVAL;

    /* Configure traffic class to queue mappings */
    if (num_tc) {
        netdev_set_num_tc(dev, num_tc);
        for (i = 0; i < num_tc; i++)
    if (tc->tc) {
        netdev_set_num_tc(dev, tc->tc);
        for (i = 0; i < tc->tc; i++)
            netdev_set_tc_queue(dev, i, 1, i);
    } else {
        netdev_reset_tc(dev);
@@ -67,6 +67,8 @@ enum {
    NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
    NETIF_F_BUSY_POLL_BIT, /* Busy poll */

    NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */

    /*
     * Add your fresh new feature above and remember to update
     * netdev_features_strings[] in net/core/ethtool.c and maybe

@@ -124,6 +126,7 @@ enum {
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
#define NETIF_F_HW_TC __NETIF_F(HW_TC)

#define for_each_netdev_feature(mask_addr, bit) \
    for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
@@ -51,6 +51,7 @@
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>

struct netpoll_info;
struct device;

@@ -778,6 +779,25 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
                                       struct sk_buff *skb);

/* These structures hold the attributes of qdisc and classifiers
 * that are being passed to the netdevice through the setup_tc op.
 */
enum {
    TC_SETUP_MQPRIO,
    TC_SETUP_CLSU32,
};

struct tc_cls_u32_offload;

struct tc_to_netdev {
    unsigned int type;
    union {
        u8 tc;
        struct tc_cls_u32_offload *cls_u32;
    };
};


/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are

@@ -1150,7 +1170,10 @@ struct net_device_ops {
    int (*ndo_set_vf_rss_query_en)(
                                   struct net_device *dev,
                                   int vf, bool setting);
    int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
    int (*ndo_setup_tc)(struct net_device *dev,
                        u32 handle,
                        __be16 protocol,
                        struct tc_to_netdev *tc);
#if IS_ENABLED(CONFIG_FCOE)
    int (*ndo_fcoe_enable)(struct net_device *dev);
    int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -358,4 +358,38 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
#endif /* CONFIG_NET_CLS_IND */

struct tc_cls_u32_knode {
    struct tcf_exts *exts;
    u8 fshift;
    u32 handle;
    u32 val;
    u32 mask;
    u32 link_handle;
    struct tc_u32_sel *sel;
};

struct tc_cls_u32_hnode {
    u32 handle;
    u32 prio;
    unsigned int divisor;
};

enum tc_clsu32_command {
    TC_CLSU32_NEW_KNODE,
    TC_CLSU32_REPLACE_KNODE,
    TC_CLSU32_DELETE_KNODE,
    TC_CLSU32_NEW_HNODE,
    TC_CLSU32_REPLACE_HNODE,
    TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
    /* knode values */
    enum tc_clsu32_command command;
    union {
        struct tc_cls_u32_knode knode;
        struct tc_cls_u32_hnode hnode;
    };
};

#endif
@@ -2,6 +2,7 @@
#define __NET_TC_GACT_H

#include <net/act_api.h>
#include <linux/tc_act/tc_gact.h>

struct tcf_gact {
    struct tcf_common common;

@@ -15,4 +16,19 @@ struct tcf_gact {
#define to_gact(a) \
    container_of(a->priv, struct tcf_gact, common)

#ifdef CONFIG_NET_CLS_ACT
static inline bool is_tcf_gact_shot(const struct tc_action *a)
{
    struct tcf_gact *gact;

    if (a->ops && a->ops->type != TCA_ACT_GACT)
        return false;

    gact = a->priv;
    if (gact->tcf_action == TC_ACT_SHOT)
        return true;

    return false;
}
#endif
#endif /* __NET_TC_GACT_H */
@@ -98,6 +98,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
    [NETIF_F_RXALL_BIT] = "rx-all",
    [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
    [NETIF_F_BUSY_POLL_BIT] = "busy-poll",
    [NETIF_F_HW_TC_BIT] = "hw-tc-offload",
};

static const char
@@ -43,6 +43,7 @@
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/netdevice.h>

struct tc_u_knode {
    struct tc_u_knode __rcu *next;

@@ -424,6 +425,93 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
    return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
{
    struct net_device *dev = tp->q->dev_queue->dev;
    struct tc_cls_u32_offload u32_offload = {0};
    struct tc_to_netdev offload;

    offload.type = TC_SETUP_CLSU32;
    offload.cls_u32 = &u32_offload;

    if (dev->netdev_ops->ndo_setup_tc) {
        offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
        offload.cls_u32->knode.handle = handle;
        dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
                                      tp->protocol, &offload);
    }
}

static void u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
{
    struct net_device *dev = tp->q->dev_queue->dev;
    struct tc_cls_u32_offload u32_offload = {0};
    struct tc_to_netdev offload;

    offload.type = TC_SETUP_CLSU32;
    offload.cls_u32 = &u32_offload;

    if (dev->netdev_ops->ndo_setup_tc) {
        offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
        offload.cls_u32->hnode.divisor = h->divisor;
        offload.cls_u32->hnode.handle = h->handle;
        offload.cls_u32->hnode.prio = h->prio;

        dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
                                      tp->protocol, &offload);
    }
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
{
    struct net_device *dev = tp->q->dev_queue->dev;
    struct tc_cls_u32_offload u32_offload = {0};
    struct tc_to_netdev offload;

    offload.type = TC_SETUP_CLSU32;
    offload.cls_u32 = &u32_offload;

    if (dev->netdev_ops->ndo_setup_tc) {
        offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
        offload.cls_u32->hnode.divisor = h->divisor;
        offload.cls_u32->hnode.handle = h->handle;
        offload.cls_u32->hnode.prio = h->prio;

        dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
                                      tp->protocol, &offload);
    }
}

static void u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
{
    struct net_device *dev = tp->q->dev_queue->dev;
    struct tc_cls_u32_offload u32_offload = {0};
    struct tc_to_netdev offload;

    offload.type = TC_SETUP_CLSU32;
    offload.cls_u32 = &u32_offload;

    if (dev->netdev_ops->ndo_setup_tc) {
        offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
        offload.cls_u32->knode.handle = n->handle;
        offload.cls_u32->knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
        offload.cls_u32->knode.val = n->val;
        offload.cls_u32->knode.mask = n->mask;
#else
        offload.cls_u32->knode.val = 0;
        offload.cls_u32->knode.mask = 0;
#endif
        offload.cls_u32->knode.sel = &n->sel;
        offload.cls_u32->knode.exts = &n->exts;
        if (n->ht_down)
            offload.cls_u32->knode.link_handle = n->ht_down->handle;

        dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
                                      tp->protocol, &offload);
    }
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
    struct tc_u_knode *n;

@@ -434,6 +522,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
            RCU_INIT_POINTER(ht->ht[h],
                             rtnl_dereference(n->next));
            tcf_unbind_filter(tp, &n->res);
            u32_remove_hw_knode(tp, n->handle);
            call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
        }
    }

@@ -454,6 +543,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
         phn;
         hn = &phn->next, phn = rtnl_dereference(*hn)) {
        if (phn == ht) {
            u32_clear_hw_hnode(tp, ht);
            RCU_INIT_POINTER(*hn, ht->next);
            kfree_rcu(ht, rcu);
            return 0;

@@ -540,8 +630,10 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
    if (ht == NULL)
        return 0;

    if (TC_U32_KEY(ht->handle))
    if (TC_U32_KEY(ht->handle)) {
        u32_remove_hw_knode(tp, ht->handle);
        return u32_delete_key(tp, (struct tc_u_knode *)ht);
    }

    if (root_ht == ht)
        return -EINVAL;

@@ -769,6 +861,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        u32_replace_knode(tp, tp_c, new);
        tcf_unbind_filter(tp, &n->res);
        call_rcu(&n->rcu, u32_delete_key_rcu);
        u32_replace_hw_knode(tp, new);
        return 0;
    }

@@ -795,6 +888,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        RCU_INIT_POINTER(ht->next, tp_c->hlist);
        rcu_assign_pointer(tp_c->hlist, ht);
        *arg = (unsigned long)ht;

        u32_replace_hw_hnode(tp, ht);
        return 0;
    }

@@ -877,7 +972,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,

        RCU_INIT_POINTER(n->next, pins);
        rcu_assign_pointer(*ins, n);

        u32_replace_hw_knode(tp, n);
        *arg = (unsigned long)n;
        return 0;
    }
@@ -28,6 +28,7 @@ static void mqprio_destroy(struct Qdisc *sch)
{
    struct net_device *dev = qdisc_dev(sch);
    struct mqprio_sched *priv = qdisc_priv(sch);
    struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO};
    unsigned int ntx;

    if (priv->qdiscs) {

@@ -39,7 +40,7 @@ static void mqprio_destroy(struct Qdisc *sch)
    }

    if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
        dev->netdev_ops->ndo_setup_tc(dev, 0);
        dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
    else
        netdev_set_num_tc(dev, 0);
}

@@ -140,8 +141,11 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
     * supplied and verified mapping
     */
    if (qopt->hw) {
        struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO,
                                  .tc = qopt->num_tc};

        priv->hw_owned = 1;
        err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
        err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
        if (err)
            goto err;
    } else {