net: core: rename indirect block ingress cb function
With indirect blocks, a driver can register for callbacks from a device
that it does not 'own', for example, a tunnel device. When registering to
or unregistering from a new device, a callback is triggered to generate
a bind/unbind event. This, in turn, allows the driver to receive any
existing rules or to properly clean up installed rules.
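For context, a minimal sketch of the registration side of this mechanism,
assuming a hypothetical driver (the example_* names are illustrative and not
part of this patch; __flow_indr_block_cb_register and
flow_indr_block_bind_cb_t are the real API, and the final cb_ident argument
is as in this kernel version):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

/* Hypothetical driver callback: invoked when a flow block is set up or
 * torn down on a foreign device (e.g. a tunnel netdev) that this driver
 * registered an interest in.
 */
static int example_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
				    enum tc_setup_type type, void *type_data)
{
	if (type != TC_SETUP_BLOCK)
		return -EOPNOTSUPP;

	/* A real driver would inspect the struct flow_block_offload in
	 * type_data here and attach or detach its flow block callbacks.
	 */
	return 0;
}

/* Registering triggers a bind event, so rules already installed on the
 * tunnel device are replayed to the driver; unregistering triggers the
 * matching unbind so installed rules can be cleaned up.
 */
static int example_watch_tunnel_dev(struct net_device *tun_netdev,
				    void *app_priv)
{
	return __flow_indr_block_cb_register(tun_netdev, app_priv,
					     example_indr_setup_tc_cb,
					     app_priv /* cb_ident */);
}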
When first added, it was assumed that all indirect block registrations
would be for ingress offloads. However, the NFP driver can, in some
instances, support clsact qdisc binds for egress offload.
Change the name of the indirect block callback command in flow_offload to
remove the 'ingress' identifier from it. While this does not change
functionality, a follow-up patch will implement a more generic callback
than those currently supporting only ingress offload.
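After the rename, a subsystem registers its command callback through the
generic entry points. A minimal sketch of the pattern used by
nft_offload_init() and tc_filter_init() in the diff below (the example_*
names are placeholders; the types and registration functions are the renamed
API from this patch):

#include <linux/init.h>
#include <linux/list.h>
#include <net/flow_offload.h>

/* Stand-in for a subsystem command callback such as nft_indr_block_cb:
 * called for every registered driver whenever a bind/unbind is generated.
 */
static void example_indr_block_cmd(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_priv,
				   enum flow_block_command command)
{
	/* Offer existing rules on FLOW_BLOCK_BIND; clean up on UNBIND. */
}

static struct flow_indr_block_entry example_block_entry = {
	.cb	= example_indr_block_cmd,
	.list	= LIST_HEAD_INIT(example_block_entry.list),
};

static int __init example_offload_init(void)
{
	flow_indr_add_block_cb(&example_block_entry);
	return 0;
}

static void example_offload_exit(void)
{
	flow_indr_del_block_cb(&example_block_entry);
}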
Fixes: 4d12ba4278 ("nfp: flower: allow offloading of matches on 'internal' ports")
Signed-off-by: John Hurley <john.hurley@netronome.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e0b60903b4
commit dbad340889
include/net/flow_offload.h
@@ -380,19 +380,18 @@ static inline void flow_block_init(struct flow_block *flow_block)
 typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
 				      enum tc_setup_type type, void *type_data);
 
-typedef void flow_indr_block_ing_cmd_t(struct net_device *dev,
-				       flow_indr_block_bind_cb_t *cb,
-				       void *cb_priv,
-				       enum flow_block_command command);
+typedef void flow_indr_block_cmd_t(struct net_device *dev,
+				   flow_indr_block_bind_cb_t *cb, void *cb_priv,
+				   enum flow_block_command command);
 
-struct flow_indr_block_ing_entry {
-	flow_indr_block_ing_cmd_t *cb;
+struct flow_indr_block_entry {
+	flow_indr_block_cmd_t *cb;
 	struct list_head list;
 };
 
-void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+void flow_indr_add_block_cb(struct flow_indr_block_entry *entry);
 
-void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+void flow_indr_del_block_cb(struct flow_indr_block_entry *entry);
 
 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
 				  flow_indr_block_bind_cb_t *cb,
net/core/flow_offload.c
@@ -283,7 +283,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
 }
 EXPORT_SYMBOL(flow_block_cb_setup_simple);
 
-static LIST_HEAD(block_ing_cb_list);
+static LIST_HEAD(block_cb_list);
 
 static struct rhashtable indr_setup_block_ht;
 
@@ -391,20 +391,19 @@ static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
 	kfree(indr_block_cb);
 }
 
-static DEFINE_MUTEX(flow_indr_block_ing_cb_lock);
+static DEFINE_MUTEX(flow_indr_block_cb_lock);
 
-static void flow_block_ing_cmd(struct net_device *dev,
-			       flow_indr_block_bind_cb_t *cb,
-			       void *cb_priv,
-			       enum flow_block_command command)
+static void flow_block_cmd(struct net_device *dev,
+			   flow_indr_block_bind_cb_t *cb, void *cb_priv,
+			   enum flow_block_command command)
 {
-	struct flow_indr_block_ing_entry *entry;
+	struct flow_indr_block_entry *entry;
 
-	mutex_lock(&flow_indr_block_ing_cb_lock);
-	list_for_each_entry(entry, &block_ing_cb_list, list) {
+	mutex_lock(&flow_indr_block_cb_lock);
+	list_for_each_entry(entry, &block_cb_list, list) {
 		entry->cb(dev, cb, cb_priv, command);
 	}
-	mutex_unlock(&flow_indr_block_ing_cb_lock);
+	mutex_unlock(&flow_indr_block_cb_lock);
 }
 
 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
@@ -424,8 +423,8 @@ int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
 	if (err)
 		goto err_dev_put;
 
-	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
-			   FLOW_BLOCK_BIND);
+	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
+		       FLOW_BLOCK_BIND);
 
 	return 0;
 
@@ -464,8 +463,8 @@ void __flow_indr_block_cb_unregister(struct net_device *dev,
 	if (!indr_block_cb)
 		return;
 
-	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
-			   FLOW_BLOCK_UNBIND);
+	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
+		       FLOW_BLOCK_UNBIND);
 
 	flow_indr_block_cb_del(indr_block_cb);
 	flow_indr_block_dev_put(indr_dev);
@@ -499,21 +498,21 @@ void flow_indr_block_call(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(flow_indr_block_call);
 
-void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
+void flow_indr_add_block_cb(struct flow_indr_block_entry *entry)
 {
-	mutex_lock(&flow_indr_block_ing_cb_lock);
-	list_add_tail(&entry->list, &block_ing_cb_list);
-	mutex_unlock(&flow_indr_block_ing_cb_lock);
+	mutex_lock(&flow_indr_block_cb_lock);
+	list_add_tail(&entry->list, &block_cb_list);
+	mutex_unlock(&flow_indr_block_cb_lock);
 }
-EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);
+EXPORT_SYMBOL_GPL(flow_indr_add_block_cb);
 
-void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry)
+void flow_indr_del_block_cb(struct flow_indr_block_entry *entry)
 {
-	mutex_lock(&flow_indr_block_ing_cb_lock);
+	mutex_lock(&flow_indr_block_cb_lock);
 	list_del(&entry->list);
-	mutex_unlock(&flow_indr_block_ing_cb_lock);
+	mutex_unlock(&flow_indr_block_cb_lock);
 }
-EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb);
+EXPORT_SYMBOL_GPL(flow_indr_del_block_cb);
 
 static int __init init_flow_indr_rhashtable(void)
 {
net/netfilter/nf_tables_offload.c
@@ -588,7 +588,7 @@ static int nft_offload_netdev_event(struct notifier_block *this,
 	return NOTIFY_DONE;
 }
 
-static struct flow_indr_block_ing_entry block_ing_entry = {
+static struct flow_indr_block_entry block_ing_entry = {
 	.cb = nft_indr_block_cb,
 	.list = LIST_HEAD_INIT(block_ing_entry.list),
 };
@@ -605,13 +605,13 @@ int nft_offload_init(void)
 	if (err < 0)
 		return err;
 
-	flow_indr_add_block_ing_cb(&block_ing_entry);
+	flow_indr_add_block_cb(&block_ing_entry);
 
 	return 0;
 }
 
 void nft_offload_exit(void)
 {
-	flow_indr_del_block_ing_cb(&block_ing_entry);
+	flow_indr_del_block_cb(&block_ing_entry);
 	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
 }
net/sched/cls_api.c
@@ -3626,7 +3626,7 @@ static struct pernet_operations tcf_net_ops = {
 	.size = sizeof(struct tcf_net),
 };
 
-static struct flow_indr_block_ing_entry block_ing_entry = {
+static struct flow_indr_block_entry block_ing_entry = {
 	.cb = tc_indr_block_get_and_ing_cmd,
 	.list = LIST_HEAD_INIT(block_ing_entry.list),
 };
@@ -3643,7 +3643,7 @@ static int __init tc_filter_init(void)
 	if (err)
 		goto err_register_pernet_subsys;
 
-	flow_indr_add_block_ing_cb(&block_ing_entry);
+	flow_indr_add_block_cb(&block_ing_entry);
 
 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
 		      RTNL_FLAG_DOIT_UNLOCKED);