// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

2015-10-15 00:40:48 +07:00
|
|
|
/* FIFO of pending deferred ops; protected by deferred_lock (BH-safe,
 * since enqueue may run in atomic context — see GFP_ATOMIC below).
 */
static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);
|
|
|
|
|
|
|
|
/* Handler invoked for a deferred op; runs under rtnl_lock from the
 * deferred-process work item.
 */
typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

/* One queued deferred op: list linkage, target device (reference held
 * while queued), handler, and an inline copy of the op's argument data
 * (flexible array member sized at enqueue time).
 */
struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};
|
|
|
|
|
|
|
|
static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
|
|
|
|
{
|
|
|
|
struct switchdev_deferred_item *dfitem;
|
|
|
|
|
|
|
|
spin_lock_bh(&deferred_lock);
|
|
|
|
if (list_empty(&deferred)) {
|
|
|
|
dfitem = NULL;
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
dfitem = list_first_entry(&deferred,
|
|
|
|
struct switchdev_deferred_item, list);
|
|
|
|
list_del(&dfitem->list);
|
|
|
|
unlock:
|
|
|
|
spin_unlock_bh(&deferred_lock);
|
|
|
|
return dfitem;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* switchdev_deferred_process - Process ops in deferred queue
|
|
|
|
*
|
|
|
|
* Called to flush the ops currently queued in deferred ops queue.
|
|
|
|
* rtnl_lock must be held.
|
|
|
|
*/
|
|
|
|
void switchdev_deferred_process(void)
|
|
|
|
{
|
|
|
|
struct switchdev_deferred_item *dfitem;
|
|
|
|
|
|
|
|
ASSERT_RTNL();
|
|
|
|
|
|
|
|
while ((dfitem = switchdev_deferred_dequeue())) {
|
|
|
|
dfitem->func(dfitem->dev, dfitem->data);
|
|
|
|
dev_put(dfitem->dev);
|
|
|
|
kfree(dfitem);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
|
|
|
|
|
|
|
|
/* Work handler: flush the deferred-op queue under rtnl_lock. */
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}
|
|
|
|
|
|
|
|
/* Scheduled on every enqueue to drain the deferred queue under rtnl_lock. */
static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
|
|
|
|
|
|
|
|
static int switchdev_deferred_enqueue(struct net_device *dev,
|
|
|
|
const void *data, size_t data_len,
|
|
|
|
switchdev_deferred_func_t *func)
|
|
|
|
{
|
|
|
|
struct switchdev_deferred_item *dfitem;
|
|
|
|
|
|
|
|
dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
|
|
|
|
if (!dfitem)
|
|
|
|
return -ENOMEM;
|
|
|
|
dfitem->dev = dev;
|
|
|
|
dfitem->func = func;
|
|
|
|
memcpy(dfitem->data, data, data_len);
|
|
|
|
dev_hold(dev);
|
|
|
|
spin_lock_bh(&deferred_lock);
|
|
|
|
list_add_tail(&dfitem->list, &deferred);
|
|
|
|
spin_unlock_bh(&deferred_lock);
|
|
|
|
schedule_work(&deferred_process_work);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-02-28 02:44:31 +07:00
|
|
|
static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
|
|
|
|
struct net_device *dev,
|
|
|
|
const struct switchdev_attr *attr,
|
|
|
|
struct switchdev_trans *trans)
|
switchdev: introduce get/set attrs ops
Add two new swdev ops for get/set switch port attributes. Most swdev
interactions on a port are gets or sets on port attributes, so rather than
adding ops for each attribute, let's define clean get/set ops for all
attributes, and then we can have clear, consistent rules on how attributes
propagate on stacked devs.
Add the basic algorithms for get/set attr ops. Use the same recusive algo
to walk lower devs we've used for STP updates, for example. For get,
compare attr value for each lower dev and only return success if attr
values match across all lower devs. For sets, set the same attr value for
all lower devs. We'll use a two-phase prepare-commit transaction model for
sets. In the first phase, the driver(s) are asked if attr set is OK. If
all OK, the commit attr set in second phase. A driver would NACK the
prepare phase if it can't set the attr due to lack of resources or support,
within it's control. RTNL lock must be held across both phases because
we'll recurse all lower devs first in prepare phase, and then recurse all
lower devs again in commit phase. If any lower dev fails the prepare
phase, we need to abort the transaction for all lower devs.
If lower dev recusion isn't desired, allow a flag SWITCHDEV_F_NO_RECURSE to
indicate get/set only work on port (lowest) device.
Signed-off-by: Scott Feldman <sfeldma@gmail.com>
Acked-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-05-10 23:47:48 +07:00
|
|
|
{
|
2019-02-28 02:44:31 +07:00
|
|
|
int err;
|
|
|
|
int rc;
|
switchdev: introduce get/set attrs ops
Add two new swdev ops for get/set switch port attributes. Most swdev
interactions on a port are gets or sets on port attributes, so rather than
adding ops for each attribute, let's define clean get/set ops for all
attributes, and then we can have clear, consistent rules on how attributes
propagate on stacked devs.
Add the basic algorithms for get/set attr ops. Use the same recusive algo
to walk lower devs we've used for STP updates, for example. For get,
compare attr value for each lower dev and only return success if attr
values match across all lower devs. For sets, set the same attr value for
all lower devs. We'll use a two-phase prepare-commit transaction model for
sets. In the first phase, the driver(s) are asked if attr set is OK. If
all OK, the commit attr set in second phase. A driver would NACK the
prepare phase if it can't set the attr due to lack of resources or support,
within it's control. RTNL lock must be held across both phases because
we'll recurse all lower devs first in prepare phase, and then recurse all
lower devs again in commit phase. If any lower dev fails the prepare
phase, we need to abort the transaction for all lower devs.
If lower dev recusion isn't desired, allow a flag SWITCHDEV_F_NO_RECURSE to
indicate get/set only work on port (lowest) device.
Signed-off-by: Scott Feldman <sfeldma@gmail.com>
Acked-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-05-10 23:47:48 +07:00
|
|
|
|
2019-02-28 02:44:31 +07:00
|
|
|
struct switchdev_notifier_port_attr_info attr_info = {
|
|
|
|
.attr = attr,
|
|
|
|
.trans = trans,
|
|
|
|
.handled = false,
|
|
|
|
};
|
switchdev: introduce get/set attrs ops
Add two new swdev ops for get/set switch port attributes. Most swdev
interactions on a port are gets or sets on port attributes, so rather than
adding ops for each attribute, let's define clean get/set ops for all
attributes, and then we can have clear, consistent rules on how attributes
propagate on stacked devs.
Add the basic algorithms for get/set attr ops. Use the same recusive algo
to walk lower devs we've used for STP updates, for example. For get,
compare attr value for each lower dev and only return success if attr
values match across all lower devs. For sets, set the same attr value for
all lower devs. We'll use a two-phase prepare-commit transaction model for
sets. In the first phase, the driver(s) are asked if attr set is OK. If
all OK, the commit attr set in second phase. A driver would NACK the
prepare phase if it can't set the attr due to lack of resources or support,
within it's control. RTNL lock must be held across both phases because
we'll recurse all lower devs first in prepare phase, and then recurse all
lower devs again in commit phase. If any lower dev fails the prepare
phase, we need to abort the transaction for all lower devs.
If lower dev recusion isn't desired, allow a flag SWITCHDEV_F_NO_RECURSE to
indicate get/set only work on port (lowest) device.
Signed-off-by: Scott Feldman <sfeldma@gmail.com>
Acked-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-05-10 23:47:48 +07:00
|
|
|
|
2019-02-28 02:44:31 +07:00
|
|
|
rc = call_switchdev_blocking_notifiers(nt, dev,
|
|
|
|
&attr_info.info, NULL);
|
|
|
|
err = notifier_to_errno(rc);
|
|
|
|
if (err) {
|
|
|
|
WARN_ON(!attr_info.handled);
|
|
|
|
return err;
|
switchdev: introduce get/set attrs ops
Add two new swdev ops for get/set switch port attributes. Most swdev
interactions on a port are gets or sets on port attributes, so rather than
adding ops for each attribute, let's define clean get/set ops for all
attributes, and then we can have clear, consistent rules on how attributes
propagate on stacked devs.
Add the basic algorithms for get/set attr ops. Use the same recusive algo
to walk lower devs we've used for STP updates, for example. For get,
compare attr value for each lower dev and only return success if attr
values match across all lower devs. For sets, set the same attr value for
all lower devs. We'll use a two-phase prepare-commit transaction model for
sets. In the first phase, the driver(s) are asked if attr set is OK. If
all OK, the commit attr set in second phase. A driver would NACK the
prepare phase if it can't set the attr due to lack of resources or support,
within it's control. RTNL lock must be held across both phases because
we'll recurse all lower devs first in prepare phase, and then recurse all
lower devs again in commit phase. If any lower dev fails the prepare
phase, we need to abort the transaction for all lower devs.
If lower dev recusion isn't desired, allow a flag SWITCHDEV_F_NO_RECURSE to
indicate get/set only work on port (lowest) device.
Signed-off-by: Scott Feldman <sfeldma@gmail.com>
Acked-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-05-10 23:47:48 +07:00
|
|
|
}
|
|
|
|
|
2019-02-28 02:44:31 +07:00
|
|
|
if (!attr_info.handled)
|
|
|
|
return -EOPNOTSUPP;
|
2015-10-09 09:23:18 +07:00
|
|
|
|
2019-02-28 02:44:31 +07:00
|
|
|
return 0;
|
switchdev: introduce get/set attrs ops
Add two new swdev ops for get/set switch port attributes. Most swdev
interactions on a port are gets or sets on port attributes, so rather than
adding ops for each attribute, let's define clean get/set ops for all
attributes, and then we can have clear, consistent rules on how attributes
propagate on stacked devs.
Add the basic algorithms for get/set attr ops. Use the same recusive algo
to walk lower devs we've used for STP updates, for example. For get,
compare attr value for each lower dev and only return success if attr
values match across all lower devs. For sets, set the same attr value for
all lower devs. We'll use a two-phase prepare-commit transaction model for
sets. In the first phase, the driver(s) are asked if attr set is OK. If
all OK, the commit attr set in second phase. A driver would NACK the
prepare phase if it can't set the attr due to lack of resources or support,
within it's control. RTNL lock must be held across both phases because
we'll recurse all lower devs first in prepare phase, and then recurse all
lower devs again in commit phase. If any lower dev fails the prepare
phase, we need to abort the transaction for all lower devs.
If lower dev recusion isn't desired, allow a flag SWITCHDEV_F_NO_RECURSE to
indicate get/set only work on port (lowest) device.
Signed-off-by: Scott Feldman <sfeldma@gmail.com>
Acked-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-05-10 23:47:48 +07:00
|
|
|
}
|
|
|
|
|
2015-10-15 00:40:50 +07:00
|
|
|
/* Synchronously set @attr on @dev using the two-phase prepare/commit
 * transaction model. Called under rtnl_lock (directly from
 * switchdev_port_attr_set after ASSERT_RTNL, or from the deferred
 * process work which takes rtnl_lock).
 */
static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	if (err)
		return err;

	/* Phase II: commit attr set. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);

	return err;
}
|
2015-10-15 00:40:50 +07:00
|
|
|
|
|
|
|
static void switchdev_port_attr_set_deferred(struct net_device *dev,
|
|
|
|
const void *data)
|
|
|
|
{
|
|
|
|
const struct switchdev_attr *attr = data;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = switchdev_port_attr_set_now(dev, attr);
|
|
|
|
if (err && err != -EOPNOTSUPP)
|
|
|
|
netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
|
|
|
|
err, attr->id);
|
2016-04-21 17:52:43 +07:00
|
|
|
if (attr->complete)
|
|
|
|
attr->complete(dev, err, attr->complete_priv);
|
2015-10-15 00:40:50 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Queue an attr set for deferred execution; @attr is copied by value
 * into the deferred item, so the caller's storage may go away.
 */
static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
|
|
|
|
|
|
|
|
/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
|
|
|
|
|
2015-10-29 13:17:31 +07:00
|
|
|
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
|
|
|
|
{
|
|
|
|
switch (obj->id) {
|
|
|
|
case SWITCHDEV_OBJ_ID_PORT_VLAN:
|
|
|
|
return sizeof(struct switchdev_obj_port_vlan);
|
2016-01-11 03:06:22 +07:00
|
|
|
case SWITCHDEV_OBJ_ID_PORT_MDB:
|
|
|
|
return sizeof(struct switchdev_obj_port_mdb);
|
2017-11-10 05:10:59 +07:00
|
|
|
case SWITCHDEV_OBJ_ID_HOST_MDB:
|
|
|
|
return sizeof(struct switchdev_obj_port_mdb);
|
2015-10-29 13:17:31 +07:00
|
|
|
default:
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-11-23 06:32:57 +07:00
|
|
|
static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
|
|
|
|
struct net_device *dev,
|
|
|
|
const struct switchdev_obj *obj,
|
2018-12-13 00:02:52 +07:00
|
|
|
struct switchdev_trans *trans,
|
|
|
|
struct netlink_ext_ack *extack)
|
2015-05-10 23:47:52 +07:00
|
|
|
{
|
2018-11-23 06:32:57 +07:00
|
|
|
int rc;
|
|
|
|
int err;
|
2015-05-10 23:47:52 +07:00
|
|
|
|
2018-11-23 06:32:57 +07:00
|
|
|
struct switchdev_notifier_port_obj_info obj_info = {
|
|
|
|
.obj = obj,
|
|
|
|
.trans = trans,
|
|
|
|
.handled = false,
|
|
|
|
};
|
2015-05-10 23:47:52 +07:00
|
|
|
|
2018-12-13 00:02:54 +07:00
|
|
|
rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
|
2018-11-23 06:32:57 +07:00
|
|
|
err = notifier_to_errno(rc);
|
|
|
|
if (err) {
|
|
|
|
WARN_ON(!obj_info.handled);
|
|
|
|
return err;
|
2015-05-10 23:47:52 +07:00
|
|
|
}
|
2018-11-23 06:32:57 +07:00
|
|
|
if (!obj_info.handled)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
return 0;
|
2015-05-10 23:47:52 +07:00
|
|
|
}
|
|
|
|
|
2015-10-15 00:40:52 +07:00
|
|
|
/* Synchronously add @obj to @dev using the two-phase prepare/commit
 * transaction model. rtnl_lock must be held (asserted below).
 */
static int switchdev_port_obj_add_now(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	if (err)
		return err;

	/* Phase II: commit obj add. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);

	return err;
}
|
2015-10-15 00:40:52 +07:00
|
|
|
|
|
|
|
static void switchdev_port_obj_add_deferred(struct net_device *dev,
|
|
|
|
const void *data)
|
|
|
|
{
|
|
|
|
const struct switchdev_obj *obj = data;
|
|
|
|
int err;
|
|
|
|
|
2018-12-13 00:02:52 +07:00
|
|
|
err = switchdev_port_obj_add_now(dev, obj, NULL);
|
2015-10-15 00:40:52 +07:00
|
|
|
if (err && err != -EOPNOTSUPP)
|
|
|
|
netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
|
|
|
|
err, obj->id);
|
2016-04-21 17:52:43 +07:00
|
|
|
if (obj->complete)
|
|
|
|
obj->complete(dev, err, obj->complete_priv);
|
2015-10-15 00:40:52 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Queue an obj add for deferred execution; the full concrete object
 * (switchdev_obj_size bytes) is copied into the deferred item.
 */
static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
|
2015-05-10 23:47:52 +07:00
|
|
|
|
|
|
|
/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 * @extack: netlink extended ack
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	/* NOTE: on the deferred path @extack is dropped — the deferred
	 * handler calls switchdev_port_obj_add_now(dev, obj, NULL).
	 */
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_add_now(dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
|
|
|
|
|
|
|
|
/* Synchronously delete @obj from @dev. Unlike add, del is a single
 * notification with no prepare/commit transaction (NULL trans).
 */
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL, NULL);
}
|
2015-10-15 00:40:52 +07:00
|
|
|
|
|
|
|
static void switchdev_port_obj_del_deferred(struct net_device *dev,
|
|
|
|
const void *data)
|
|
|
|
{
|
|
|
|
const struct switchdev_obj *obj = data;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = switchdev_port_obj_del_now(dev, obj);
|
|
|
|
if (err && err != -EOPNOTSUPP)
|
|
|
|
netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
|
|
|
|
err, obj->id);
|
2016-04-21 17:52:43 +07:00
|
|
|
if (obj->complete)
|
|
|
|
obj->complete(dev, err, obj->complete_priv);
|
2015-10-15 00:40:52 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Queue an obj del for deferred execution; the full concrete object
 * (switchdev_obj_size bytes) is copied into the deferred item.
 */
static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
|
|
|
|
|
|
|
|
/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
|
|
|
|
|
2017-06-08 13:44:13 +07:00
|
|
|
/* Atomic chain: notifiers callable from atomic context (may not sleep). */
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
/* Blocking chain: notifiers that may sleep (used for port obj/attr ops). */
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
|
2015-01-16 05:49:36 +07:00
|
|
|
|
|
|
|
/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
|
2015-01-16 05:49:36 +07:00
|
|
|
|
|
|
|
/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
|
2015-01-16 05:49:36 +07:00
|
|
|
|
|
|
|
/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 * @extack: netlink extended ack
 *
 * Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	/* Fill in the common header before dispatching on the atomic chain. */
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
|
2015-01-30 13:40:13 +07:00
|
|
|
|
2018-11-23 06:28:25 +07:00
|
|
|
int register_switchdev_blocking_notifier(struct notifier_block *nb)
|
|
|
|
{
|
|
|
|
struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
|
|
|
|
|
|
|
|
return blocking_notifier_chain_register(chain, nb);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
|
|
|
|
|
|
|
|
int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
|
|
|
|
{
|
|
|
|
struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
|
|
|
|
|
|
|
|
return blocking_notifier_chain_unregister(chain, nb);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
|
|
|
|
|
|
|
|
int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
|
2018-12-13 00:02:54 +07:00
|
|
|
struct switchdev_notifier_info *info,
|
|
|
|
struct netlink_ext_ack *extack)
|
2018-11-23 06:28:25 +07:00
|
|
|
{
|
|
|
|
info->dev = dev;
|
2018-12-13 00:02:54 +07:00
|
|
|
info->extack = extack;
|
2018-11-23 06:28:25 +07:00
|
|
|
return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
|
|
|
|
val, info);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
|
|
|
|
|
2018-11-23 06:29:44 +07:00
|
|
|
static int __switchdev_handle_port_obj_add(struct net_device *dev,
|
|
|
|
struct switchdev_notifier_port_obj_info *port_obj_info,
|
|
|
|
bool (*check_cb)(const struct net_device *dev),
|
|
|
|
int (*add_cb)(struct net_device *dev,
|
|
|
|
const struct switchdev_obj *obj,
|
2018-12-13 00:02:56 +07:00
|
|
|
struct switchdev_trans *trans,
|
|
|
|
struct netlink_ext_ack *extack))
|
2018-11-23 06:29:44 +07:00
|
|
|
{
|
2018-12-13 00:02:56 +07:00
|
|
|
struct netlink_ext_ack *extack;
|
2018-11-23 06:29:44 +07:00
|
|
|
struct net_device *lower_dev;
|
|
|
|
struct list_head *iter;
|
|
|
|
int err = -EOPNOTSUPP;
|
|
|
|
|
2018-12-13 00:02:56 +07:00
|
|
|
extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
|
|
|
|
|
2018-11-23 06:29:44 +07:00
|
|
|
if (check_cb(dev)) {
|
|
|
|
/* This flag is only checked if the return value is success. */
|
|
|
|
port_obj_info->handled = true;
|
2018-12-13 00:02:56 +07:00
|
|
|
return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
|
|
|
|
extack);
|
2018-11-23 06:29:44 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Switch ports might be stacked under e.g. a LAG. Ignore the
|
|
|
|
* unsupported devices, another driver might be able to handle them. But
|
|
|
|
* propagate to the callers any hard errors.
|
|
|
|
*
|
|
|
|
* If the driver does its own bookkeeping of stacked ports, it's not
|
|
|
|
* necessary to go through this helper.
|
|
|
|
*/
|
|
|
|
netdev_for_each_lower_dev(dev, lower_dev, iter) {
|
2020-02-27 00:14:21 +07:00
|
|
|
if (netif_is_bridge_master(lower_dev))
|
|
|
|
continue;
|
|
|
|
|
2018-11-23 06:29:44 +07:00
|
|
|
err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
|
|
|
|
check_cb, add_cb);
|
|
|
|
if (err && err != -EOPNOTSUPP)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
int switchdev_handle_port_obj_add(struct net_device *dev,
|
|
|
|
struct switchdev_notifier_port_obj_info *port_obj_info,
|
|
|
|
bool (*check_cb)(const struct net_device *dev),
|
|
|
|
int (*add_cb)(struct net_device *dev,
|
|
|
|
const struct switchdev_obj *obj,
|
2018-12-13 00:02:56 +07:00
|
|
|
struct switchdev_trans *trans,
|
|
|
|
struct netlink_ext_ack *extack))
|
2018-11-23 06:29:44 +07:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
|
|
|
|
add_cb);
|
|
|
|
if (err == -EOPNOTSUPP)
|
|
|
|
err = 0;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
|
|
|
|
|
|
|
|
static int __switchdev_handle_port_obj_del(struct net_device *dev,
|
|
|
|
struct switchdev_notifier_port_obj_info *port_obj_info,
|
|
|
|
bool (*check_cb)(const struct net_device *dev),
|
|
|
|
int (*del_cb)(struct net_device *dev,
|
|
|
|
const struct switchdev_obj *obj))
|
|
|
|
{
|
|
|
|
struct net_device *lower_dev;
|
|
|
|
struct list_head *iter;
|
|
|
|
int err = -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (check_cb(dev)) {
|
|
|
|
/* This flag is only checked if the return value is success. */
|
|
|
|
port_obj_info->handled = true;
|
|
|
|
return del_cb(dev, port_obj_info->obj);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Switch ports might be stacked under e.g. a LAG. Ignore the
|
|
|
|
* unsupported devices, another driver might be able to handle them. But
|
|
|
|
* propagate to the callers any hard errors.
|
|
|
|
*
|
|
|
|
* If the driver does its own bookkeeping of stacked ports, it's not
|
|
|
|
* necessary to go through this helper.
|
|
|
|
*/
|
|
|
|
netdev_for_each_lower_dev(dev, lower_dev, iter) {
|
2020-02-27 00:14:21 +07:00
|
|
|
if (netif_is_bridge_master(lower_dev))
|
|
|
|
continue;
|
|
|
|
|
2018-11-23 06:29:44 +07:00
|
|
|
err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
|
|
|
|
check_cb, del_cb);
|
|
|
|
if (err && err != -EOPNOTSUPP)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
int switchdev_handle_port_obj_del(struct net_device *dev,
|
|
|
|
struct switchdev_notifier_port_obj_info *port_obj_info,
|
|
|
|
bool (*check_cb)(const struct net_device *dev),
|
|
|
|
int (*del_cb)(struct net_device *dev,
|
|
|
|
const struct switchdev_obj *obj))
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
|
|
|
|
del_cb);
|
|
|
|
if (err == -EOPNOTSUPP)
|
|
|
|
err = 0;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
|
2019-02-28 02:44:25 +07:00
|
|
|
|
|
|
|
static int __switchdev_handle_port_attr_set(struct net_device *dev,
|
|
|
|
struct switchdev_notifier_port_attr_info *port_attr_info,
|
|
|
|
bool (*check_cb)(const struct net_device *dev),
|
|
|
|
int (*set_cb)(struct net_device *dev,
|
|
|
|
const struct switchdev_attr *attr,
|
|
|
|
struct switchdev_trans *trans))
|
|
|
|
{
|
|
|
|
struct net_device *lower_dev;
|
|
|
|
struct list_head *iter;
|
|
|
|
int err = -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (check_cb(dev)) {
|
|
|
|
port_attr_info->handled = true;
|
|
|
|
return set_cb(dev, port_attr_info->attr,
|
|
|
|
port_attr_info->trans);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Switch ports might be stacked under e.g. a LAG. Ignore the
|
|
|
|
* unsupported devices, another driver might be able to handle them. But
|
|
|
|
* propagate to the callers any hard errors.
|
|
|
|
*
|
|
|
|
* If the driver does its own bookkeeping of stacked ports, it's not
|
|
|
|
* necessary to go through this helper.
|
|
|
|
*/
|
|
|
|
netdev_for_each_lower_dev(dev, lower_dev, iter) {
|
2020-02-27 00:14:21 +07:00
|
|
|
if (netif_is_bridge_master(lower_dev))
|
|
|
|
continue;
|
|
|
|
|
2019-02-28 02:44:25 +07:00
|
|
|
err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
|
|
|
|
check_cb, set_cb);
|
|
|
|
if (err && err != -EOPNOTSUPP)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
int switchdev_handle_port_attr_set(struct net_device *dev,
|
|
|
|
struct switchdev_notifier_port_attr_info *port_attr_info,
|
|
|
|
bool (*check_cb)(const struct net_device *dev),
|
|
|
|
int (*set_cb)(struct net_device *dev,
|
|
|
|
const struct switchdev_attr *attr,
|
|
|
|
struct switchdev_trans *trans))
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
|
|
|
|
set_cb);
|
|
|
|
if (err == -EOPNOTSUPP)
|
|
|
|
err = 0;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
|