Merge branch 'mlxsw-fdb-learning-offload'

Jiri Pirko says:

====================
mlxsw: Offload FDB learning configuration

Ido says:
This patchset addresses two long-standing issues in the mlxsw driver
concerning FDB learning.

Patch 1 limits the number of FDB records processed by the driver in a
single session. This matters when many new records need to be
processed at once, which would otherwise cause the RTNL mutex to be
held for long periods of time (see the first sketch below).

Patches 2-6 offload the learning configuration (on/off) of bridge
ports to the device, instead of having the driver decide in software
whether each record should be learned (see the second sketch below).

The last patch is fallout: it removes configuration that is no longer
necessary once the preceding patches are applied.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2016-08-24 09:41:13 -07:00
commit d14c800ba6
4 changed files with 115 additions and 47 deletions
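
The following is only an illustrative sketch of the bounded-batch idea from
patch 1, not driver code. fetch_records(), process_record(), and
MAX_REC_PER_SESSION are hypothetical stand-ins for the SFN register query,
the per-record notification handler, and MLXSW_REG_SFN_REC_MAX_COUNT.

#include <stdio.h>

/* Hypothetical cap on records handled per session (stands in for
 * MLXSW_REG_SFN_REC_MAX_COUNT). */
#define MAX_REC_PER_SESSION 64

/* Hypothetical backlog of pending learned / aged-out FDB records. */
static int pending_records = 1000;

/* Stand-in for querying the SFN register: returns the number of records
 * fetched, never more than the requested cap. */
static int fetch_records(int max)
{
    int n = pending_records < max ? pending_records : max;

    pending_records -= n;
    return n;
}

/* Stand-in for translating one record into an FDB notification. */
static void process_record(int i)
{
    (void)i;
}

/* One work-queue invocation: take the lock, handle at most one capped
 * batch, release the lock, and leave the rest for the next run. */
static void fdb_notify_work(void)
{
    int i, num_rec;

    /* rtnl_lock() would go here */
    num_rec = fetch_records(MAX_REC_PER_SESSION);
    for (i = 0; i < num_rec; i++)
        process_record(i);
    /* rtnl_unlock() would go here */

    printf("handled %d records, %d still pending\n", num_rec, pending_records);
}

int main(void)
{
    while (pending_records)
        fdb_notify_work();
    return 0;
}

Bounding each session keeps the lock hold time proportional to the cap rather
than to the backlog; in the diff below, the new sfn_end bit ends the firmware
session after each query, so a single work run handles at most
MLXSW_REG_SFN_REC_MAX_COUNT records.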
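
In the same illustrative spirit, a sketch of the direction taken by patches
2-6: instead of masking learned notifications in software (the removed
"adding = adding && mlxsw_sp_port->learning" check), the learning state is
pushed to the device per VID range, split into register-sized chunks.
set_hw_learning_range() and SPVMLR_MAX_REC are hypothetical stand-ins for the
SPVMLR register write and MLXSW_REG_SPVMLR_REC_MAX_COUNT.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-write record limit of the learning register (stands in
 * for MLXSW_REG_SPVMLR_REC_MAX_COUNT). */
#define SPVMLR_MAX_REC 256

/* Stand-in for a single SPVMLR register write covering one VID range. */
static int set_hw_learning_range(unsigned int vid_begin, unsigned int vid_end,
                                 bool learn_enable)
{
    printf("VIDs %u-%u: learning %s\n", vid_begin, vid_end,
           learn_enable ? "on" : "off");
    return 0;
}

/* Walk a VID range in register-sized chunks, mirroring how the driver
 * splits a bridge port's VLANs across register writes. */
static int port_vid_learning_set(unsigned int vid_begin, unsigned int vid_end,
                                 bool learn_enable)
{
    unsigned int vid, vid_e;
    int err;

    for (vid = vid_begin; vid <= vid_end; vid += SPVMLR_MAX_REC) {
        vid_e = vid + SPVMLR_MAX_REC - 1;
        if (vid_e > vid_end)
            vid_e = vid_end;
        err = set_hw_learning_range(vid, vid_e, learn_enable);
        if (err)
            return err;
    }
    return 0;
}

int main(void)
{
    /* E.g. BR_LEARNING was cleared on the bridge port: disable learning
     * for VIDs 1-1000 in hardware instead of filtering in the driver. */
    return port_vid_learning_set(1, 1000, false);
}

A real caller would apply this over a bridge port's active VLANs, which is
roughly what mlxsw_sp_port_learning_set() does in the last file of the diff
below.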


@@ -591,6 +591,12 @@ static const struct mlxsw_reg_info mlxsw_reg_sfn = {
*/
MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
/* reg_sfn_end
* Forces the current session to end.
* Access: OP
*/
MLXSW_ITEM32(reg, sfn, end, 0x04, 20, 1);
/* reg_sfn_num_rec
* Request: Number of learned notifications and aged-out notification
* records requested.
@@ -605,6 +611,7 @@ static inline void mlxsw_reg_sfn_pack(char *payload)
{
MLXSW_REG_ZERO(sfn, payload);
mlxsw_reg_sfn_swid_set(payload, 0);
mlxsw_reg_sfn_end_set(payload, 1);
mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
}


@@ -555,8 +555,9 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, bool learn_enable)
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool learn_enable)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spvmlr_pl;
@@ -565,13 +566,20 @@ static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
if (!spvmlr_pl)
return -ENOMEM;
mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
learn_enable);
mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
vid_end, learn_enable);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
kfree(spvmlr_pl);
return err;
}
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, bool learn_enable)
{
return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
learn_enable);
}
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -973,10 +981,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
goto err_port_vp_mode_trans;
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
if (err)
goto err_port_vid_learning_set;
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
if (err)
goto err_port_add_vid;
@@ -984,8 +988,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
return 0;
err_port_add_vid:
mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
if (list_is_singular(&mlxsw_sp_port->vports_list))
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
@@ -1012,8 +1014,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
/* Drop FID reference. If this was the last reference the
* resources will be freed.
*/


@@ -558,6 +558,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate);
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool learn_enable);
#ifdef CONFIG_MLXSW_SPECTRUM_DCB


@@ -261,12 +261,40 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
false);
}
static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool set)
{
u16 vid;
int err;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
set);
}
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
set);
if (err)
goto err_port_vid_learning_set;
}
return 0;
err_port_vid_learning_set:
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
__mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
return err;
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
unsigned long brport_flags)
{
unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
bool set;
int err;
if (!mlxsw_sp_port->bridged)
@@ -276,17 +304,30 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
if ((uc_flood ^ brport_flags) & BR_FLOOD) {
set = mlxsw_sp_port->uc_flood ? false : true;
err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
!mlxsw_sp_port->uc_flood);
if (err)
return err;
}
if ((learning ^ brport_flags) & BR_LEARNING) {
err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
!mlxsw_sp_port->learning);
if (err)
goto err_port_learning_set;
}
mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
return 0;
err_port_learning_set:
if ((uc_flood ^ brport_flags) & BR_FLOOD)
mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
mlxsw_sp_port->uc_flood);
return err;
}
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
@@ -635,6 +676,27 @@ static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool learn_enable)
{
u16 vid, vid_e;
int err;
for (vid = vid_begin; vid <= vid_end;
vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
vid_end);
err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
vid_e, learn_enable);
if (err)
return err;
}
return 0;
}
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool flag_untagged, bool flag_pvid)
@@ -675,6 +737,14 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
mlxsw_sp_port->learning);
if (err) {
netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
vid_begin, vid_end);
goto err_port_vid_learning_set;
}
/* Changing activity bits only if HW operation succeeded */
for (vid = vid_begin; vid <= vid_end; vid++) {
set_bit(vid, mlxsw_sp_port->active_vlans);
@@ -697,6 +767,9 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
err_port_stp_state_set:
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
false);
err_port_vid_learning_set:
if (old_pvid != mlxsw_sp_port->pvid)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
@@ -1001,29 +1074,20 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end)
{
struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, pvid;
int err;
if (!mlxsw_sp_port->bridged)
return -EINVAL;
err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
false, false);
if (err) {
netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
vid_end);
return err;
}
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
false);
pvid = mlxsw_sp_port->pvid;
if (pvid >= vid_begin && pvid <= vid_end) {
err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
if (err) {
netdev_err(dev, "Unable to del PVID %d\n", pvid);
return err;
}
}
if (pvid >= vid_begin && pvid <= vid_end)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
false);
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
@@ -1366,8 +1430,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
vid = fid;
}
adding = adding && mlxsw_sp_port->learning;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
adding, true);
@@ -1429,8 +1491,6 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
vid = fid;
}
adding = adding && mlxsw_sp_port->learning;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true);
@@ -1496,20 +1556,18 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
rtnl_lock();
do {
mlxsw_reg_sfn_pack(sfn_pl);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
if (err) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
break;
}
num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
for (i = 0; i < num_rec; i++)
mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
mlxsw_reg_sfn_pack(sfn_pl);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
if (err) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
goto out;
}
num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
for (i = 0; i < num_rec; i++)
mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
} while (num_rec);
out:
rtnl_unlock();
kfree(sfn_pl);
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}