mlxsw: Simplify mlxsw_sx_port_xmit function

Previously, we checked whether the transmission queue was full only in the
middle of the xmit function. This led to complex logic, because we
sometimes need to reallocate the headroom for our Tx header.

Allow the switch driver to check whether the transmission queue is full
before sending the packet, and remove this complex logic.

Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit d003462a50
parent 7b7b9cff74
Author: Ido Schimmel
Date:   2015-08-06 16:41:56 +02:00
Committed-by: David S. Miller
4 changed files with 41 additions and 21 deletions
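
The shape of the change, in miniature: instead of preparing the packet and
then unwinding when the queue turns out to be full, the driver now probes
for space first. A self-contained toy sketch of that pattern (plain C with
invented names; not the driver code, which follows in the diffs below):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for the real driver pieces; names are invented. */
	static bool queue_has_room(void)  { return true; }
	static void prepare_headers(void) { puts("reallocate headroom, build Tx header"); }
	static int  enqueue_packet(void)  { return 0; }

	static int toy_xmit(void)
	{
		if (!queue_has_room())
			return -1;	/* back off before any work is done */
		prepare_headers();	/* no undo path needed afterwards */
		return enqueue_packet();
	}

	int main(void)
	{
		return toy_xmit();
	}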

--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c

@@ -865,6 +865,16 @@ static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
 	return container_of(driver_priv, struct mlxsw_core, driver_priv);
 }
 
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+				  const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
+						  tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
+
 int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
 			    const struct mlxsw_tx_info *tx_info)
 {
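
The wrapper itself carries no policy; it only forwards the question to
whichever bus registered the operation. A toy model of that shim (invented
names, not mlxsw code):

	#include <stdbool.h>

	/* Toy core/bus split: the core stores a bus-provided callback
	 * plus the bus's private context, the same shape as above. */
	struct toy_core {
		bool (*bus_busy)(void *bus_priv);
		void *bus_priv;
	};

	static bool toy_core_transmit_busy(struct toy_core *core)
	{
		/* Thin shim: delegate straight to the bus implementation. */
		return core->bus_busy(core->bus_priv);
	}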

--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h

@@ -73,6 +73,9 @@ struct mlxsw_tx_info {
 	bool is_emad;
 };
 
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+				  const struct mlxsw_tx_info *tx_info);
+
 int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
 			    const struct mlxsw_tx_info *tx_info);
 
@@ -177,6 +180,8 @@ struct mlxsw_bus {
 	int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
 		    const struct mlxsw_config_profile *profile);
 	void (*fini)(void *bus_priv);
+	bool (*skb_transmit_busy)(void *bus_priv,
+				  const struct mlxsw_tx_info *tx_info);
 	int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
 			    const struct mlxsw_tx_info *tx_info);
 	int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
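
Every bus backend must now fill in both function pointers: a cheap,
non-destructive busy probe paired with the transmit operation it guards. A
toy ops table showing that shape (hypothetical backend and types, not from
this commit):

	#include <stdbool.h>

	struct toy_tx_info;	/* stand-in for struct mlxsw_tx_info */

	struct toy_bus {
		bool (*skb_transmit_busy)(void *bus_priv,
					  const struct toy_tx_info *tx_info);
		int (*skb_transmit)(void *bus_priv,
				    const struct toy_tx_info *tx_info);
	};

	static bool toy_busy(void *bus_priv, const struct toy_tx_info *tx_info)
	{
		(void)bus_priv; (void)tx_info;
		return false;	/* this toy backend never runs out of room */
	}

	static int toy_transmit(void *bus_priv, const struct toy_tx_info *tx_info)
	{
		(void)bus_priv; (void)tx_info;
		return 0;
	}

	static const struct toy_bus toy_bus = {
		.skb_transmit_busy	= toy_busy,
		.skb_transmit		= toy_transmit,
	};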

--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c

@@ -1443,6 +1443,15 @@ mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
 	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
 }
 
+static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
+					const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+
+	return !mlxsw_pci_queue_elem_info_producer_get(q);
+}
+
 static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
 				  const struct mlxsw_tx_info *tx_info)
 {
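
The busy test reduces to whether the chosen send descriptor queue still has
a producer element to hand out. A generic illustration of why "no producer
slot" means "full" (toy ring, not the mlxsw implementation):

	#include <stdbool.h>

	/* Toy descriptor ring: one slot is kept free so that
	 * prod == cons can unambiguously mean "empty". */
	struct toy_ring {
		unsigned int prod;	/* next slot the driver would fill */
		unsigned int cons;	/* next slot hardware completes */
		unsigned int count;	/* total number of slots */
	};

	static bool toy_ring_full(const struct toy_ring *r)
	{
		return (r->prod + 1) % r->count == r->cons;
	}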
@@ -1625,11 +1634,12 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
 }
 
 static const struct mlxsw_bus mlxsw_pci_bus = {
-	.kind		= "pci",
-	.init		= mlxsw_pci_init,
-	.fini		= mlxsw_pci_fini,
-	.skb_transmit	= mlxsw_pci_skb_transmit,
-	.cmd_exec	= mlxsw_pci_cmd_exec,
+	.kind			= "pci",
+	.init			= mlxsw_pci_init,
+	.fini			= mlxsw_pci_fini,
+	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
+	.skb_transmit		= mlxsw_pci_skb_transmit,
+	.cmd_exec		= mlxsw_pci_cmd_exec,
 };
 
 static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)

--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c

@@ -300,31 +300,26 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
 		.local_port = mlxsw_sx_port->local_port,
 		.is_emad = false,
 	};
-	struct sk_buff *skb_old = NULL;
 	int err;
 
-	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
-		struct sk_buff *skb_new;
+	if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+		return NETDEV_TX_BUSY;
 
-		skb_old = skb;
-		skb_new = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
-		if (!skb_new) {
+	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+		struct sk_buff *skb_orig = skb;
+
+		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+		if (!skb) {
 			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
-			dev_kfree_skb_any(skb_old);
+			dev_kfree_skb_any(skb_orig);
 			return NETDEV_TX_OK;
 		}
-		skb = skb_new;
 	}
 	mlxsw_sx_txhdr_construct(skb, &tx_info);
+	/* Due to a race we might fail here because of a full queue. In that
+	 * unlikely case we simply drop the packet.
+	 */
 	err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
 
-	if (err == -EAGAIN) {
-		if (skb_old)
-			dev_kfree_skb_any(skb);
-		return NETDEV_TX_BUSY;
-	}
-	if (skb_old)
-		dev_kfree_skb_any(skb_old);
-
 	if (!err) {
 		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
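
One subtlety the new comment documents: NETDEV_TX_BUSY is only safe before
the skb has been touched, since the stack will requeue that same skb; once
the Tx header has been pushed, a racy queue-full failure can only be
handled by dropping. A toy model of that ownership contract (invented
names, not kernel code):

	#include <stdbool.h>

	enum toy_tx_ret { TOY_TX_OK, TOY_TX_BUSY };

	struct toy_pkt { bool header_pushed; };

	static bool toy_queue_full;	/* may flip at any time (other CPUs) */
	static unsigned long toy_tx_dropped;

	static enum toy_tx_ret toy_xmit(struct toy_pkt *p)
	{
		if (toy_queue_full)
			return TOY_TX_BUSY;	/* p untouched: safe to requeue */

		p->header_pushed = true;	/* the Tx-header step */
		if (toy_queue_full) {		/* lost the race after mangling */
			toy_tx_dropped++;	/* must drop; cannot hand back */
			return TOY_TX_OK;
		}
		return TOY_TX_OK;
	}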