ixgbe: Fix possible memory leak in ixgbe_set_ringparam

We were not correctly freeing the temporary rings on error in
ixgbe_set_ringparam.  To correct this I am unwinding a number of earlier
changes in order to get things back to the original working form, with
modifications for the current ring layouts.

This approach has multiple advantages, including a smaller memory footprint
and the fact that the interface is stopped while we are allocating the
rings, meaning there is less potential for memory corruption on the ring.
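
To make the memory-footprint point concrete, the patch below replaces the
separate temp_tx_ring and temp_rx_ring scratch arrays with a single array
sized for the larger of the two queue counts, and brings the interface down
once before any descriptor ring is rebuilt.  A condensed excerpt from the
diff further down (comments added here for this note):

	/* one scratch array now serves both directions, sized for the
	 * larger of the Tx and Rx queue counts
	 */
	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	/* the interface is stopped before any ring is rebuilt, so the
	 * hardware never works against a half-swapped ring
	 */
	ixgbe_down(adapter);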

The only disadvantage I see with this approach is that on an Rx allocation
failure we will report an error and only update the Tx rings.  However, the
adapter should be fully functional in this state and the likelihood of such
an error is very low.  In addition, it is not unreasonable to expect the
user to recheck the ring configuration should they experience an error
setting the ring sizes.
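
This behavior follows from the ordering in the patch: the new Tx rings are
swapped in and tx_ring_count is updated before any Rx allocation is
attempted, so an Rx failure only has to unwind the Rx rings built so far.
A simplified sketch of the Rx error path from the diff below (the Tx path
is symmetric; comments added here for this note):

	err = ixgbe_setup_rx_resources(&temp_ring[i]);
	if (err) {
		/* free only the new Rx rings created so far; the new Tx
		 * rings committed above stay in place, which is the
		 * partial update described above
		 */
		while (i) {
			i--;
			ixgbe_free_rx_resources(&temp_ring[i]);
		}
		goto err_setup;
	}

Both the success and the failure path then run through err_setup, which
restarts the interface and frees the single temp_ring allocation, so the
scratch memory no longer leaks when a setup call fails.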

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
commit 1f4702aa25 (parent de52a12c29)
Author:    Alexander Duyck
Committer: Jeff Kirsher
Date:      2012-09-12 07:09:51 +00:00

@@ -887,24 +887,23 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			       struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+	struct ixgbe_ring *temp_ring;
 	int i, err = 0;
 	u32 new_rx_count, new_tx_count;
-	bool need_update = false;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
-	new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
-	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
-	new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
-	new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
+	new_tx_count = clamp_t(u32, ring->tx_pending,
+			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
 	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
 
-	if ((new_tx_count == adapter->tx_ring[0]->count) &&
-	    (new_rx_count == adapter->rx_ring[0]->count)) {
+	new_rx_count = clamp_t(u32, ring->rx_pending,
+			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	if ((new_tx_count == adapter->tx_ring_count) &&
+	    (new_rx_count == adapter->rx_ring_count)) {
 		/* nothing to do */
 		return 0;
 	}
@@ -922,81 +921,80 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		goto clear_reset;
 	}
 
-	temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
-	if (!temp_tx_ring) {
+	/* allocate temporary buffer to store rings in */
+	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
+
+	if (!temp_ring) {
 		err = -ENOMEM;
 		goto clear_reset;
 	}
 
+	ixgbe_down(adapter);
+
+	/*
+	 * Setup new Tx resources and free the old Tx resources in that order.
+	 * We can then assign the new resources to the rings via a memcpy.
+	 * The advantage to this approach is that we are guaranteed to still
+	 * have resources even in the case of an allocation failure.
+	 */
 	if (new_tx_count != adapter->tx_ring_count) {
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+			memcpy(&temp_ring[i], adapter->tx_ring[i],
 			       sizeof(struct ixgbe_ring));
-			temp_tx_ring[i].count = new_tx_count;
-			err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
+
+			temp_ring[i].count = new_tx_count;
+			err = ixgbe_setup_tx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
-					ixgbe_free_tx_resources(&temp_tx_ring[i]);
+					ixgbe_free_tx_resources(&temp_ring[i]);
 				}
-				goto clear_reset;
-			}
-		}
-		need_update = true;
-	}
-
-	temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
-	if (!temp_rx_ring) {
-		err = -ENOMEM;
-		goto err_setup;
-	}
-
-	if (new_rx_count != adapter->rx_ring_count) {
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
-			       sizeof(struct ixgbe_ring));
-			temp_rx_ring[i].count = new_rx_count;
-			err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
-			if (err) {
-				while (i) {
-					i--;
-					ixgbe_free_rx_resources(&temp_rx_ring[i]);
-				}
 				goto err_setup;
 			}
 		}
-		need_update = true;
-	}
 
-	/* if rings need to be updated, here's the place to do it in one shot */
-	if (need_update) {
-		ixgbe_down(adapter);
-
-		/* tx */
-		if (new_tx_count != adapter->tx_ring_count) {
-			for (i = 0; i < adapter->num_tx_queues; i++) {
-				ixgbe_free_tx_resources(adapter->tx_ring[i]);
-				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
-				       sizeof(struct ixgbe_ring));
-			}
-			adapter->tx_ring_count = new_tx_count;
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			ixgbe_free_tx_resources(adapter->tx_ring[i]);
+
+			memcpy(adapter->tx_ring[i], &temp_ring[i],
+			       sizeof(struct ixgbe_ring));
 		}
 
-		/* rx */
-		if (new_rx_count != adapter->rx_ring_count) {
-			for (i = 0; i < adapter->num_rx_queues; i++) {
-				ixgbe_free_rx_resources(adapter->rx_ring[i]);
-				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
-				       sizeof(struct ixgbe_ring));
-			}
-			adapter->rx_ring_count = new_rx_count;
+		adapter->tx_ring_count = new_tx_count;
+	}
+
+	/* Repeat the process for the Rx rings if needed */
+	if (new_rx_count != adapter->rx_ring_count) {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			memcpy(&temp_ring[i], adapter->rx_ring[i],
+			       sizeof(struct ixgbe_ring));
+
+			temp_ring[i].count = new_rx_count;
+			err = ixgbe_setup_rx_resources(&temp_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					ixgbe_free_rx_resources(&temp_ring[i]);
+				}
+				goto err_setup;
+			}
 		}
-		ixgbe_up(adapter);
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			ixgbe_free_rx_resources(adapter->rx_ring[i]);
+
+			memcpy(adapter->rx_ring[i], &temp_ring[i],
+			       sizeof(struct ixgbe_ring));
+		}
+
+		adapter->rx_ring_count = new_rx_count;
 	}
 
-	vfree(temp_rx_ring);
 err_setup:
-	vfree(temp_tx_ring);
+	ixgbe_up(adapter);
+	vfree(temp_ring);
 clear_reset:
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 	return err;