mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-01 10:36:41 +07:00
iwlwifi: fix use after free bug for paged rx
In the paged rx patch (4854fde2), I introduced a bug that could possibly touch an already freed page. It is fixed by avoiding the access in this patch. I've also added some comments so that other people touching the code won't make the same mistake. In the future, if we cannot avoid accessing the page after it has been handed to the upper layer, we can use get_page/put_page to handle it. For now, it's just not necessary. This patch also fixes a debug-message printing bug reported by Stanislaw Gruszka <sgruszka@redhat.com>. Signed-off-by: Zhu Yi <yi.zhu@intel.com> Signed-off-by: Reinette Chatre <reinette.chatre@intel.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
This commit is contained in:
parent
52aa081c40
commit
29b1b2688f
@ -548,6 +548,7 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
|
|||||||
u16 len = le16_to_cpu(rx_hdr->len);
|
u16 len = le16_to_cpu(rx_hdr->len);
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
int ret;
|
int ret;
|
||||||
|
__le16 fc = hdr->frame_control;
|
||||||
|
|
||||||
/* We received data from the HW, so stop the watchdog */
|
/* We received data from the HW, so stop the watchdog */
|
||||||
if (unlikely(len + IWL39_RX_FRAME_SIZE >
|
if (unlikely(len + IWL39_RX_FRAME_SIZE >
|
||||||
@ -580,9 +581,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
|
|||||||
/* mac80211 currently doesn't support paged SKB. Convert it to
|
/* mac80211 currently doesn't support paged SKB. Convert it to
|
||||||
* linear SKB for management frame and data frame requires
|
* linear SKB for management frame and data frame requires
|
||||||
* software decryption or software defragementation. */
|
* software decryption or software defragementation. */
|
||||||
if (ieee80211_is_mgmt(hdr->frame_control) ||
|
if (ieee80211_is_mgmt(fc) ||
|
||||||
ieee80211_has_protected(hdr->frame_control) ||
|
ieee80211_has_protected(fc) ||
|
||||||
ieee80211_has_morefrags(hdr->frame_control) ||
|
ieee80211_has_morefrags(fc) ||
|
||||||
le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
|
le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
|
||||||
ret = skb_linearize(skb);
|
ret = skb_linearize(skb);
|
||||||
else
|
else
|
||||||
@ -594,11 +595,15 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
iwl_update_stats(priv, false, hdr->frame_control, len);
|
/*
|
||||||
|
* XXX: We cannot touch the page and its virtual memory (pkt) after
|
||||||
|
* here. It might have already been freed by the above skb change.
|
||||||
|
*/
|
||||||
|
|
||||||
|
iwl_update_stats(priv, false, fc, len);
|
||||||
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
|
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
|
||||||
ieee80211_rx(priv->hw, skb);
|
|
||||||
|
|
||||||
|
ieee80211_rx(priv->hw, skb);
|
||||||
out:
|
out:
|
||||||
priv->alloc_rxb_page--;
|
priv->alloc_rxb_page--;
|
||||||
rxb->page = NULL;
|
rxb->page = NULL;
|
||||||
|
@ -814,8 +814,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
|
|||||||
if (priv->rx_handlers[pkt->hdr.cmd]) {
|
if (priv->rx_handlers[pkt->hdr.cmd]) {
|
||||||
IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
|
IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
|
||||||
i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
|
i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
|
||||||
priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
|
|
||||||
priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
|
priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
|
||||||
|
priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
|
||||||
} else {
|
} else {
|
||||||
/* No handling needed */
|
/* No handling needed */
|
||||||
IWL_DEBUG_RX(priv,
|
IWL_DEBUG_RX(priv,
|
||||||
@ -824,11 +824,18 @@ void iwl_rx_handle(struct iwl_priv *priv)
|
|||||||
pkt->hdr.cmd);
|
pkt->hdr.cmd);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* XXX: After here, we should always check rxb->page
|
||||||
|
* against NULL before touching it or its virtual
|
||||||
|
* memory (pkt). Because some rx_handler might have
|
||||||
|
* already taken or freed the pages.
|
||||||
|
*/
|
||||||
|
|
||||||
if (reclaim) {
|
if (reclaim) {
|
||||||
/* Invoke any callbacks, transfer the buffer to caller,
|
/* Invoke any callbacks, transfer the buffer to caller,
|
||||||
* and fire off the (possibly) blocking iwl_send_cmd()
|
* and fire off the (possibly) blocking iwl_send_cmd()
|
||||||
* as we reclaim the driver command queue */
|
* as we reclaim the driver command queue */
|
||||||
if (rxb && rxb->page)
|
if (rxb->page)
|
||||||
iwl_tx_cmd_complete(priv, rxb);
|
iwl_tx_cmd_complete(priv, rxb);
|
||||||
else
|
else
|
||||||
IWL_WARN(priv, "Claim null rxb?\n");
|
IWL_WARN(priv, "Claim null rxb?\n");
|
||||||
|
@ -241,6 +241,7 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
|||||||
struct iwl_rx_mem_buffer *rxb;
|
struct iwl_rx_mem_buffer *rxb;
|
||||||
struct page *page;
|
struct page *page;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
gfp_t gfp_mask = priority;
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
spin_lock_irqsave(&rxq->lock, flags);
|
spin_lock_irqsave(&rxq->lock, flags);
|
||||||
@ -251,13 +252,13 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
|||||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||||
|
|
||||||
if (rxq->free_count > RX_LOW_WATERMARK)
|
if (rxq->free_count > RX_LOW_WATERMARK)
|
||||||
priority |= __GFP_NOWARN;
|
gfp_mask |= __GFP_NOWARN;
|
||||||
|
|
||||||
if (priv->hw_params.rx_page_order > 0)
|
if (priv->hw_params.rx_page_order > 0)
|
||||||
priority |= __GFP_COMP;
|
gfp_mask |= __GFP_COMP;
|
||||||
|
|
||||||
/* Alloc a new receive buffer */
|
/* Alloc a new receive buffer */
|
||||||
page = alloc_pages(priority, priv->hw_params.rx_page_order);
|
page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
|
||||||
if (!page) {
|
if (!page) {
|
||||||
if (net_ratelimit())
|
if (net_ratelimit())
|
||||||
IWL_DEBUG_INFO(priv, "alloc_pages failed, "
|
IWL_DEBUG_INFO(priv, "alloc_pages failed, "
|
||||||
@ -922,6 +923,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
|
|||||||
{
|
{
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
__le16 fc = hdr->frame_control;
|
||||||
|
|
||||||
/* We only process data packets if the interface is open */
|
/* We only process data packets if the interface is open */
|
||||||
if (unlikely(!priv->is_open)) {
|
if (unlikely(!priv->is_open)) {
|
||||||
@ -946,9 +948,9 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
|
|||||||
/* mac80211 currently doesn't support paged SKB. Convert it to
|
/* mac80211 currently doesn't support paged SKB. Convert it to
|
||||||
* linear SKB for management frame and data frame requires
|
* linear SKB for management frame and data frame requires
|
||||||
* software decryption or software defragementation. */
|
* software decryption or software defragementation. */
|
||||||
if (ieee80211_is_mgmt(hdr->frame_control) ||
|
if (ieee80211_is_mgmt(fc) ||
|
||||||
ieee80211_has_protected(hdr->frame_control) ||
|
ieee80211_has_protected(fc) ||
|
||||||
ieee80211_has_morefrags(hdr->frame_control) ||
|
ieee80211_has_morefrags(fc) ||
|
||||||
le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
|
le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
|
||||||
ret = skb_linearize(skb);
|
ret = skb_linearize(skb);
|
||||||
else
|
else
|
||||||
@ -960,7 +962,12 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
iwl_update_stats(priv, false, hdr->frame_control, len);
|
/*
|
||||||
|
* XXX: We cannot touch the page and its virtual memory (hdr) after
|
||||||
|
* here. It might have already been freed by the above skb change.
|
||||||
|
*/
|
||||||
|
|
||||||
|
iwl_update_stats(priv, false, fc, len);
|
||||||
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
|
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
|
||||||
|
|
||||||
ieee80211_rx(priv->hw, skb);
|
ieee80211_rx(priv->hw, skb);
|
||||||
|
@ -1124,6 +1124,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
|||||||
struct iwl_rx_mem_buffer *rxb;
|
struct iwl_rx_mem_buffer *rxb;
|
||||||
struct page *page;
|
struct page *page;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
gfp_t gfp_mask = priority;
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
spin_lock_irqsave(&rxq->lock, flags);
|
spin_lock_irqsave(&rxq->lock, flags);
|
||||||
@ -1135,13 +1136,13 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
|||||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||||
|
|
||||||
if (rxq->free_count > RX_LOW_WATERMARK)
|
if (rxq->free_count > RX_LOW_WATERMARK)
|
||||||
priority |= __GFP_NOWARN;
|
gfp_mask |= __GFP_NOWARN;
|
||||||
|
|
||||||
if (priv->hw_params.rx_page_order > 0)
|
if (priv->hw_params.rx_page_order > 0)
|
||||||
priority |= __GFP_COMP;
|
gfp_mask |= __GFP_COMP;
|
||||||
|
|
||||||
/* Alloc a new receive buffer */
|
/* Alloc a new receive buffer */
|
||||||
page = alloc_pages(priority, priv->hw_params.rx_page_order);
|
page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
|
||||||
if (!page) {
|
if (!page) {
|
||||||
if (net_ratelimit())
|
if (net_ratelimit())
|
||||||
IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
|
IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
|
||||||
@ -1410,8 +1411,8 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
|
|||||||
if (priv->rx_handlers[pkt->hdr.cmd]) {
|
if (priv->rx_handlers[pkt->hdr.cmd]) {
|
||||||
IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
|
IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
|
||||||
get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
|
get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
|
||||||
priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
|
|
||||||
priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
|
priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
|
||||||
|
priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
|
||||||
} else {
|
} else {
|
||||||
/* No handling needed */
|
/* No handling needed */
|
||||||
IWL_DEBUG_RX(priv,
|
IWL_DEBUG_RX(priv,
|
||||||
@ -1420,11 +1421,18 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
|
|||||||
pkt->hdr.cmd);
|
pkt->hdr.cmd);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* XXX: After here, we should always check rxb->page
|
||||||
|
* against NULL before touching it or its virtual
|
||||||
|
* memory (pkt). Because some rx_handler might have
|
||||||
|
* already taken or freed the pages.
|
||||||
|
*/
|
||||||
|
|
||||||
if (reclaim) {
|
if (reclaim) {
|
||||||
/* Invoke any callbacks, transfer the buffer to caller,
|
/* Invoke any callbacks, transfer the buffer to caller,
|
||||||
* and fire off the (possibly) blocking iwl_send_cmd()
|
* and fire off the (possibly) blocking iwl_send_cmd()
|
||||||
* as we reclaim the driver command queue */
|
* as we reclaim the driver command queue */
|
||||||
if (rxb && rxb->page)
|
if (rxb->page)
|
||||||
iwl_tx_cmd_complete(priv, rxb);
|
iwl_tx_cmd_complete(priv, rxb);
|
||||||
else
|
else
|
||||||
IWL_WARN(priv, "Claim null rxb?\n");
|
IWL_WARN(priv, "Claim null rxb?\n");
|
||||||
|
Loading…
Reference in New Issue
Block a user