author    Luis R. Rodriguez <lrodriguez@atheros.com>    2008-12-03 06:35:30 -0500
committer John W. Linville <linville@tuxdriver.com>     2008-12-05 09:35:59 -0500
commit    c112d0c5b89037dd618083b5fdf4bb36b0c51d77 (patch)
tree      195a14b4482ef3506ffa51caa4ab7f6a5b58e003
parent    f8316df10c4e3bec5b4c3a5a8e026c577640c3a6 (diff)
ath9k: Use GFP_ATOMIC when allocating TX private area
Using GFP_KERNEL was wrong and produces a 'scheduling while atomic' bug, as we are in a tasklet. Also check the return value now in case the allocation fails, and be sure to stop the TX queue on memory issues while guaranteeing that the TX queue will eventually be woken up.

Signed-off-by: Senthil Balasubramanian <senthilkumar@atheros.com>
Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
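For readers unfamiliar with the constraint, here is a minimal, illustrative sketch of the allocation pattern the patch adopts: in tasklet (softirq) context the allocator must not sleep, so GFP_ATOMIC is required and a NULL return has to be handled. The struct and function names below (tx_frame_priv, setup_tx_private) are hypothetical stand-ins, not the actual ath9k definitions.

#include <linux/slab.h>
#include <net/mac80211.h>

/* Hypothetical per-frame private area; the real driver stores its own
 * tx_info_priv structure here. */
struct tx_frame_priv {
	int retries;
};

/* GFP_KERNEL may sleep, which is illegal in softirq/tasklet context
 * ("scheduling while atomic"). GFP_ATOMIC never sleeps but can fail,
 * so the NULL return must be propagated so the caller can unwind. */
static int setup_tx_private(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct tx_frame_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
	if (unlikely(!priv))
		return -ENOMEM;

	tx_info->rate_driver_data[0] = priv;
	return 0;
}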
 drivers/net/wireless/ath9k/xmit.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 17fd05e2f247..9de27c681b86 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -1652,7 +1652,9 @@ static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
 	int hdrlen;
 	__le16 fc;
 
-	tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_KERNEL);
+	tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
+	if (unlikely(!tx_info_priv))
+		return -ENOMEM;
 	tx_info->rate_driver_data[0] = tx_info_priv;
 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
 	fc = hdr->frame_control;
@@ -1801,10 +1803,26 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
 
 	r = ath_tx_setup_buffer(sc, bf, skb, txctl);
 	if (unlikely(r)) {
-		spin_lock_bh(&sc->sc_txbuflock);
+		struct ath_txq *txq = txctl->txq;
+
 		DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");
+
+		/* upon ath_tx_processq() this TX queue will be resumed, we
+		 * guarantee this will happen by knowing beforehand that
+		 * we will at least have to run TX completion on one buffer
+		 * on the queue */
+		spin_lock_bh(&txq->axq_lock);
+		if (ath_txq_depth(sc, txq->axq_qnum) > 1) {
+			ieee80211_stop_queue(sc->hw,
+					     skb_get_queue_mapping(skb));
+			txq->stopped = 1;
+		}
+		spin_unlock_bh(&txq->axq_lock);
+
+		spin_lock_bh(&sc->sc_txbuflock);
 		list_add_tail(&bf->list, &sc->sc_txbuf);
 		spin_unlock_bh(&sc->sc_txbuflock);
+
 		return r;
 	}
 
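For context on the stop/wake pairing: the queue is only stopped here when at least one buffer is still pending on the hardware queue, so a later TX completion is guaranteed to run and can resume it. The sketch below shows what that resume side looks like in simplified form; demo_txq and demo_tx_complete are hypothetical names standing in for the driver's ath_txq and the completion handling in ath_tx_processq(), not the actual code.

#include <linux/spinlock.h>
#include <net/mac80211.h>

/* Hypothetical, stripped-down queue state mirroring the fields the
 * patch touches on struct ath_txq. */
struct demo_txq {
	spinlock_t axq_lock;
	int stopped;
};

/* Illustrative resume path, run from TX completion processing. Because
 * the error path above only stops the mac80211 queue while a buffer is
 * still pending, this completion is guaranteed to fire eventually and
 * re-enable transmission. */
static void demo_tx_complete(struct ieee80211_hw *hw, struct demo_txq *txq,
			     struct sk_buff *skb)
{
	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped) {
		ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
		txq->stopped = 0;
	}
	spin_unlock_bh(&txq->axq_lock);
}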