Diffstat (limited to 'drivers/net/wimax')
-rw-r--r--	drivers/net/wimax/i2400m/netdev.c	31
1 file changed, 17 insertions, 14 deletions
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index cedd4d30d996..48896138418f 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -156,7 +156,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
 	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
 	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
 	struct device *dev = i2400m_dev(i2400m);
-	struct sk_buff *skb = i2400m->wake_tx_skb;
+	struct sk_buff *skb;
 	unsigned long flags;
 
 	spin_lock_irqsave(&i2400m->tx_lock, flags);
@@ -236,23 +236,26 @@ void i2400m_tx_prep_header(struct sk_buff *skb)
 void i2400m_net_wake_stop(struct i2400m *i2400m)
 {
 	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *wake_tx_skb;
+	unsigned long flags;
 
 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
-	/* See i2400m_hard_start_xmit(), references are taken there
-	 * and here we release them if the work was still
-	 * pending. Note we can't differentiate work not pending vs
-	 * never scheduled, so the NULL check does that. */
-	if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
-	    && i2400m->wake_tx_skb != NULL) {
-		unsigned long flags;
-		struct sk_buff *wake_tx_skb;
-		spin_lock_irqsave(&i2400m->tx_lock, flags);
-		wake_tx_skb = i2400m->wake_tx_skb;	/* compat help */
-		i2400m->wake_tx_skb = NULL;	/* compat help */
-		spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+	/*
+	 * See i2400m_hard_start_xmit(), references are taken there and
+	 * here we release them if the packet was still pending.
+	 */
+	cancel_work_sync(&i2400m->wake_tx_ws);
+
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	wake_tx_skb = i2400m->wake_tx_skb;
+	i2400m->wake_tx_skb = NULL;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+	if (wake_tx_skb) {
 		i2400m_put(i2400m);
 		kfree_skb(wake_tx_skb);
 	}
+
 	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
 }
 
@@ -288,7 +291,7 @@ int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
 	 * and if pending, release those resources. */
 	result = 0;
 	spin_lock_irqsave(&i2400m->tx_lock, flags);
-	if (!work_pending(&i2400m->wake_tx_ws)) {
+	if (!i2400m->wake_tx_skb) {
 		netif_stop_queue(net_dev);
 		i2400m_get(i2400m);
 		i2400m->wake_tx_skb = skb_get(skb);	/* transfer ref count */
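
The new code follows a common teardown pattern: cancel the work item unconditionally, detach the pending skb pointer under the lock, and only then drop the reference and free the buffer outside the lock. The same `wake_tx_skb` check also replaces `work_pending()` as the "is a wake TX queued" test. Below is a minimal userspace sketch of that pattern, assuming a pthread mutex in place of the spinlock; the names `ctx`, `pending_buf`, `queue_buf()` and `stop()` are illustrative only and do not exist in the driver.

	/* Hypothetical userspace analogue of "detach under lock, free outside
	 * the lock"; build with: gcc -pthread example.c */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct ctx {
		pthread_mutex_t lock;		/* plays the role of i2400m->tx_lock */
		char *pending_buf;		/* plays the role of i2400m->wake_tx_skb */
	};

	/* Producer side: queue a buffer only if none is pending
	 * (cf. the !i2400m->wake_tx_skb test in i2400m_net_wake_tx()) */
	static int queue_buf(struct ctx *c, const char *data)
	{
		int queued = 0;

		pthread_mutex_lock(&c->lock);
		if (c->pending_buf == NULL) {
			c->pending_buf = strdup(data);
			queued = 1;
		}
		pthread_mutex_unlock(&c->lock);
		return queued;
	}

	/* Teardown side: detach the pointer under the lock, release it outside
	 * (cf. i2400m_net_wake_stop() after the change) */
	static void stop(struct ctx *c)
	{
		char *buf;

		pthread_mutex_lock(&c->lock);
		buf = c->pending_buf;
		c->pending_buf = NULL;
		pthread_mutex_unlock(&c->lock);

		if (buf) {
			printf("releasing pending buffer: %s\n", buf);
			free(buf);
		}
	}

	int main(void)
	{
		struct ctx c = { .pending_buf = NULL };

		pthread_mutex_init(&c.lock, NULL);
		queue_buf(&c, "hello");
		stop(&c);	/* frees exactly once, even if nothing was queued */
		pthread_mutex_destroy(&c.lock);
		return 0;
	}

Releasing the buffer after dropping the lock avoids holding the lock across the free, and the NULL-after-detach test behaves the same whether the deferred work ran, is still pending, or was never scheduled.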