author		Ming Lei <tom.leiming@gmail.com>	2012-04-25 23:33:46 -0400
committer	David S. Miller <davem@davemloft.net>	2012-05-15 13:41:42 -0400
commit		5b6e9bcdeb65634b4ad604eb4536404bbfc62cfa (patch)
tree		9c758413d018351b2e95942314a1fa973d7fd882
parent		8aa51d64c1f526e43b1e7f89fb8b98c2fd583f4b (diff)
usbnet: fix skb traversing races during unlink(v2)
Commit 4231d47e6fe69f061f96c98c30eaf9fb4c14b96d ("net/usbnet: avoid
recursive locking in usbnet_stop()") fixes the recursive locking problem
by releasing the skb queue lock before unlink, but it may cause skb
traversing races:

- after a URB is unlinked and the queue lock is released, the referred
  skb and skb->next may be moved to the done queue, or even be released
- in skb_queue_walk_safe, the next skb is still obtained through the
  next pointer of the previous skb
- so this may trigger an oops or other problems

This patch extends the usage of entry->state with an 'unlink_start'
state, and always holds the queue (rx/tx) lock while changing the state
if the referred skb is in the rx or tx queue, because unlink_urbs needs
to know whether unlinking of the referred urb has already been started.

The other part of this patch is based on Huajun's patch: always traverse
from the head of the tx/rx queue to find an skb which is to be unlinked
but whose unlinking has not yet been started.

Signed-off-by: Huajun Li <huajun.li.lee@gmail.com>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Cc: Oliver Neukum <oneukum@suse.de>
Cc: stable@kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
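To make the race and the fix easier to follow, here is a minimal userspace
sketch, not the driver code itself: the queue, entry and state names below
are illustrative stand-ins for the usbnet structures. It models the
restart-from-head traversal the new unlink_urbs() uses: instead of caching
a next pointer across an interval where the queue lock is dropped, each
pass restarts from the head and skips entries already marked unlink_start.

/*
 * Userspace sketch (illustrative only) of the restart-from-head walk.
 */
#include <stdio.h>

enum state { queued, unlink_start };

struct entry {
	int id;
	enum state state;
	struct entry *next;
};

/* Pick the first entry whose unlink has not been started yet. */
static struct entry *first_not_unlinking(struct entry *head)
{
	struct entry *e;

	for (e = head; e; e = e->next)
		if (e->state != unlink_start)
			return e;
	return NULL;
}

int main(void)
{
	struct entry c = { 3, queued, NULL };
	struct entry b = { 2, queued, &c };
	struct entry a = { 1, queued, &b };
	struct entry *head = &a, *e;
	int count = 0;

	/*
	 * Restart from the head on every iteration instead of caching
	 * e->next: in the real driver the queue lock is dropped around
	 * usb_unlink_urb(), so a cached next pointer could already have
	 * been moved to the done list or freed by the completion path.
	 */
	while ((e = first_not_unlinking(head)) != NULL) {
		e->state = unlink_start;	/* marked while holding the queue lock */
		printf("unlinking entry %d\n", e->id);
		count++;
	}
	printf("unlinked %d entries\n", count);
	return 0;
}

In the real driver, restarting from the head means the walk never
dereferences a next pointer that rx_complete()/tx_complete() may already
have moved to the done queue.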
-rw-r--r--	drivers/net/usb/usbnet.c	54
-rw-r--r--	include/linux/usb/usbnet.h	3
2 files changed, 40 insertions(+), 17 deletions(-)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2d927fb4adf4..b38db48b1ce0 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -282,17 +282,32 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
 }
 EXPORT_SYMBOL_GPL(usbnet_change_mtu);
 
+/* The caller must hold list->lock */
+static void __usbnet_queue_skb(struct sk_buff_head *list,
+		struct sk_buff *newsk, enum skb_state state)
+{
+	struct skb_data *entry = (struct skb_data *) newsk->cb;
+
+	__skb_queue_tail(list, newsk);
+	entry->state = state;
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
  * completion callbacks.  2.5 should have fixed those bugs...
  */
 
-static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
+static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
+		struct sk_buff_head *list, enum skb_state state)
 {
 	unsigned long		flags;
+	enum skb_state		old_state;
+	struct skb_data *entry = (struct skb_data *) skb->cb;
 
 	spin_lock_irqsave(&list->lock, flags);
+	old_state = entry->state;
+	entry->state = state;
 	__skb_unlink(skb, list);
 	spin_unlock(&list->lock);
 	spin_lock(&dev->done.lock);
@@ -300,6 +315,7 @@ static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_hea
 	if (dev->done.qlen == 1)
 		tasklet_schedule(&dev->bh);
 	spin_unlock_irqrestore(&dev->done.lock, flags);
+	return old_state;
 }
 
 /* some work can't be done in tasklets, so we use keventd
@@ -340,7 +356,6 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 	entry = (struct skb_data *) skb->cb;
 	entry->urb = urb;
 	entry->dev = dev;
-	entry->state = rx_start;
 	entry->length = 0;
 
 	usb_fill_bulk_urb (urb, dev->udev, dev->in,
@@ -372,7 +387,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 			tasklet_schedule (&dev->bh);
 			break;
 		case 0:
-			__skb_queue_tail (&dev->rxq, skb);
+			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
 		}
 	} else {
 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@ -423,16 +438,17 @@ static void rx_complete (struct urb *urb)
 	struct skb_data		*entry = (struct skb_data *) skb->cb;
 	struct usbnet		*dev = entry->dev;
 	int			urb_status = urb->status;
+	enum skb_state		state;
 
 	skb_put (skb, urb->actual_length);
-	entry->state = rx_done;
+	state = rx_done;
 	entry->urb = NULL;
 
 	switch (urb_status) {
 	/* success */
 	case 0:
 		if (skb->len < dev->net->hard_header_len) {
-			entry->state = rx_cleanup;
+			state = rx_cleanup;
 			dev->net->stats.rx_errors++;
 			dev->net->stats.rx_length_errors++;
 			netif_dbg(dev, rx_err, dev->net,
@@ -471,7 +487,7 @@ static void rx_complete (struct urb *urb)
471 "rx throttle %d\n", urb_status); 487 "rx throttle %d\n", urb_status);
472 } 488 }
473block: 489block:
474 entry->state = rx_cleanup; 490 state = rx_cleanup;
475 entry->urb = urb; 491 entry->urb = urb;
476 urb = NULL; 492 urb = NULL;
477 break; 493 break;
@@ -482,17 +498,18 @@ block:
 		// FALLTHROUGH
 
 	default:
-		entry->state = rx_cleanup;
+		state = rx_cleanup;
 		dev->net->stats.rx_errors++;
 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
 		break;
 	}
 
-	defer_bh(dev, skb, &dev->rxq);
+	state = defer_bh(dev, skb, &dev->rxq, state);
 
 	if (urb) {
 		if (netif_running (dev->net) &&
-		    !test_bit (EVENT_RX_HALT, &dev->flags)) {
+		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
+		    state != unlink_start) {
 			rx_submit (dev, urb, GFP_ATOMIC);
 			usb_mark_last_busy(dev->udev);
 			return;
@@ -579,16 +596,23 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
 {
 	unsigned long		flags;
-	struct sk_buff		*skb, *skbnext;
+	struct sk_buff		*skb;
 	int			count = 0;
 
 	spin_lock_irqsave (&q->lock, flags);
-	skb_queue_walk_safe(q, skb, skbnext) {
+	while (!skb_queue_empty(q)) {
 		struct skb_data		*entry;
 		struct urb		*urb;
 		int			retval;
 
-		entry = (struct skb_data *) skb->cb;
+		skb_queue_walk(q, skb) {
+			entry = (struct skb_data *) skb->cb;
+			if (entry->state != unlink_start)
+				goto found;
+		}
+		break;
+found:
+		entry->state = unlink_start;
 		urb = entry->urb;
 
 		/*
@@ -1039,8 +1063,7 @@ static void tx_complete (struct urb *urb)
 	}
 
 	usb_autopm_put_interface_async(dev->intf);
-	entry->state = tx_done;
-	defer_bh(dev, skb, &dev->txq);
+	(void) defer_bh(dev, skb, &dev->txq, tx_done);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1096,7 +1119,6 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 	entry = (struct skb_data *) skb->cb;
 	entry->urb = urb;
 	entry->dev = dev;
-	entry->state = tx_start;
 	entry->length = length;
 
 	usb_fill_bulk_urb (urb, dev->udev, dev->out,
@@ -1155,7 +1177,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 		break;
 	case 0:
 		net->trans_start = jiffies;
-		__skb_queue_tail (&dev->txq, skb);
+		__usbnet_queue_skb(&dev->txq, skb, tx_start);
 		if (dev->txq.qlen >= TX_QLEN (dev))
 			netif_stop_queue (net);
 	}
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 605b0aa8d852..76f439647c4b 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -191,7 +191,8 @@ extern void usbnet_cdc_status(struct usbnet *, struct urb *);
 enum skb_state {
 	illegal = 0,
 	tx_start, tx_done,
-	rx_start, rx_done, rx_cleanup
+	rx_start, rx_done, rx_cleanup,
+	unlink_start
 };
 
 struct skb_data {	/* skb->cb is one of these */