author		David S. Miller <davem@davemloft.net>	2012-05-16 22:17:37 -0400
committer	David S. Miller <davem@davemloft.net>	2012-05-16 22:17:37 -0400
commit		028940342a906db8da014a7603a0deddc2c323dd (patch)
tree		688dbc38a3e218f2493d311b1d70a67668837347 /drivers/net/usb/usbnet.c
parent		be3eed2e96340d3c7a4d1ea1d63e7bd6095d1e34 (diff)
parent		0e93b4b304ae052ba1bc73f6d34a68556fe93429 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers/net/usb/usbnet.c')
-rw-r--r--	drivers/net/usb/usbnet.c | 54
1 file changed, 38 insertions(+), 16 deletions(-)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 80b837c88f0d..9f58330f1312 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -282,17 +282,32 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
 }
 EXPORT_SYMBOL_GPL(usbnet_change_mtu);
 
+/* The caller must hold list->lock */
+static void __usbnet_queue_skb(struct sk_buff_head *list,
+			struct sk_buff *newsk, enum skb_state state)
+{
+	struct skb_data *entry = (struct skb_data *) newsk->cb;
+
+	__skb_queue_tail(list, newsk);
+	entry->state = state;
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
  * completion callbacks.  2.5 should have fixed those bugs...
  */
 
-static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
+static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
+		struct sk_buff_head *list, enum skb_state state)
 {
 	unsigned long flags;
+	enum skb_state old_state;
+	struct skb_data *entry = (struct skb_data *) skb->cb;
 
 	spin_lock_irqsave(&list->lock, flags);
+	old_state = entry->state;
+	entry->state = state;
 	__skb_unlink(skb, list);
 	spin_unlock(&list->lock);
 	spin_lock(&dev->done.lock);
@@ -300,6 +315,7 @@ static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_hea
 	if (dev->done.qlen == 1)
 		tasklet_schedule(&dev->bh);
 	spin_unlock_irqrestore(&dev->done.lock, flags);
+	return old_state;
 }
 
 /* some work can't be done in tasklets, so we use keventd
@@ -340,7 +356,6 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 	entry = (struct skb_data *) skb->cb;
 	entry->urb = urb;
 	entry->dev = dev;
-	entry->state = rx_start;
 	entry->length = 0;
 
 	usb_fill_bulk_urb (urb, dev->udev, dev->in,
@@ -372,7 +387,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 			tasklet_schedule (&dev->bh);
 			break;
 		case 0:
-			__skb_queue_tail (&dev->rxq, skb);
+			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
 		}
 	} else {
 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@ -423,16 +438,17 @@ static void rx_complete (struct urb *urb)
 	struct skb_data		*entry = (struct skb_data *) skb->cb;
 	struct usbnet		*dev = entry->dev;
 	int			urb_status = urb->status;
+	enum skb_state		state;
 
 	skb_put (skb, urb->actual_length);
-	entry->state = rx_done;
+	state = rx_done;
 	entry->urb = NULL;
 
 	switch (urb_status) {
 	/* success */
 	case 0:
 		if (skb->len < dev->net->hard_header_len) {
-			entry->state = rx_cleanup;
+			state = rx_cleanup;
 			dev->net->stats.rx_errors++;
 			dev->net->stats.rx_length_errors++;
 			netif_dbg(dev, rx_err, dev->net,
@@ -471,7 +487,7 @@ static void rx_complete (struct urb *urb)
471 "rx throttle %d\n", urb_status); 487 "rx throttle %d\n", urb_status);
472 } 488 }
473block: 489block:
474 entry->state = rx_cleanup; 490 state = rx_cleanup;
475 entry->urb = urb; 491 entry->urb = urb;
476 urb = NULL; 492 urb = NULL;
477 break; 493 break;
@@ -482,17 +498,18 @@ block:
 		// FALLTHROUGH
 
 	default:
-		entry->state = rx_cleanup;
+		state = rx_cleanup;
 		dev->net->stats.rx_errors++;
 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
 		break;
 	}
 
-	defer_bh(dev, skb, &dev->rxq);
+	state = defer_bh(dev, skb, &dev->rxq, state);
 
 	if (urb) {
 		if (netif_running (dev->net) &&
-		    !test_bit (EVENT_RX_HALT, &dev->flags)) {
+		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
+		    state != unlink_start) {
 			rx_submit (dev, urb, GFP_ATOMIC);
 			usb_mark_last_busy(dev->udev);
 			return;
@@ -579,16 +596,23 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
 {
 	unsigned long		flags;
-	struct sk_buff		*skb, *skbnext;
+	struct sk_buff		*skb;
 	int			count = 0;
 
 	spin_lock_irqsave (&q->lock, flags);
-	skb_queue_walk_safe(q, skb, skbnext) {
+	while (!skb_queue_empty(q)) {
 		struct skb_data		*entry;
 		struct urb		*urb;
 		int			retval;
 
-		entry = (struct skb_data *) skb->cb;
+		skb_queue_walk(q, skb) {
+			entry = (struct skb_data *) skb->cb;
+			if (entry->state != unlink_start)
+				goto found;
+		}
+		break;
+found:
+		entry->state = unlink_start;
 		urb = entry->urb;
 
 		/*
@@ -1040,8 +1064,7 @@ static void tx_complete (struct urb *urb)
 	}
 
 	usb_autopm_put_interface_async(dev->intf);
-	entry->state = tx_done;
-	defer_bh(dev, skb, &dev->txq);
+	(void) defer_bh(dev, skb, &dev->txq, tx_done);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1097,7 +1120,6 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 	entry = (struct skb_data *) skb->cb;
 	entry->urb = urb;
 	entry->dev = dev;
-	entry->state = tx_start;
 	entry->length = length;
 
 	usb_fill_bulk_urb (urb, dev->udev, dev->out,
@@ -1156,7 +1178,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 		break;
 	case 0:
 		net->trans_start = jiffies;
-		__skb_queue_tail (&dev->txq, skb);
+		__usbnet_queue_skb(&dev->txq, skb, tx_start);
 		if (dev->txq.qlen >= TX_QLEN (dev))
 			netif_stop_queue (net);
 	}
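
The usbnet.c changes brought in by this merge make the per-skb state part of the data protected by the queue lock: __usbnet_queue_skb() sets the state while enqueueing, defer_bh() swaps the state and returns the value it replaced, unlink_urbs() marks entries unlink_start before touching their URBs, and rx_complete() resubmits only when the returned state shows no unlink has begun. Below is a minimal user-space sketch of that locking pattern, not kernel code; the helper names queue_entry, defer_entry and try_start_unlink are invented for illustration and use a single pthread mutex in place of the rxq/txq and done locks.

/*
 * Sketch of the state-under-lock pattern from the patch above.
 * Illustrative only; compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

enum skb_state { illegal = 0, rx_start, rx_done, rx_cleanup, unlink_start };

struct entry {
	enum skb_state state;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Roughly __usbnet_queue_skb(): enqueue and set the state under one lock. */
static void queue_entry(struct entry *e, enum skb_state state)
{
	pthread_mutex_lock(&list_lock);
	/* ... add e to the pending queue here ... */
	e->state = state;
	pthread_mutex_unlock(&list_lock);
}

/* Roughly defer_bh(): move to the done list, report the replaced state. */
static enum skb_state defer_entry(struct entry *e, enum skb_state new_state)
{
	enum skb_state old;

	pthread_mutex_lock(&list_lock);
	old = e->state;
	e->state = new_state;
	/* ... unlink e from the pending queue, append to the done list ... */
	pthread_mutex_unlock(&list_lock);
	return old;
}

/* Roughly unlink_urbs(): claim only entries not already being unlinked. */
static int try_start_unlink(struct entry *e)
{
	int claimed = 0;

	pthread_mutex_lock(&list_lock);
	if (e->state != unlink_start) {
		e->state = unlink_start;
		claimed = 1;
	}
	pthread_mutex_unlock(&list_lock);
	return claimed;
}

int main(void)
{
	struct entry e;

	queue_entry(&e, rx_start);		/* submit path */
	if (try_start_unlink(&e))		/* unlink path wins the race */
		printf("unlink started\n");
	/* completion path: resubmit only if no unlink raced with us */
	if (defer_entry(&e, rx_done) != unlink_start)
		printf("resubmit\n");
	else
		printf("skip resubmit, unlink in progress\n");
	return 0;
}

The point mirrored here is that defer_bh() hands back the state it found under the lock, so the completion handler can tell, without a second trip through the lock, whether unlink_urbs() already claimed the buffer and resubmission must be skipped.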