author     Felipe Balbi <balbi@ti.com>  2014-04-21 11:15:12 -0400
committer  Felipe Balbi <balbi@ti.com>  2014-04-21 11:15:12 -0400
commit     9189a330936fe053d48e14c10a8d90aaef4408c9 (patch)
tree       820000776fa15e97c7aee757982e9e093d7abd35 /drivers/usb/gadget/u_ether.c
parent     252455c40316009cc791f00338ee2e367d2d2739 (diff)
Revert "usb: gadget: u_ether: move hardware transmit to RX NAPI"
This reverts commit 716fb91dfe1777bd6d5e598f3d3572214b3ed296.
That commit caused a regression that ends in the kernel
BUG() shown below:
[ 101.554300] g_ether gadget: full-speed config #1: CDC Subset/SAFE
[ 101.585186] ------------[ cut here ]------------
[ 101.600587] kernel BUG at include/linux/netdevice.h:495!
[ 101.615850] Internal error: Oops - BUG: 0 [#1] PREEMPT ARM
[ 101.645539] Modules linked in:
[ 101.660483] CPU: 0 PID: 0 Comm: swapper Not tainted 3.15.0-rc1+ #104
[ 101.690175] task: c05dc5c8 ti: c05d2000 task.ti: c05d2000
[ 101.705579] PC is at eth_start+0x64/0x8c
[ 101.720981] LR is at __netif_schedule+0x7c/0x90
[ 101.736455] pc : [<c0299174>] lr : [<c036a134>] psr: 60000093
[ 101.736455] sp : c05d3d18 ip : c05d3cf8 fp : c05d3d2c
[ 101.782340] r10: 00000000 r9 : c196c1f0 r8 : c196c1a0
[ 101.797823] r7 : 00000000 r6 : 00000002 r5 : c1976400 r4 : c1976400
[ 101.828058] r3 : 00000000 r2 : c05d3ce8 r1 : 00000001 r0 : 00000002
[ 101.858722] Flags: nZCv IRQs off FIQs on Mode SVC_32 ISA ARM Segment kernel
Reported-by: Robert Jarzmik <robert.jarzmik@free.fr>
Signed-off-by: Felipe Balbi <balbi@ti.com>
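For context: the faulting PC is in eth_start(), and the reverted patch had added a napi_enable(&dev->rx_napi) call there (removed again in the diff below). In a 3.15-rc1 tree, include/linux/netdevice.h:495 most likely falls inside napi_enable(). The sketch below is not part of this commit; it is an abridged, from-memory reconstruction of the 3.15-era helper, shown only to illustrate why enabling an already-enabled NAPI instance would trip a BUG_ON() at that spot (treat the exact line placement as an assumption):

/* Abridged sketch of the 3.15-era napi_enable(); not part of this patch.
 * netif_napi_add() leaves NAPI_STATE_SCHED set, napi_enable() clears it,
 * and napi_disable() sets it again, so a second napi_enable() without an
 * intervening napi_disable() finds the bit already clear and hits BUG_ON().
 */
static inline void napi_enable(struct napi_struct *n)
{
        /* fires when NAPI is already enabled (SCHED bit already cleared) */
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        /* (memory barrier elided in this sketch) */
        clear_bit(NAPI_STATE_SCHED, &n->state);
}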
Diffstat (limited to 'drivers/usb/gadget/u_ether.c')
-rw-r--r--  drivers/usb/gadget/u_ether.c  101
1 file changed, 35 insertions(+), 66 deletions(-)
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 50d09c289137..b7d4f82872b7 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -48,8 +48,6 @@
 
 #define UETH__VERSION  "29-May-2008"
 
-#define GETHER_NAPI_WEIGHT  32
-
 struct eth_dev {
         /* lock is held while accessing port_usb
          */
@@ -74,7 +72,6 @@ struct eth_dev {
                         struct sk_buff_head *list);
 
         struct work_struct  work;
-        struct napi_struct  rx_napi;
 
         unsigned long  todo;
 #define WORK_RX_MEMORY  0
@@ -256,16 +253,18 @@ enomem:
                 DBG(dev, "rx submit --> %d\n", retval);
                 if (skb)
                         dev_kfree_skb_any(skb);
+                spin_lock_irqsave(&dev->req_lock, flags);
+                list_add(&req->list, &dev->rx_reqs);
+                spin_unlock_irqrestore(&dev->req_lock, flags);
         }
         return retval;
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-        struct sk_buff  *skb = req->context;
+        struct sk_buff  *skb = req->context, *skb2;
         struct eth_dev  *dev = ep->driver_data;
         int             status = req->status;
-        bool            rx_queue = 0;
 
         switch (status) {
 
@@ -289,8 +288,30 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                 } else {
                         skb_queue_tail(&dev->rx_frames, skb);
                 }
-                if (!status)
-                        rx_queue = 1;
+                skb = NULL;
+
+                skb2 = skb_dequeue(&dev->rx_frames);
+                while (skb2) {
+                        if (status < 0
+                                        || ETH_HLEN > skb2->len
+                                        || skb2->len > VLAN_ETH_FRAME_LEN) {
+                                dev->net->stats.rx_errors++;
+                                dev->net->stats.rx_length_errors++;
+                                DBG(dev, "rx length %d\n", skb2->len);
+                                dev_kfree_skb_any(skb2);
+                                goto next_frame;
+                        }
+                        skb2->protocol = eth_type_trans(skb2, dev->net);
+                        dev->net->stats.rx_packets++;
+                        dev->net->stats.rx_bytes += skb2->len;
+
+                        /* no buffer copies needed, unless hardware can't
+                         * use skb buffers.
+                         */
+                        status = netif_rx(skb2);
+next_frame:
+                        skb2 = skb_dequeue(&dev->rx_frames);
+                }
                 break;
 
         /* software-driven interface shutdown */
@@ -313,20 +334,22 @@ quiesce:
                 /* FALLTHROUGH */
 
         default:
-                rx_queue = 1;
-                dev_kfree_skb_any(skb);
                 dev->net->stats.rx_errors++;
                 DBG(dev, "rx status %d\n", status);
                 break;
         }
 
+        if (skb)
+                dev_kfree_skb_any(skb);
+        if (!netif_running(dev->net)) {
 clean:
-        spin_lock(&dev->req_lock);
-        list_add(&req->list, &dev->rx_reqs);
-        spin_unlock(&dev->req_lock);
-
-        if (rx_queue && likely(napi_schedule_prep(&dev->rx_napi)))
-                __napi_schedule(&dev->rx_napi);
+                spin_lock(&dev->req_lock);
+                list_add(&req->list, &dev->rx_reqs);
+                spin_unlock(&dev->req_lock);
+                req = NULL;
+        }
+        if (req)
+                rx_submit(dev, req, GFP_ATOMIC);
 }
 
 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -391,24 +414,16 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
         struct usb_request  *req;
         unsigned long       flags;
-        int                 rx_counts = 0;
 
         /* fill unused rxq slots with some skb */
         spin_lock_irqsave(&dev->req_lock, flags);
         while (!list_empty(&dev->rx_reqs)) {
-
-                if (++rx_counts > qlen(dev->gadget, dev->qmult))
-                        break;
-
                 req = container_of(dev->rx_reqs.next,
                                 struct usb_request, list);
                 list_del_init(&req->list);
                 spin_unlock_irqrestore(&dev->req_lock, flags);
 
                 if (rx_submit(dev, req, gfp_flags) < 0) {
-                        spin_lock_irqsave(&dev->req_lock, flags);
-                        list_add(&req->list, &dev->rx_reqs);
-                        spin_unlock_irqrestore(&dev->req_lock, flags);
                         defer_kevent(dev, WORK_RX_MEMORY);
                         return;
                 }
@@ -418,41 +433,6 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
         spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
-static int gether_poll(struct napi_struct *napi, int budget)
-{
-        struct eth_dev  *dev = container_of(napi, struct eth_dev, rx_napi);
-        struct sk_buff  *skb;
-        unsigned int    work_done = 0;
-        int             status = 0;
-
-        while ((skb = skb_dequeue(&dev->rx_frames))) {
-                if (status < 0
-                                || ETH_HLEN > skb->len
-                                || skb->len > VLAN_ETH_FRAME_LEN) {
-                        dev->net->stats.rx_errors++;
-                        dev->net->stats.rx_length_errors++;
-                        DBG(dev, "rx length %d\n", skb->len);
-                        dev_kfree_skb_any(skb);
-                        continue;
-                }
-                skb->protocol = eth_type_trans(skb, dev->net);
-                dev->net->stats.rx_packets++;
-                dev->net->stats.rx_bytes += skb->len;
-
-                status = netif_rx_ni(skb);
-        }
-
-        if (netif_running(dev->net)) {
-                rx_fill(dev, GFP_KERNEL);
-                work_done++;
-        }
-
-        if (work_done < budget)
-                napi_complete(&dev->rx_napi);
-
-        return work_done;
-}
-
 static void eth_work(struct work_struct *work)
 {
         struct eth_dev  *dev = container_of(work, struct eth_dev, work);
@@ -645,7 +625,6 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
         /* and open the tx floodgates */
         atomic_set(&dev->tx_qlen, 0);
         netif_wake_queue(dev->net);
-        napi_enable(&dev->rx_napi);
 }
 
 static int eth_open(struct net_device *net)
@@ -672,7 +651,6 @@ static int eth_stop(struct net_device *net)
         unsigned long  flags;
 
         VDBG(dev, "%s\n", __func__);
-        napi_disable(&dev->rx_napi);
         netif_stop_queue(net);
 
         DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
@@ -790,7 +768,6 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
                 return ERR_PTR(-ENOMEM);
 
         dev = netdev_priv(net);
-        netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
         spin_lock_init(&dev->lock);
         spin_lock_init(&dev->req_lock);
         INIT_WORK(&dev->work, eth_work);
@@ -853,7 +830,6 @@ struct net_device *gether_setup_name_default(const char *netname)
                 return ERR_PTR(-ENOMEM);
 
         dev = netdev_priv(net);
-        netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
         spin_lock_init(&dev->lock);
         spin_lock_init(&dev->req_lock);
         INIT_WORK(&dev->work, eth_work);
@@ -1137,7 +1113,6 @@ void gether_disconnect(struct gether *link)
 {
         struct eth_dev      *dev = link->ioport;
         struct usb_request  *req;
-        struct sk_buff      *skb;
 
         WARN_ON(!dev);
         if (!dev)
@@ -1164,12 +1139,6 @@ void gether_disconnect(struct gether *link)
                 spin_lock(&dev->req_lock);
         }
         spin_unlock(&dev->req_lock);
-
-        spin_lock(&dev->rx_frames.lock);
-        while ((skb = __skb_dequeue(&dev->rx_frames)))
-                dev_kfree_skb_any(skb);
-        spin_unlock(&dev->rx_frames.lock);
-
         link->in_ep->driver_data = NULL;
         link->in_ep->desc = NULL;
 