author		Jim Sung <jsung@syncadence.com>		2010-11-04 21:47:51 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-11-11 10:03:48 -0500
commit		28609d4083bcd4879e951b0c4ecf4c3a88761261
tree		c5b5863370ca4ee125e44ffcaa5f1afe86986fdf /drivers/usb
parent		58c0d9d70109bd7e82bdb9517007311a48499960
usb: subtle increased memory usage in u_serial
OK, the USB gadget serial driver actually has a couple of problems. On
gs_open(), it always allocates and queues an additional QUEUE_SIZE (16)
worth of requests, so with a loop like this:
i=1 ; while echo $i > /dev/ttyGS0 ; do let i++ ; done
eventually we run into OOM (Out of Memory).
Technically it is not a leak, since everything is freed when the USB
connection is broken; nothing, however, is freed on gs_close().
With a USB device/gadget controller driver that has limited resources
(e.g., Marvell's controller has MAX_XDS_FOR_TR_CALLS of 64 for transmit
and receive), even after just 4 invocations of
stty -F /dev/ttyGS0
we cannot transmit anymore. We can still receive (though not necessarily
reliably), since we now have 16 * 4 = 64 descriptors/buffers queued, but
the device is otherwise unusable.
Signed-off-by: Jim Sung <jsung@syncadence.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb')
-rw-r--r--	drivers/usb/gadget/u_serial.c	54
1 files changed, 40 insertions, 14 deletions
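In short, the patch gives each direction an allocated/started pair of
counters, caps both at QUEUE_SIZE, and only tops the pools up to that cap
on later opens. The stand-alone user-space sketch below is an illustration
only, not the driver code; it models that counting scheme using QUEUE_SIZE
and the four-open scenario from the description above.

/* sketch.c - model of the bounded request accounting introduced by this
 * patch; build with "cc -o sketch sketch.c" and run it to see the totals
 * stay at QUEUE_SIZE across repeated opens instead of growing.
 */
#include <stdio.h>

#define QUEUE_SIZE 16

struct dir_state {
	int allocated;	/* requests allocated so far, capped at QUEUE_SIZE */
	int started;	/* requests currently queued to the endpoint */
};

/* like gs_alloc_requests() after the patch: only top up to QUEUE_SIZE */
static void alloc_requests(struct dir_state *d)
{
	int n = QUEUE_SIZE - d->allocated;

	while (n-- > 0)
		d->allocated++;
}

/* like gs_start_rx()/gs_start_tx() after the patch: stop at the cap */
static void queue_requests(struct dir_state *d)
{
	while (d->started < d->allocated && d->started < QUEUE_SIZE)
		d->started++;
}

int main(void)
{
	struct dir_state rx = { 0, 0 };
	int open;

	/* four opens of /dev/ttyGS0: previously each open would have added
	 * another QUEUE_SIZE requests (16 * 4 = 64); now the totals stay
	 * bounded at QUEUE_SIZE */
	for (open = 1; open <= 4; open++) {
		alloc_requests(&rx);
		queue_requests(&rx);
		printf("open #%d: allocated=%d started=%d\n",
		       open, rx.allocated, rx.started);
	}
	return 0;
}

Without the cap (the old behaviour of allocating QUEUE_SIZE more requests
on every open), allocated would reach 64 after the fourth open, which is
exactly the MAX_XDS_FOR_TR_CALLS limit mentioned above.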
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 01e5354a4c20..40f7716b31fc 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -105,11 +105,15 @@ struct gs_port {
 	wait_queue_head_t	close_wait;	/* wait for last close */
 
 	struct list_head	read_pool;
+	int read_started;
+	int read_allocated;
 	struct list_head	read_queue;
 	unsigned		n_read;
 	struct tasklet_struct	push;
 
 	struct list_head	write_pool;
+	int write_started;
+	int write_allocated;
 	struct gs_buf		port_write_buf;
 	wait_queue_head_t	drain_wait;	/* wait while writes drain */
 
@@ -363,6 +367,9 @@ __acquires(&port->port_lock)
 		struct usb_request	*req;
 		int			len;
 
+		if (port->write_started >= QUEUE_SIZE)
+			break;
+
 		req = list_entry(pool->next, struct usb_request, list);
 		len = gs_send_packet(port, req->buf, in->maxpacket);
 		if (len == 0) {
@@ -397,6 +404,8 @@ __acquires(&port->port_lock)
 			break;
 		}
 
+		port->write_started++;
+
 		/* abort immediately after disconnect */
 		if (!port->port_usb)
 			break;
@@ -418,7 +427,6 @@ __acquires(&port->port_lock)
 {
 	struct list_head	*pool = &port->read_pool;
 	struct usb_ep		*out = port->port_usb->out;
-	unsigned		started = 0;
 
 	while (!list_empty(pool)) {
 		struct usb_request	*req;
@@ -430,6 +438,9 @@ __acquires(&port->port_lock)
 		if (!tty)
 			break;
 
+		if (port->read_started >= QUEUE_SIZE)
+			break;
+
 		req = list_entry(pool->next, struct usb_request, list);
 		list_del(&req->list);
 		req->length = out->maxpacket;
@@ -447,13 +458,13 @@ __acquires(&port->port_lock)
 			list_add(&req->list, pool);
 			break;
 		}
-		started++;
+		port->read_started++;
 
 		/* abort immediately after disconnect */
 		if (!port->port_usb)
 			break;
 	}
-	return started;
+	return port->read_started;
 }
 
 /*
@@ -535,6 +546,7 @@ static void gs_rx_push(unsigned long _port)
 		}
 recycle:
 		list_move(&req->list, &port->read_pool);
+		port->read_started--;
 	}
 
 	/* Push from tty to ldisc; without low_latency set this is handled by
@@ -587,6 +599,7 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
 
 	spin_lock(&port->port_lock);
 	list_add(&req->list, &port->write_pool);
+	port->write_started--;
 
 	switch (req->status) {
 	default:
@@ -608,7 +621,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
 	spin_unlock(&port->port_lock);
 }
 
-static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
+static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
+							 int *allocated)
 {
 	struct usb_request	*req;
 
@@ -616,25 +630,31 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
 		req = list_entry(head->next, struct usb_request, list);
 		list_del(&req->list);
 		gs_free_req(ep, req);
+		if (allocated)
+			(*allocated)--;
 	}
 }
 
 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
-		void (*fn)(struct usb_ep *, struct usb_request *))
+		void (*fn)(struct usb_ep *, struct usb_request *),
+		int *allocated)
 {
 	int			i;
 	struct usb_request	*req;
+	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
 
 	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
 	 * do quite that many this time, don't fail ... we just won't
 	 * be as speedy as we might otherwise be.
 	 */
-	for (i = 0; i < QUEUE_SIZE; i++) {
+	for (i = 0; i < n; i++) {
 		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
 		if (!req)
 			return list_empty(head) ? -ENOMEM : 0;
 		req->complete = fn;
 		list_add_tail(&req->list, head);
+		if (allocated)
+			(*allocated)++;
 	}
 	return 0;
 }
@@ -661,14 +681,15 @@ static int gs_start_io(struct gs_port *port)
 	 * configurations may use different endpoints with a given port;
 	 * and high speed vs full speed changes packet sizes too.
 	 */
-	status = gs_alloc_requests(ep, head, gs_read_complete);
+	status = gs_alloc_requests(ep, head, gs_read_complete,
+		&port->read_allocated);
 	if (status)
 		return status;
 
 	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
-			gs_write_complete);
+			gs_write_complete, &port->write_allocated);
 	if (status) {
-		gs_free_requests(ep, head);
+		gs_free_requests(ep, head, &port->read_allocated);
 		return status;
 	}
 
@@ -680,8 +701,9 @@ static int gs_start_io(struct gs_port *port)
 	if (started) {
 		tty_wakeup(port->port_tty);
 	} else {
-		gs_free_requests(ep, head);
-		gs_free_requests(port->port_usb->in, &port->write_pool);
+		gs_free_requests(ep, head, &port->read_allocated);
+		gs_free_requests(port->port_usb->in, &port->write_pool,
+			&port->write_allocated);
 		status = -EIO;
 	}
 
@@ -1315,8 +1337,12 @@ void gserial_disconnect(struct gserial *gser)
 	spin_lock_irqsave(&port->port_lock, flags);
 	if (port->open_count == 0 && !port->openclose)
 		gs_buf_free(&port->port_write_buf);
-	gs_free_requests(gser->out, &port->read_pool);
-	gs_free_requests(gser->out, &port->read_queue);
-	gs_free_requests(gser->in, &port->write_pool);
+	gs_free_requests(gser->out, &port->read_pool, NULL);
+	gs_free_requests(gser->out, &port->read_queue, NULL);
+	gs_free_requests(gser->in, &port->write_pool, NULL);
+
+	port->read_allocated = port->read_started =
+		port->write_allocated = port->write_started = 0;
+
 	spin_unlock_irqrestore(&port->port_lock, flags);
 }