author     David Brownell <dbrownell@users.sourceforge.net>   2008-07-07 15:16:08 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>                2008-08-13 20:32:53 -0400
commit     937ef73d5075997a8d1777abf217a48bef2ce029
tree       a52d840362b3421116c29aa9a2636a8c30d70154
parent     e8b24450a635bbbd3a2b4c2649eef060c742ebc0
USB: serial gadget: rx path data loss fixes
Update RX path handling in new serial gadget code to cope better with
RX blockage: queue every RX packet until its contents can safely be
passed up to the ldisc. Most of the RX path work is now done in the
RX tasklet, instead of just the final "push to ldisc" step. This
addresses some cases of data loss:
- A longstanding serial gadget bug: when tty_insert_flip_string()
didn't copy the entire buffer, the rest of the characters were
dropped! Now that packet stays queued until the rest of its data
is pushed to the ldisc.
- Another longstanding issue: in the unlikely case that an RX
transfer returns data and also reports a fault, that data is
no longer discarded.
- In the recently added RX throttling logic: it needs to stop
pushing data into the TTY layer, instead of just not submitting
new USB read requests. When the TTY is throttled long enough,
backpressure will eventually make the OUT endpoint NAK.
Also: remove an #ifdef that is no longer necessary, and start switching
to a better convention for debug messages (prefix them with the tty name).
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
 drivers/usb/gadget/u_serial.c | 236
 1 file changed, 146 insertions, 90 deletions
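Before the diff itself, here is a minimal standalone sketch of the bookkeeping behind the first data-loss fix listed above: a packet stays queued until every one of its bytes has been accepted, with n_read recording how much of the head packet was already pushed. This is plain userspace C, not the driver code; sink_accept() is a hypothetical stand-in for tty_insert_flip_string(), which may accept fewer bytes than it is offered.

/* Standalone sketch of the partial-push bookkeeping; not kernel code. */
#include <stdio.h>

struct packet {
	const char	*buf;
	size_t		len;
};

static size_t n_read;	/* bytes of the head packet already pushed */

/* Hypothetical consumer with limited room, standing in for
 * tty_insert_flip_string(); it may take fewer bytes than offered.
 */
static size_t sink_accept(const char *data, size_t len, size_t room)
{
	size_t count = len < room ? len : room;

	printf("accepted %zu of %zu bytes: %.*s\n", count, len, (int)count, data);
	return count;
}

/* Returns 1 once the whole packet has been consumed and may be recycled. */
static int push_packet(const struct packet *req, size_t room)
{
	const char	*data = req->buf + n_read;	/* resume where we stopped */
	size_t		size = req->len - n_read;
	size_t		count = sink_accept(data, size, room);

	if (count != size) {
		n_read += count;	/* keep the packet queued; nothing is dropped */
		return 0;
	}
	n_read = 0;			/* fully pushed; the request can be reused */
	return 1;
}

int main(void)
{
	struct packet req = { "hello, world", 12 };

	while (!push_packet(&req, 5))	/* retry until the sink has drained */
		;
	return 0;
}

In the driver the same pattern is driven from the RX tasklet, with the retry happening on the next tasklet run or on tty_unthrottle().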
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index abf9505d3a75..6641efa55639 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -52,6 +52,8 @@
  * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
  */
 
+#define PREFIX	"ttyGS"
+
 /*
  * gserial is the lifecycle interface, used by USB functions
  * gs_port is the I/O nexus, used by the tty driver
@@ -100,6 +102,8 @@ struct gs_port {
 	wait_queue_head_t	close_wait;	/* wait for last close */
 
 	struct list_head	read_pool;
+	struct list_head	read_queue;
+	unsigned		n_read;
 	struct tasklet_struct	push;
 
 	struct list_head	write_pool;
@@ -367,11 +371,9 @@ __acquires(&port->port_lock)
 		req->length = len;
 		list_del(&req->list);
 
-#ifdef VERBOSE_DEBUG
-		pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n",
-				__func__, in->name, len, *((u8 *)req->buf),
+		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
+				port->port_num, len, *((u8 *)req->buf),
 				*((u8 *)req->buf+1), *((u8 *)req->buf+2));
-#endif
 
 		/* Drop lock while we call out of driver; completions
 		 * could be issued while we do so.  Disconnection may
@@ -401,56 +403,6 @@ __acquires(&port->port_lock)
 	return status;
 }
 
-static void gs_rx_push(unsigned long _port)
-{
-	struct gs_port		*port = (void *)_port;
-	struct tty_struct	*tty = port->port_tty;
-
-	/* With low_latency, tty_flip_buffer_push() doesn't put its
-	 * real work through a workqueue, so the ldisc has a better
-	 * chance to keep up with peak USB data rates.
-	 */
-	if (tty) {
-		tty_flip_buffer_push(tty);
-		wake_up_interruptible(&tty->read_wait);
-	}
-}
-
-/*
- * gs_recv_packet
- *
- * Called for each USB packet received.  Reads the packet
- * header and stuffs the data in the appropriate tty buffer.
- * Returns 0 if successful, or a negative error number.
- *
- * Called during USB completion routine, on interrupt time.
- * With port_lock.
- */
-static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size)
-{
-	unsigned		len;
-	struct tty_struct	*tty;
-
-	/* I/O completions can continue for a while after close(), until the
-	 * request queue empties.  Just discard any data we receive, until
-	 * something reopens this TTY ... as if there were no HW flow control.
-	 */
-	tty = port->port_tty;
-	if (tty == NULL) {
-		pr_vdebug("%s: ttyGS%d, after close\n",
-				__func__, port->port_num);
-		return -EIO;
-	}
-
-	len = tty_insert_flip_string(tty, packet, size);
-	if (len > 0)
-		tasklet_schedule(&port->push);
-	if (len < size)
-		pr_debug("%s: ttyGS%d, drop %d bytes\n",
-				__func__, port->port_num, size - len);
-	return 0;
-}
-
 /*
  * Context: caller owns port_lock, and port_usb is set
  */
@@ -469,9 +421,9 @@ __acquires(&port->port_lock)
 		int			status;
 		struct tty_struct	*tty;
 
-		/* no more rx if closed or throttled */
+		/* no more rx if closed */
 		tty = port->port_tty;
-		if (!tty || test_bit(TTY_THROTTLED, &tty->flags))
+		if (!tty)
 			break;
 
 		req = list_entry(pool->next, struct usb_request, list);
@@ -500,36 +452,134 @@ __acquires(&port->port_lock)
 	return started;
 }
 
-static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+/*
+ * RX tasklet takes data out of the RX queue and hands it up to the TTY
+ * layer until it refuses to take any more data (or is throttled back).
+ * Then it issues reads for any further data.
+ *
+ * If the RX queue becomes full enough that no usb_request is queued,
+ * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
+ * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
+ * can be buffered before the TTY layer's buffers (currently 64 KB).
+ */
+static void gs_rx_push(unsigned long _port)
 {
-	int		status;
-	struct gs_port	*port = ep->driver_data;
+	struct gs_port		*port = (void *)_port;
+	struct tty_struct	*tty;
+	struct list_head	*queue = &port->read_queue;
+	bool			disconnect = false;
+	bool			do_push = false;
 
-	spin_lock(&port->port_lock);
-	list_add(&req->list, &port->read_pool);
+	/* hand any queued data to the tty */
+	spin_lock_irq(&port->port_lock);
+	tty = port->port_tty;
+	while (!list_empty(queue)) {
+		struct usb_request	*req;
 
-	switch (req->status) {
-	case 0:
-		/* normal completion */
-		status = gs_recv_packet(port, req->buf, req->actual);
-		if (status && status != -EIO)
-			pr_debug("%s: %s %s err %d\n",
-				__func__, "recv", ep->name, status);
-		gs_start_rx(port);
-		break;
+		req = list_first_entry(queue, struct usb_request, list);
 
-	case -ESHUTDOWN:
-		/* disconnect */
-		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
-		break;
+		/* discard data if tty was closed */
+		if (!tty)
+			goto recycle;
 
-	default:
-		/* presumably a transient fault */
-		pr_warning("%s: unexpected %s status %d\n",
-				__func__, ep->name, req->status);
-		gs_start_rx(port);
-		break;
+		/* leave data queued if tty was rx throttled */
+		if (test_bit(TTY_THROTTLED, &tty->flags))
+			break;
+
+		switch (req->status) {
+		case -ESHUTDOWN:
+			disconnect = true;
+			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
+			break;
+
+		default:
+			/* presumably a transient fault */
+			pr_warning(PREFIX "%d: unexpected RX status %d\n",
+					port->port_num, req->status);
+			/* FALLTHROUGH */
+		case 0:
+			/* normal completion */
+			break;
+		}
+
+		/* push data to (open) tty */
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned	size = req->actual;
+			unsigned	n;
+			int		count;
+
+			/* we may have pushed part of this packet already... */
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+
+			count = tty_insert_flip_string(tty, packet, size);
+			if (count)
+				do_push = true;
+			if (count != size) {
+				/* stop pushing; TTY layer can't handle more */
+				port->n_read += count;
+				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
+						port->port_num,
+						count, req->actual);
+				break;
+			}
+			port->n_read = 0;
+		}
+recycle:
+		list_move(&req->list, &port->read_pool);
+	}
+
+	/* Push from tty to ldisc; this is immediate with low_latency, and
+	 * may trigger callbacks to this driver ... so drop the spinlock.
+	 */
+	if (tty && do_push) {
+		spin_unlock_irq(&port->port_lock);
+		tty_flip_buffer_push(tty);
+		wake_up_interruptible(&tty->read_wait);
+		spin_lock_irq(&port->port_lock);
+
+		/* tty may have been closed */
+		tty = port->port_tty;
 	}
+
+
+	/* We want our data queue to become empty ASAP, keeping data
+	 * in the tty and ldisc (not here).  If we couldn't push any
+	 * this time around, there may be trouble unless there's an
+	 * implicit tty_unthrottle() call on its way...
+	 *
+	 * REVISIT we should probably add a timer to keep the tasklet
+	 * from starving ... but it's not clear that case ever happens.
+	 */
+	if (!list_empty(queue) && tty) {
+		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+			if (do_push)
+				tasklet_schedule(&port->push);
+			else
+				pr_warning(PREFIX "%d: RX not scheduled?\n",
+					port->port_num);
+		}
+	}
+
+	/* If we're still connected, refill the USB RX queue. */
+	if (!disconnect && port->port_usb)
+		gs_start_rx(port);
+
+	spin_unlock_irq(&port->port_lock);
+}
+
+static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gs_port	*port = ep->driver_data;
+
+	/* Queue all received data until the tty layer is ready for it. */
+	spin_lock(&port->port_lock);
+	list_add_tail(&req->list, &port->read_queue);
+	tasklet_schedule(&port->push);
 	spin_unlock(&port->port_lock);
 }
 
@@ -625,6 +675,7 @@ static int gs_start_io(struct gs_port *port)
 	}
 
 	/* queue read requests */
+	port->n_read = 0;
 	started = gs_start_rx(port);
 
 	/* unblock any pending writes into our circular buffer */
@@ -633,9 +684,10 @@ static int gs_start_io(struct gs_port *port)
 	} else {
 		gs_free_requests(ep, head);
 		gs_free_requests(port->port_usb->in, &port->write_pool);
+		status = -EIO;
 	}
 
-	return started ? 0 : status;
+	return status;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -809,8 +861,6 @@ static void gs_close(struct tty_struct *tty, struct file *file)
 	else
 		gs_buf_clear(&port->port_write_buf);
 
-	tasklet_kill(&port->push);
-
 	tty->driver_data = NULL;
 	port->port_tty = NULL;
 
@@ -911,15 +961,17 @@ static void gs_unthrottle(struct tty_struct *tty)
 {
 	struct gs_port		*port = tty->driver_data;
 	unsigned long		flags;
-	unsigned		started = 0;
 
 	spin_lock_irqsave(&port->port_lock, flags);
-	if (port->port_usb)
-		started = gs_start_rx(port);
+	if (port->port_usb) {
+		/* Kickstart read queue processing.  We don't do xon/xoff,
+		 * rts/cts, or other handshaking with the host, but if the
+		 * read queue backs up enough we'll be NAKing OUT packets.
+		 */
+		tasklet_schedule(&port->push);
+		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
+	}
 	spin_unlock_irqrestore(&port->port_lock, flags);
-
-	pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n",
-			port->port_num, started);
 }
 
 static const struct tty_operations gs_tty_ops = {
@@ -953,6 +1005,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
 	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
 
 	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queue);
 	INIT_LIST_HEAD(&port->write_pool);
 
 	port->port_num = port_num;
@@ -997,7 +1050,7 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count)
 
 	gs_tty_driver->owner = THIS_MODULE;
 	gs_tty_driver->driver_name = "g_serial";
-	gs_tty_driver->name = "ttyGS";
+	gs_tty_driver->name = PREFIX;
 	/* uses dynamically assigned dev_t values */
 
 	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1104,6 +1157,8 @@ void gserial_cleanup(void)
 		ports[i].port = NULL;
 		mutex_unlock(&ports[i].lock);
 
+		tasklet_kill(&port->push);
+
 		/* wait for old opens to finish */
 		wait_event(port->close_wait, gs_closed(port));
 
@@ -1241,6 +1296,7 @@ void gserial_disconnect(struct gserial *gser)
 	if (port->open_count == 0 && !port->openclose)
 		gs_buf_free(&port->port_write_buf);
 	gs_free_requests(gser->out, &port->read_pool);
+	gs_free_requests(gser->out, &port->read_queue);
 	gs_free_requests(gser->in, &port->write_pool);
 	spin_unlock_irqrestore(&port->port_lock, flags);
 }