Diffstat (limited to 'drivers/usb/gadget/u_serial.c')
-rw-r--r--	drivers/usb/gadget/u_serial.c	| 290
1 files changed, 187 insertions, 103 deletions
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index abf9505d3a75..53d59287f2bc 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -52,13 +52,16 @@
  * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
  */
 
+#define PREFIX	"ttyGS"
+
 /*
  * gserial is the lifecycle interface, used by USB functions
  * gs_port is the I/O nexus, used by the tty driver
  * tty_struct links to the tty/filesystem framework
  *
  * gserial <---> gs_port ... links will be null when the USB link is
- *	inactive; managed by gserial_{connect,disconnect}().
+ *	inactive; managed by gserial_{connect,disconnect}().  each gserial
+ *	instance can wrap its own USB control protocol.
  *	gserial->ioport == usb_ep->driver_data ... gs_port
  *	gs_port->port_usb ... gserial
  *
@@ -100,6 +103,8 @@ struct gs_port {
 	wait_queue_head_t	close_wait;	/* wait for last close */
 
 	struct list_head	read_pool;
+	struct list_head	read_queue;
+	unsigned		n_read;
 	struct tasklet_struct	push;
 
 	struct list_head	write_pool;
@@ -177,7 +182,7 @@ static void gs_buf_clear(struct gs_buf *gb)
 /*
  * gs_buf_data_avail
  *
- * Return the number of bytes of data available in the circular
+ * Return the number of bytes of data written into the circular
  * buffer.
  */
 static unsigned gs_buf_data_avail(struct gs_buf *gb)
@@ -278,7 +283,7 @@ gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
  * Allocate a usb_request and its buffer.  Returns a pointer to the
  * usb_request or NULL if there is an error.
  */
-static struct usb_request *
+struct usb_request *
 gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
 {
 	struct usb_request *req;
@@ -302,7 +307,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
  *
  * Free a usb_request and its buffer.
  */
-static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
+void gs_free_req(struct usb_ep *ep, struct usb_request *req)
 {
 	kfree(req->buf);
 	usb_ep_free_request(ep, req);
@@ -367,11 +372,9 @@ __acquires(&port->port_lock)
 		req->length = len;
 		list_del(&req->list);
 
-#ifdef VERBOSE_DEBUG
-		pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n",
-				__func__, in->name, len, *((u8 *)req->buf),
+		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
+				port->port_num, len, *((u8 *)req->buf),
 				*((u8 *)req->buf+1), *((u8 *)req->buf+2));
-#endif
 
 		/* Drop lock while we call out of driver; completions
 		 * could be issued while we do so.  Disconnection may
@@ -401,56 +404,6 @@ __acquires(&port->port_lock)
 	return status;
 }
 
-static void gs_rx_push(unsigned long _port)
-{
-	struct gs_port		*port = (void *)_port;
-	struct tty_struct	*tty = port->port_tty;
-
-	/* With low_latency, tty_flip_buffer_push() doesn't put its
-	 * real work through a workqueue, so the ldisc has a better
-	 * chance to keep up with peak USB data rates.
-	 */
-	if (tty) {
-		tty_flip_buffer_push(tty);
-		wake_up_interruptible(&tty->read_wait);
-	}
-}
-
-/*
- * gs_recv_packet
- *
- * Called for each USB packet received.  Reads the packet
- * header and stuffs the data in the appropriate tty buffer.
- * Returns 0 if successful, or a negative error number.
- *
- * Called during USB completion routine, on interrupt time.
- * With port_lock.
- */
-static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size)
-{
-	unsigned		len;
-	struct tty_struct	*tty;
-
-	/* I/O completions can continue for a while after close(), until the
-	 * request queue empties.  Just discard any data we receive, until
-	 * something reopens this TTY ... as if there were no HW flow control.
-	 */
-	tty = port->port_tty;
-	if (tty == NULL) {
-		pr_vdebug("%s: ttyGS%d, after close\n",
-				__func__, port->port_num);
-		return -EIO;
-	}
-
-	len = tty_insert_flip_string(tty, packet, size);
-	if (len > 0)
-		tasklet_schedule(&port->push);
-	if (len < size)
-		pr_debug("%s: ttyGS%d, drop %d bytes\n",
-				__func__, port->port_num, size - len);
-	return 0;
-}
-
 /*
  * Context: caller owns port_lock, and port_usb is set
  */
@@ -469,9 +422,9 @@ __acquires(&port->port_lock)
 		int			status;
 		struct tty_struct	*tty;
 
-		/* no more rx if closed or throttled */
+		/* no more rx if closed */
 		tty = port->port_tty;
-		if (!tty || test_bit(TTY_THROTTLED, &tty->flags))
+		if (!tty)
 			break;
 
 		req = list_entry(pool->next, struct usb_request, list);
@@ -500,36 +453,134 @@ __acquires(&port->port_lock)
 	return started;
 }
 
-static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+/*
+ * RX tasklet takes data out of the RX queue and hands it up to the TTY
+ * layer until it refuses to take any more data (or is throttled back).
+ * Then it issues reads for any further data.
+ *
+ * If the RX queue becomes full enough that no usb_request is queued,
+ * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
+ * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
+ * can be buffered before the TTY layer's buffers (currently 64 KB).
+ */
+static void gs_rx_push(unsigned long _port)
 {
-	int		status;
-	struct gs_port	*port = ep->driver_data;
+	struct gs_port		*port = (void *)_port;
+	struct tty_struct	*tty;
+	struct list_head	*queue = &port->read_queue;
+	bool			disconnect = false;
+	bool			do_push = false;
 
-	spin_lock(&port->port_lock);
-	list_add(&req->list, &port->read_pool);
+	/* hand any queued data to the tty */
+	spin_lock_irq(&port->port_lock);
+	tty = port->port_tty;
+	while (!list_empty(queue)) {
+		struct usb_request	*req;
 
-	switch (req->status) {
-	case 0:
-		/* normal completion */
-		status = gs_recv_packet(port, req->buf, req->actual);
-		if (status && status != -EIO)
-			pr_debug("%s: %s %s err %d\n",
-				__func__, "recv", ep->name, status);
-		gs_start_rx(port);
-		break;
+		req = list_first_entry(queue, struct usb_request, list);
 
-	case -ESHUTDOWN:
-		/* disconnect */
-		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
-		break;
+		/* discard data if tty was closed */
+		if (!tty)
+			goto recycle;
 
-	default:
-		/* presumably a transient fault */
-		pr_warning("%s: unexpected %s status %d\n",
-				__func__, ep->name, req->status);
-		gs_start_rx(port);
-		break;
+		/* leave data queued if tty was rx throttled */
+		if (test_bit(TTY_THROTTLED, &tty->flags))
+			break;
+
+		switch (req->status) {
+		case -ESHUTDOWN:
+			disconnect = true;
+			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
+			break;
+
+		default:
+			/* presumably a transient fault */
+			pr_warning(PREFIX "%d: unexpected RX status %d\n",
+					port->port_num, req->status);
+			/* FALLTHROUGH */
+		case 0:
+			/* normal completion */
+			break;
+		}
+
+		/* push data to (open) tty */
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned	size = req->actual;
+			unsigned	n;
+			int		count;
+
+			/* we may have pushed part of this packet already... */
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+
+			count = tty_insert_flip_string(tty, packet, size);
+			if (count)
+				do_push = true;
+			if (count != size) {
+				/* stop pushing; TTY layer can't handle more */
+				port->n_read += count;
+				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
+						port->port_num,
+						count, req->actual);
+				break;
+			}
+			port->n_read = 0;
+		}
+recycle:
+		list_move(&req->list, &port->read_pool);
 	}
+
+	/* Push from tty to ldisc; this is immediate with low_latency, and
+	 * may trigger callbacks to this driver ... so drop the spinlock.
+	 */
+	if (tty && do_push) {
+		spin_unlock_irq(&port->port_lock);
+		tty_flip_buffer_push(tty);
+		wake_up_interruptible(&tty->read_wait);
+		spin_lock_irq(&port->port_lock);
+
+		/* tty may have been closed */
+		tty = port->port_tty;
+	}
+
+
+	/* We want our data queue to become empty ASAP, keeping data
+	 * in the tty and ldisc (not here).  If we couldn't push any
+	 * this time around, there may be trouble unless there's an
+	 * implicit tty_unthrottle() call on its way...
+	 *
+	 * REVISIT we should probably add a timer to keep the tasklet
+	 * from starving ... but it's not clear that case ever happens.
+	 */
+	if (!list_empty(queue) && tty) {
+		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+			if (do_push)
+				tasklet_schedule(&port->push);
+			else
+				pr_warning(PREFIX "%d: RX not scheduled?\n",
+					port->port_num);
+		}
+	}
+
+	/* If we're still connected, refill the USB RX queue. */
+	if (!disconnect && port->port_usb)
+		gs_start_rx(port);
+
+	spin_unlock_irq(&port->port_lock);
+}
+
+static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gs_port	*port = ep->driver_data;
+
+	/* Queue all received data until the tty layer is ready for it. */
+	spin_lock(&port->port_lock);
+	list_add_tail(&req->list, &port->read_queue);
+	tasklet_schedule(&port->push);
 	spin_unlock(&port->port_lock);
 }
 
@@ -625,6 +676,7 @@ static int gs_start_io(struct gs_port *port)
 	}
 
 	/* queue read requests */
+	port->n_read = 0;
 	started = gs_start_rx(port);
 
 	/* unblock any pending writes into our circular buffer */
@@ -633,9 +685,10 @@ static int gs_start_io(struct gs_port *port)
 	} else {
 		gs_free_requests(ep, head);
 		gs_free_requests(port->port_usb->in, &port->write_pool);
+		status = -EIO;
 	}
 
-	return started ? 0 : status;
+	return status;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -736,10 +789,13 @@ static int gs_open(struct tty_struct *tty, struct file *file)
 
 	/* if connected, start the I/O stream */
 	if (port->port_usb) {
+		struct gserial	*gser = port->port_usb;
+
 		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
 		gs_start_io(port);
 
-		/* REVISIT for ACM, issue "network connected" event */
+		if (gser->connect)
+			gser->connect(gser);
 	}
 
 	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
@@ -766,6 +822,7 @@ static int gs_writes_finished(struct gs_port *p)
 static void gs_close(struct tty_struct *tty, struct file *file)
 {
 	struct gs_port *port = tty->driver_data;
+	struct gserial	*gser;
 
 	spin_lock_irq(&port->port_lock);
 
@@ -785,32 +842,31 @@ static void gs_close(struct tty_struct *tty, struct file *file)
 	port->openclose = true;
 	port->open_count = 0;
 
-	if (port->port_usb)
-		/* REVISIT for ACM, issue "network disconnected" event */;
+	gser = port->port_usb;
+	if (gser && gser->disconnect)
+		gser->disconnect(gser);
 
 	/* wait for circular write buffer to drain, disconnect, or at
 	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
 	 */
-	if (gs_buf_data_avail(&port->port_write_buf) > 0
-			&& port->port_usb) {
+	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
 		spin_unlock_irq(&port->port_lock);
 		wait_event_interruptible_timeout(port->drain_wait,
 					gs_writes_finished(port),
 					GS_CLOSE_TIMEOUT * HZ);
 		spin_lock_irq(&port->port_lock);
+		gser = port->port_usb;
 	}
 
 	/* Iff we're disconnected, there can be no I/O in flight so it's
 	 * ok to free the circular buffer; else just scrub it.  And don't
 	 * let the push tasklet fire again until we're re-opened.
 	 */
-	if (port->port_usb == NULL)
+	if (gser == NULL)
 		gs_buf_free(&port->port_write_buf);
 	else
 		gs_buf_clear(&port->port_write_buf);
 
-	tasklet_kill(&port->push);
-
 	tty->driver_data = NULL;
 	port->port_tty = NULL;
 
@@ -911,15 +967,35 @@ static void gs_unthrottle(struct tty_struct *tty)
 {
 	struct gs_port		*port = tty->driver_data;
 	unsigned long		flags;
-	unsigned		started = 0;
 
 	spin_lock_irqsave(&port->port_lock, flags);
-	if (port->port_usb)
-		started = gs_start_rx(port);
+	if (port->port_usb) {
+		/* Kickstart read queue processing.  We don't do xon/xoff,
+		 * rts/cts, or other handshaking with the host, but if the
+		 * read queue backs up enough we'll be NAKing OUT packets.
+		 */
+		tasklet_schedule(&port->push);
+		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
+	}
 	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int gs_break_ctl(struct tty_struct *tty, int duration)
+{
+	struct gs_port	*port = tty->driver_data;
+	int		status = 0;
+	struct gserial	*gser;
+
+	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
+			port->port_num, duration);
 
-	pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n",
-			port->port_num, started);
+	spin_lock_irq(&port->port_lock);
+	gser = port->port_usb;
+	if (gser && gser->send_break)
+		status = gser->send_break(gser, duration);
+	spin_unlock_irq(&port->port_lock);
+
+	return status;
 }
 
 static const struct tty_operations gs_tty_ops = {
@@ -931,6 +1007,7 @@ static const struct tty_operations gs_tty_ops = {
 	.write_room =		gs_write_room,
 	.chars_in_buffer =	gs_chars_in_buffer,
 	.unthrottle =		gs_unthrottle,
+	.break_ctl =		gs_break_ctl,
 };
 
 /*-------------------------------------------------------------------------*/
@@ -953,6 +1030,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
 	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
 
 	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queue);
 	INIT_LIST_HEAD(&port->write_pool);
 
 	port->port_num = port_num;
@@ -997,7 +1075,7 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count)
 
 	gs_tty_driver->owner = THIS_MODULE;
 	gs_tty_driver->driver_name = "g_serial";
-	gs_tty_driver->name = "ttyGS";
+	gs_tty_driver->name = PREFIX;
 	/* uses dynamically assigned dev_t values */
 
 	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1104,6 +1182,8 @@ void gserial_cleanup(void)
 		ports[i].port = NULL;
 		mutex_unlock(&ports[i].lock);
 
+		tasklet_kill(&port->push);
+
 		/* wait for old opens to finish */
 		wait_event(port->close_wait, gs_closed(port));
 
@@ -1175,14 +1255,17 @@ int gserial_connect(struct gserial *gser, u8 port_num)
 
 	/* REVISIT if waiting on "carrier detect", signal. */
 
-	/* REVISIT for ACM, issue "network connection" status notification:
-	 * connected if open_count, else disconnected.
+	/* if it's already open, start I/O ... and notify the serial
+	 * protocol about open/close status (connect/disconnect).
 	 */
-
-	/* if it's already open, start I/O */
 	if (port->open_count) {
 		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
 		gs_start_io(port);
+		if (gser->connect)
+			gser->connect(gser);
+	} else {
+		if (gser->disconnect)
+			gser->disconnect(gser);
 	}
 
 	spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1241,6 +1324,7 @@ void gserial_disconnect(struct gserial *gser)
 	if (port->open_count == 0 && !port->openclose)
 		gs_buf_free(&port->port_write_buf);
 	gs_free_requests(gser->out, &port->read_pool);
+	gs_free_requests(gser->out, &port->read_queue);
 	gs_free_requests(gser->in, &port->write_pool);
 	spin_unlock_irqrestore(&port->port_lock, flags);
 }
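
Note on the new notification hooks: the gser->connect(), gser->disconnect() and gser->send_break() calls introduced above replace the old "REVISIT for ACM" placeholders, so a USB function that wraps its own control protocol around the raw tty link can now be told about tty opens, closes and BREAK requests. The sketch below shows how a function driver might fill in those hooks. Only the callback names on struct gserial come from this diff; the acm_demo wrapper, its helpers and the notification details are illustrative assumptions, not code from this patch or from f_acm, and the "u_serial.h" header name is likewise assumed.

/* Hypothetical glue, not part of this patch: wire up the gserial
 * notification hooks that gs_open(), gs_close() and gs_break_ctl()
 * invoke.  Assumes struct gserial (declared in "u_serial.h") exposes
 * the connect/disconnect/send_break members dereferenced above.
 */
#include <linux/kernel.h>
#include "u_serial.h"

struct acm_demo {
	struct gserial	port;		/* embedded, so container_of() can recover the wrapper */
	u16		serial_state;	/* e.g. DCD/DSR bits reported to the host */
};

static void acm_demo_connect(struct gserial *port)
{
	struct acm_demo	*acm = container_of(port, struct acm_demo, port);

	/* tty was opened: mark the "line" as up; a real driver would
	 * typically queue a CDC SERIAL_STATE notification here
	 */
	acm->serial_state |= 0x03;
}

static void acm_demo_disconnect(struct gserial *port)
{
	struct acm_demo	*acm = container_of(port, struct acm_demo, port);

	/* tty was closed: drop the line state again */
	acm->serial_state &= ~0x03;
}

static int acm_demo_send_break(struct gserial *port, int duration)
{
	/* forward the tty BREAK request over the control protocol;
	 * the return value is passed back through gs_break_ctl()
	 */
	return 0;
}

static void acm_demo_init(struct acm_demo *acm)
{
	acm->port.connect = acm_demo_connect;
	acm->port.disconnect = acm_demo_disconnect;
	acm->port.send_break = acm_demo_send_break;
}

As the gs_close() and gs_break_ctl() hunks show, these callbacks are invoked with port_lock held, so they must not sleep; any slow notification work belongs in a separately queued request or deferred context.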