Diffstat (limited to 'drivers')
-rw-r--r--	drivers/usb/class/cdc-acm.c	142
-rw-r--r--	drivers/usb/class/cdc-acm.h	5
2 files changed, 126 insertions(+), 21 deletions(-)
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index c3201affa0b6..ba86fec872b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -159,12 +159,34 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
 	spin_lock_irqsave(&acm->write_lock, flags);
 	acm->write_ready = 1;
 	wb->use = 0;
+	acm->transmitting--;
 	spin_unlock_irqrestore(&acm->write_lock, flags);
 }
 
 /*
  * Poke write.
+ *
+ * the caller is responsible for locking
  */
+
+static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
+{
+	int rc;
+
+	acm->transmitting++;
+
+	wb->urb->transfer_buffer = wb->buf;
+	wb->urb->transfer_dma = wb->dmah;
+	wb->urb->transfer_buffer_length = wb->len;
+	wb->urb->dev = acm->dev;
+
+	if ((rc = usb_submit_urb(wb->urb, GFP_ATOMIC)) < 0) {
+		dbg("usb_submit_urb(write bulk) failed: %d", rc);
+		acm_write_done(acm, wb);
+	}
+	return rc;
+}
+
 static int acm_write_start(struct acm *acm, int wbn)
 {
 	unsigned long flags;
@@ -182,26 +204,31 @@ static int acm_write_start(struct acm *acm, int wbn)
 		return 0;	/* A white lie */
 	}
 
+	wb = &acm->wb[wbn];
+	if(acm_wb_is_avail(acm) <= 1)
+		acm->write_ready = 0;
+
+	dbg("%s susp_count: %d", __func__, acm->susp_count);
+	if (acm->susp_count) {
+		acm->old_ready = acm->write_ready;
+		acm->delayed_wb = wb;
+		acm->write_ready = 0;
+		schedule_work(&acm->waker);
+		spin_unlock_irqrestore(&acm->write_lock, flags);
+		return 0;	/* A white lie */
+	}
+	usb_mark_last_busy(acm->dev);
+
 	if (!acm_wb_is_used(acm, wbn)) {
 		spin_unlock_irqrestore(&acm->write_lock, flags);
 		return 0;
 	}
-	wb = &acm->wb[wbn];
 
-	if(acm_wb_is_avail(acm) <= 1)
-		acm->write_ready = 0;
+	rc = acm_start_wb(acm, wb);
 	spin_unlock_irqrestore(&acm->write_lock, flags);
 
-	wb->urb->transfer_buffer = wb->buf;
-	wb->urb->transfer_dma = wb->dmah;
-	wb->urb->transfer_buffer_length = wb->len;
-	wb->urb->dev = acm->dev;
-
-	if ((rc = usb_submit_urb(wb->urb, GFP_ATOMIC)) < 0) {
-		dbg("usb_submit_urb(write bulk) failed: %d", rc);
-		acm_write_done(acm, wb);
-	}
 	return rc;
+
 }
 /*
  * attributes exported through sysfs
@@ -304,6 +331,7 @@ static void acm_ctrl_irq(struct urb *urb)
 		break;
 	}
 exit:
+	usb_mark_last_busy(acm->dev);
 	retval = usb_submit_urb (urb, GFP_ATOMIC);
 	if (retval)
 		err ("%s - usb_submit_urb failed with result %d",
@@ -320,8 +348,11 @@ static void acm_read_bulk(struct urb *urb)
 
 	dbg("Entering acm_read_bulk with status %d", status);
 
-	if (!ACM_READY(acm))
+	if (!ACM_READY(acm)) {
+		dev_dbg(&acm->data->dev, "Aborting, acm not ready");
 		return;
+	}
+	usb_mark_last_busy(acm->dev);
 
 	if (status)
 		dev_dbg(&acm->data->dev, "bulk rx status %d\n", status);
@@ -331,6 +362,7 @@ static void acm_read_bulk(struct urb *urb)
 
 	if (likely(status == 0)) {
 		spin_lock(&acm->read_lock);
+		acm->processing++;
 		list_add_tail(&rcv->list, &acm->spare_read_urbs);
 		list_add_tail(&buf->list, &acm->filled_read_bufs);
 		spin_unlock(&acm->read_lock);
@@ -343,7 +375,8 @@ static void acm_read_bulk(struct urb *urb)
 	/* nevertheless the tasklet must be kicked unconditionally
 	   so the queue cannot dry up */
 	}
-	tasklet_schedule(&acm->urb_task);
+	if (likely(!acm->susp_count))
+		tasklet_schedule(&acm->urb_task);
 }
 
 static void acm_rx_tasklet(unsigned long _acm)
@@ -354,16 +387,23 @@ static void acm_rx_tasklet(unsigned long _acm)
 	struct acm_ru *rcv;
 	unsigned long flags;
 	unsigned char throttled;
+
 	dbg("Entering acm_rx_tasklet");
 
 	if (!ACM_READY(acm))
+	{
+		dbg("acm_rx_tasklet: ACM not ready");
 		return;
+	}
 
 	spin_lock_irqsave(&acm->throttle_lock, flags);
 	throttled = acm->throttle;
 	spin_unlock_irqrestore(&acm->throttle_lock, flags);
 	if (throttled)
+	{
+		dbg("acm_rx_tasklet: throttled");
 		return;
+	}
 
 next_buffer:
 	spin_lock_irqsave(&acm->read_lock, flags);
@@ -403,6 +443,7 @@ urbs:
 	while (!list_empty(&acm->spare_read_bufs)) {
 		spin_lock_irqsave(&acm->read_lock, flags);
 		if (list_empty(&acm->spare_read_urbs)) {
+			acm->processing = 0;
 			spin_unlock_irqrestore(&acm->read_lock, flags);
 			return;
 		}
@@ -425,18 +466,23 @@ urbs:
 		rcv->urb->transfer_dma = buf->dma;
 		rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
-		dbg("acm_rx_tasklet: sending urb 0x%p, rcv 0x%p, buf 0x%p", rcv->urb, rcv, buf);
-
 		/* This shouldn't kill the driver as unsuccessful URBs are returned to the
 		   free-urbs-pool and resubmited ASAP */
-		if (usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
+		spin_lock_irqsave(&acm->read_lock, flags);
+		if (acm->susp_count || usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
 			list_add(&buf->list, &acm->spare_read_bufs);
-			spin_lock_irqsave(&acm->read_lock, flags);
 			list_add(&rcv->list, &acm->spare_read_urbs);
+			acm->processing = 0;
 			spin_unlock_irqrestore(&acm->read_lock, flags);
 			return;
+		} else {
+			spin_unlock_irqrestore(&acm->read_lock, flags);
+			dbg("acm_rx_tasklet: sending urb 0x%p, rcv 0x%p, buf 0x%p", rcv->urb, rcv, buf);
 		}
 	}
+	spin_lock_irqsave(&acm->read_lock, flags);
+	acm->processing = 0;
+	spin_unlock_irqrestore(&acm->read_lock, flags);
 }
 
 /* data interface wrote those outgoing bytes */
@@ -463,6 +509,27 @@ static void acm_softint(struct work_struct *work)
 	tty_wakeup(acm->tty);
 }
 
+static void acm_waker(struct work_struct *waker)
+{
+	struct acm *acm = container_of(waker, struct acm, waker);
+	long flags;
+	int rv;
+
+	rv = usb_autopm_get_interface(acm->control);
+	if (rv < 0) {
+		err("Autopm failure in %s", __func__);
+		return;
+	}
+	if (acm->delayed_wb) {
+		acm_start_wb(acm, acm->delayed_wb);
+		acm->delayed_wb = NULL;
+	}
+	spin_lock_irqsave(&acm->write_lock, flags);
+	acm->write_ready = acm->old_ready;
+	spin_unlock_irqrestore(&acm->write_lock, flags);
+	usb_autopm_put_interface(acm->control);
+}
+
 /*
  * TTY handlers
  */
@@ -492,6 +559,8 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
 
 	if (usb_autopm_get_interface(acm->control) < 0)
 		goto early_bail;
+	else
+		acm->control->needs_remote_wakeup = 1;
 
 	mutex_lock(&acm->mutex);
 	if (acm->used++) {
@@ -509,6 +578,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
 	if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
 	    (acm->ctrl_caps & USB_CDC_CAP_LINE))
 		goto full_bailout;
+	usb_autopm_put_interface(acm->control);
 
 	INIT_LIST_HEAD(&acm->spare_read_urbs);
 	INIT_LIST_HEAD(&acm->spare_read_bufs);
@@ -570,12 +640,14 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
 	mutex_lock(&open_mutex);
 	if (!--acm->used) {
 		if (acm->dev) {
+			usb_autopm_get_interface(acm->control);
 			acm_set_control(acm, acm->ctrlout = 0);
 			usb_kill_urb(acm->ctrlurb);
 			for (i = 0; i < ACM_NW; i++)
 				usb_kill_urb(acm->wb[i].urb);
 			for (i = 0; i < nr; i++)
 				usb_kill_urb(acm->ru[i].urb);
+			acm->control->needs_remote_wakeup = 0;
 			usb_autopm_put_interface(acm->control);
 		} else
 			acm_tty_unregister(acm);
@@ -987,6 +1059,7 @@ skip_normal_probe:
 	acm->urb_task.func = acm_rx_tasklet;
 	acm->urb_task.data = (unsigned long) acm;
 	INIT_WORK(&acm->work, acm_softint);
+	INIT_WORK(&acm->waker, acm_waker);
 	spin_lock_init(&acm->throttle_lock);
 	spin_lock_init(&acm->write_lock);
 	spin_lock_init(&acm->read_lock);
@@ -1116,6 +1189,7 @@ alloc_fail:
 static void stop_data_traffic(struct acm *acm)
 {
 	int i;
+	dbg("Entering stop_data_traffic");
 
 	tasklet_disable(&acm->urb_task);
 
@@ -1128,6 +1202,7 @@ static void stop_data_traffic(struct acm *acm)
 	tasklet_enable(&acm->urb_task);
 
 	cancel_work_sync(&acm->work);
+	cancel_work_sync(&acm->waker);
 }
 
 static void acm_disconnect(struct usb_interface *intf)
@@ -1181,8 +1256,27 @@ static void acm_disconnect(struct usb_interface *intf)
 static int acm_suspend(struct usb_interface *intf, pm_message_t message)
 {
 	struct acm *acm = usb_get_intfdata(intf);
+	int cnt;
+
+	if (acm->dev->auto_pm) {
+		int b;
+
+		spin_lock_irq(&acm->read_lock);
+		spin_lock(&acm->write_lock);
+		b = acm->processing + acm->transmitting;
+		spin_unlock(&acm->write_lock);
+		spin_unlock_irq(&acm->read_lock);
+		if (b)
+			return -EBUSY;
+	}
+
+	spin_lock_irq(&acm->read_lock);
+	spin_lock(&acm->write_lock);
+	cnt = acm->susp_count++;
+	spin_unlock(&acm->write_lock);
+	spin_unlock_irq(&acm->read_lock);
 
-	if (acm->susp_count++)
+	if (cnt)
 		return 0;
 	/*
 	we treat opened interfaces differently,
@@ -1201,15 +1295,21 @@ static int acm_resume(struct usb_interface *intf)
 {
 	struct acm *acm = usb_get_intfdata(intf);
 	int rv = 0;
+	int cnt;
 
-	if (--acm->susp_count)
+	spin_lock_irq(&acm->read_lock);
+	acm->susp_count -= 1;
+	cnt = acm->susp_count;
+	spin_unlock_irq(&acm->read_lock);
+
+	if (cnt)
 		return 0;
 
 	mutex_lock(&acm->mutex);
 	if (acm->used) {
 		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
 		if (rv < 0)
 			goto err_out;
 
 		tasklet_schedule(&acm->urb_task);
 	}
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 046e064b033a..85c3aaaab7c5 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -107,10 +107,14 @@ struct acm {
 	struct list_head filled_read_bufs;
 	int write_used;					/* number of non-empty write buffers */
 	int write_ready;				/* write urb is not running */
+	int old_ready;
+	int processing;
+	int transmitting;
 	spinlock_t write_lock;
 	struct mutex mutex;
 	struct usb_cdc_line_coding line;		/* bits, stop, parity */
 	struct work_struct work;			/* work queue entry for line discipline waking up */
+	struct work_struct waker;
 	struct tasklet_struct urb_task;			/* rx processing */
 	spinlock_t throttle_lock;			/* synchronize throtteling and read callback */
 	unsigned int ctrlin;				/* input control lines (DCD, DSR, RI, break, overruns) */
@@ -123,6 +127,7 @@ struct acm {
 	unsigned char clocal;				/* termios CLOCAL */
 	unsigned int ctrl_caps;				/* control capabilities from the class specific header */
 	unsigned int susp_count;			/* number of suspended interfaces */
+	struct acm_wb *delayed_wb;			/* write queued for a device about to be woken */
 };
 
 #define CDC_DATA_INTERFACE_TYPE	0x0a