author	Douglas Anderson <dianders@chromium.org>	2016-01-28 21:19:59 -0500
committer	Felipe Balbi <balbi@kernel.org>	2016-03-04 08:14:41 -0500
commit	17dd5b642d836ff7e12a780c2ec71a4be9bf9546 (patch)
tree	2714c7eee7438f2896ae764da25d49922c485d66
parent	74fc4a7558f859e89b849cc87afed38f517ded9a (diff)
usb: dwc2: host: Add a delay before releasing periodic bandwidth
We'd like to be able to use HCD_BH in order to speed up the dwc2 host
interrupt handler quite a bit.  However, according to the kernel doc for
usb_submit_urb() (specifically the part about "Reserved Bandwidth
Transfers"), we need to keep a reservation active as long as a device
driver keeps submitting.

That was easy to do when we gave back the URB in the interrupt context:
we just looked at when our queue was empty and released the reserved
bandwidth then.  ...but now we need a little more complexity.

We'll follow EHCI's lead in commit 9118f9eb4f1e ("USB: EHCI: improve
interrupt qh unlink") and add a 5ms delay.  Since we don't have a whole
timer infrastructure in dwc2, we'll just add a timer per QH.  The
overhead for this is very small.

Note that the dwc2 scheduler is pretty broken (see future patches to fix
it).  This patch attempts to replicate all old behavior and just add the
proper delay.

Acked-by: John Youn <johnyoun@synopsys.com>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Tested-by: Heiko Stuebner <heiko@sntech.de>
Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
Signed-off-by: Felipe Balbi <balbi@kernel.org>
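The core idea is small enough to sketch on its own.  Here is a minimal,
illustrative sketch of the same delayed-release pattern, written against the
4.x-era timer API the patch itself uses (setup_timer(), mod_timer(),
del_timer()).  Every name in it (my_res, my_schedule_release(),
MY_RELEASE_DELAY, and so on) is hypothetical and exists only for this
sketch; the authoritative implementation is the dwc2 diff below.

/*
 * Sketch only: delayed release of a reserved resource via a per-object
 * timer.  Hypothetical names throughout; not part of the dwc2 patch.
 */
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

#define MY_RELEASE_DELAY (msecs_to_jiffies(5))

struct my_res {
	spinlock_t *lock;		/* controller-wide lock */
	struct timer_list release_timer;
	unsigned release_pending:1;
};

/* Caller holds *lock.  Actually hand the reserved resource back. */
static void my_do_release(struct my_res *res)
{
	res->release_pending = 0;
	/* ... return bandwidth / channel to the pool here ... */
}

static void my_release_timer_fn(unsigned long data)
{
	struct my_res *res = (struct my_res *)data;
	unsigned long flags;

	/* If the timer was re-armed while we spun, a later run owns it. */
	while (!spin_trylock_irqsave(res->lock, flags))
		if (timer_pending(&res->release_timer))
			return;

	/* A new reservation may have raced in before we got the lock. */
	if (res->release_pending)
		my_do_release(res);
	spin_unlock_irqrestore(res->lock, flags);
}

static void my_res_init(struct my_res *res, spinlock_t *lock)
{
	res->lock = lock;
	res->release_pending = 0;
	setup_timer(&res->release_timer, my_release_timer_fn,
		    (unsigned long)res);
}

/* Called under *lock when the endpoint goes idle: release later, not now. */
static void my_schedule_release(struct my_res *res)
{
	/* +1 jiffy so the full delay elapses even if the tick fires
	 * right after we read jiffies. */
	mod_timer(&res->release_timer, jiffies + MY_RELEASE_DELAY + 1);
	res->release_pending = 1;
}

/* Called under *lock when a new transfer arrives: keep the reservation. */
static void my_cancel_release(struct my_res *res)
{
	del_timer(&res->release_timer);
	res->release_pending = 0;
}

The trylock loop in the handler is the subtle part: while the handler spins
waiting for the controller lock, the lock holder may be re-arming this same
timer because a new transfer arrived.  Checking timer_pending() on each
failed trylock lets the stale run bail out and defer to the newer one.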
-rw-r--r--	drivers/usb/dwc2/hcd.h	6
-rw-r--r--	drivers/usb/dwc2/hcd_queue.c	237
2 files changed, 187 insertions, 56 deletions
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 809bc4ff9116..79473ea35bd6 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -215,6 +215,7 @@ enum dwc2_transaction_type {
 /**
  * struct dwc2_qh - Software queue head structure
  *
+ * @hsotg:              The HCD state structure for the DWC OTG controller
  * @ep_type:            Endpoint type. One of the following values:
  *                       - USB_ENDPOINT_XFER_CONTROL
  *                       - USB_ENDPOINT_XFER_BULK
@@ -252,13 +253,16 @@ enum dwc2_transaction_type {
  * @n_bytes:            Xfer Bytes array. Each element corresponds to a transfer
  *                      descriptor and indicates original XferSize value for the
  *                      descriptor
+ * @unreserve_timer:    Timer for releasing periodic reservation.
  * @tt_buffer_dirty     True if clear_tt_buffer_complete is pending
+ * @unreserve_pending:  True if we planned to unreserve but haven't yet.
  *
  * A Queue Head (QH) holds the static characteristics of an endpoint and
  * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
  * be entered in either the non-periodic or periodic schedule.
  */
 struct dwc2_qh {
+	struct dwc2_hsotg *hsotg;
 	u8 ep_type;
 	u8 ep_is_in;
 	u16 maxp;
@@ -281,7 +285,9 @@ struct dwc2_qh {
 	dma_addr_t desc_list_dma;
 	u32 desc_list_sz;
 	u32 *n_bytes;
+	struct timer_list unreserve_timer;
 	unsigned tt_buffer_dirty:1;
+	unsigned unreserve_pending:1;
 };
 
 /**
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 0e9faa75593c..b9e4867e1afd 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -53,6 +53,94 @@
 #include "core.h"
 #include "hcd.h"
 
+/* Wait this long before releasing periodic reservation */
+#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
+
+/**
+ * dwc2_do_unreserve() - Actually release the periodic reservation
+ *
+ * This function actually releases the periodic bandwidth that was reserved
+ * by the given qh.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh:    QH for the periodic transfer.
+ */
+static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+	assert_spin_locked(&hsotg->lock);
+
+	WARN_ON(!qh->unreserve_pending);
+
+	/* No more unreserve pending--we're doing it */
+	qh->unreserve_pending = false;
+
+	if (WARN_ON(!list_empty(&qh->qh_list_entry)))
+		list_del_init(&qh->qh_list_entry);
+
+	/* Update claimed usecs per (micro)frame */
+	hsotg->periodic_usecs -= qh->usecs;
+
+	if (hsotg->core_params->uframe_sched > 0) {
+		int i;
+
+		for (i = 0; i < 8; i++) {
+			hsotg->frame_usecs[i] += qh->frame_usecs[i];
+			qh->frame_usecs[i] = 0;
+		}
+	} else {
+		/* Release periodic channel reservation */
+		hsotg->periodic_channels--;
+	}
+}
+
+/**
+ * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
+ *
+ * According to the kernel doc for usb_submit_urb() (specifically the part
+ * about "Reserved Bandwidth Transfers"), we need to keep a reservation
+ * active as long as a device driver keeps submitting.  Since we're using
+ * HCD_BH to give back the URB we need to give the driver a little bit of
+ * time before we release the reservation.  This timer function is called
+ * after the appropriate delay.
+ *
+ * @data: The QH to unreserve, cast to an unsigned long.
+ */
+static void dwc2_unreserve_timer_fn(unsigned long data)
+{
+	struct dwc2_qh *qh = (struct dwc2_qh *)data;
+	struct dwc2_hsotg *hsotg = qh->hsotg;
+	unsigned long flags;
+
+	/*
+	 * Wait for the lock, or for us to be scheduled again.  We
+	 * could be scheduled again if:
+	 *  - We started executing but didn't get the lock yet.
+	 *  - A new reservation came in, but cancel didn't take effect
+	 *    because we already started executing.
+	 *  - The timer has been kicked again.
+	 * In that case cancel and wait for the next call.
+	 */
+	while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
+		if (timer_pending(&qh->unreserve_timer))
+			return;
+	}
+
+	/*
+	 * Might be no more unreserve pending if:
+	 *  - We started executing but didn't get the lock yet.
+	 *  - A new reservation came in, but cancel didn't take effect
+	 *    because we already started executing.
+	 *
+	 * We can't put this in the loop above because unreserve_pending needs
+	 * to be accessed under lock, so we can only check it once we got the
+	 * lock.
+	 */
+	if (qh->unreserve_pending)
+		dwc2_do_unreserve(hsotg, qh);
+
+	spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
 /**
  * dwc2_qh_init() - Initializes a QH structure
  *
@@ -71,6 +159,9 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
 
 	/* Initialize QH */
+	qh->hsotg = hsotg;
+	setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
+		    (unsigned long)qh);
 	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
 	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;
 
@@ -240,6 +331,15 @@ struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
  */
 void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 {
+	/* Make sure any unreserve work is finished. */
+	if (del_timer_sync(&qh->unreserve_timer)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&hsotg->lock, flags);
+		dwc2_do_unreserve(hsotg, qh);
+		spin_unlock_irqrestore(&hsotg->lock, flags);
+	}
+
 	if (qh->desc_list)
 		dwc2_hcd_qh_free_ddma(hsotg, qh);
 	kfree(qh);
@@ -477,51 +577,74 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 {
 	int status;
 
-	if (hsotg->core_params->uframe_sched > 0) {
-		int frame = -1;
-
-		status = dwc2_find_uframe(hsotg, qh);
-		if (status == 0)
-			frame = 7;
-		else if (status > 0)
-			frame = status - 1;
-
-		/* Set the new frame up */
-		if (frame >= 0) {
-			qh->sched_frame &= ~0x7;
-			qh->sched_frame |= (frame & 7);
-			dwc2_sch_dbg(hsotg, "QH=%p sched_p sch=%04x, uf=%d\n",
-				     qh, qh->sched_frame, frame);
-		}
-
-		if (status > 0)
-			status = 0;
-	} else {
-		status = dwc2_periodic_channel_available(hsotg);
-		if (status) {
-			dev_info(hsotg->dev,
-				 "%s: No host channel available for periodic transfer\n",
-				 __func__);
-			return status;
-		}
-
-		status = dwc2_check_periodic_bandwidth(hsotg, qh);
-	}
-
-	if (status) {
-		dev_dbg(hsotg->dev,
-			"%s: Insufficient periodic bandwidth for periodic transfer\n",
-			__func__);
-		return status;
-	}
-
 	status = dwc2_check_max_xfer_size(hsotg, qh);
 	if (status) {
 		dev_dbg(hsotg->dev,
 			"%s: Channel max transfer size too small for periodic transfer\n",
 			__func__);
 		return status;
 	}
 
+	/* Cancel pending unreserve; if canceled OK, unreserve was pending */
+	if (del_timer(&qh->unreserve_timer))
+		WARN_ON(!qh->unreserve_pending);
+
+	/*
+	 * Only need to reserve if there's not an unreserve pending, since if an
+	 * unreserve is pending then by definition our old reservation is still
+	 * valid.  Unreserve might still be pending even if we didn't cancel if
+	 * dwc2_unreserve_timer_fn() already started.  Code in the timer handles
+	 * that case.
+	 */
+	if (!qh->unreserve_pending) {
+		if (hsotg->core_params->uframe_sched > 0) {
+			int frame = -1;
+
+			status = dwc2_find_uframe(hsotg, qh);
+			if (status == 0)
+				frame = 7;
+			else if (status > 0)
+				frame = status - 1;
+
+			/* Set the new frame up */
+			if (frame >= 0) {
+				qh->sched_frame &= ~0x7;
+				qh->sched_frame |= (frame & 7);
+				dwc2_sch_dbg(hsotg,
+					     "QH=%p sched_p sch=%04x, uf=%d\n",
+					     qh, qh->sched_frame, frame);
+			}
+
+			if (status > 0)
+				status = 0;
+		} else {
+			status = dwc2_periodic_channel_available(hsotg);
+			if (status) {
+				dev_info(hsotg->dev,
+					 "%s: No host channel available for periodic transfer\n",
+					 __func__);
+				return status;
+			}
+
+			status = dwc2_check_periodic_bandwidth(hsotg, qh);
+		}
+
+		if (status) {
+			dev_dbg(hsotg->dev,
+				"%s: Insufficient periodic bandwidth for periodic transfer\n",
+				__func__);
+			return status;
+		}
+
+		if (hsotg->core_params->uframe_sched <= 0)
+			/* Reserve periodic channel */
+			hsotg->periodic_channels++;
+
+		/* Update claimed usecs per (micro)frame */
+		hsotg->periodic_usecs += qh->usecs;
+	}
+
+	qh->unreserve_pending = 0;
+
 	if (hsotg->core_params->dma_desc_enable > 0)
 		/* Don't rely on SOF and start in ready schedule */
@@ -531,13 +654,6 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 		list_add_tail(&qh->qh_list_entry,
 			      &hsotg->periodic_sched_inactive);
 
-	if (hsotg->core_params->uframe_sched <= 0)
-		/* Reserve periodic channel */
-		hsotg->periodic_channels++;
-
-	/* Update claimed usecs per (micro)frame */
-	hsotg->periodic_usecs += qh->usecs;
-
 	return status;
 }
 
@@ -551,22 +667,31 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
 				     struct dwc2_qh *qh)
 {
-	int i;
+	bool did_modify;
 
-	list_del_init(&qh->qh_list_entry);
+	assert_spin_locked(&hsotg->lock);
 
-	/* Update claimed usecs per (micro)frame */
-	hsotg->periodic_usecs -= qh->usecs;
-
-	if (hsotg->core_params->uframe_sched > 0) {
-		for (i = 0; i < 8; i++) {
-			hsotg->frame_usecs[i] += qh->frame_usecs[i];
-			qh->frame_usecs[i] = 0;
-		}
-	} else {
-		/* Release periodic channel reservation */
-		hsotg->periodic_channels--;
-	}
+	/*
+	 * Schedule the unreserve to happen in a little bit.  Cases here:
+	 *  - Unreserve worker might be sitting there waiting to grab the lock.
+	 *    In this case it will notice it's been scheduled again and will
+	 *    quit.
+	 *  - Unreserve worker might not be scheduled.
+	 *
+	 * We should never already be scheduled since dwc2_schedule_periodic()
+	 * should have canceled the scheduled unreserve timer (hence the
+	 * warning on did_modify).
+	 *
+	 * We add + 1 to the timer to guarantee that at least 1 jiffy has
+	 * passed (otherwise the jiffy counter might tick right after we
+	 * read it and we'd get no delay).
+	 */
+	did_modify = mod_timer(&qh->unreserve_timer,
+			       jiffies + DWC2_UNRESERVE_DELAY + 1);
+	WARN_ON(did_modify);
+	qh->unreserve_pending = 1;
+
+	list_del_init(&qh->qh_list_entry);
 }
 
 /**
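One more ordering note, visible in the dwc2_hcd_qh_free() hunk above: freeing
a QH while its unreserve timer is still in flight would let the timer fire on
freed memory.  The patch guards against that by calling del_timer_sync()
before taking the lock.  The same shape, again in terms of the hypothetical
my_res sketch near the top (illustrative only):

#include <linux/slab.h>

/*
 * Sketch only: safe teardown.  del_timer_sync() returns nonzero iff it
 * deactivated a still-pending timer, meaning the handler never ran and
 * the release is still owed.  It must be called WITHOUT *lock held:
 * the handler spins for that lock, and del_timer_sync() waits for the
 * handler, so holding the lock here could deadlock.
 */
static void my_res_free(struct my_res *res)
{
	if (del_timer_sync(&res->release_timer)) {
		unsigned long flags;

		spin_lock_irqsave(res->lock, flags);
		my_do_release(res);
		spin_unlock_irqrestore(res->lock, flags);
	}
	kfree(res);		/* assumes res was kmalloc()ed */
}

The dwc2_hcd_qh_free() hunk follows exactly this order: kill the timer first,
finish any owed unreserve under the lock, and only then free the QH.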