Diffstat (limited to 'drivers/usb/dwc3/gadget.c')
-rw-r--r--  drivers/usb/dwc3/gadget.c  427
1 file changed, 204 insertions(+), 223 deletions(-)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 8e4a1b195e9b..9a7d0bd15dc3 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -145,90 +145,21 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
145 return -ETIMEDOUT; 145 return -ETIMEDOUT;
146} 146}
147 147
148/** 148static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
149 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
150 * @dwc: pointer to our context structure
151 *
152 * This function will a best effort FIFO allocation in order
153 * to improve FIFO usage and throughput, while still allowing
154 * us to enable as many endpoints as possible.
155 *
156 * Keep in mind that this operation will be highly dependent
157 * on the configured size for RAM1 - which contains TxFifo -,
158 * the amount of endpoints enabled on coreConsultant tool, and
159 * the width of the Master Bus.
160 *
161 * In the ideal world, we would always be able to satisfy the
162 * following equation:
163 *
164 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
165 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
166 *
167 * Unfortunately, due to many variables that's not always the case.
168 */
169int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
170{ 149{
171 int last_fifo_depth = 0; 150 dep->trb_enqueue++;
172 int ram1_depth; 151 dep->trb_enqueue %= DWC3_TRB_NUM;
173 int fifo_size; 152}
174 int mdwidth;
175 int num;
176
177 if (!dwc->needs_fifo_resize)
178 return 0;
179
180 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
181 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
182
183 /* MDWIDTH is represented in bits, we need it in bytes */
184 mdwidth >>= 3;
185
186 /*
187 * FIXME For now we will only allocate 1 wMaxPacketSize space
188 * for each enabled endpoint, later patches will come to
189 * improve this algorithm so that we better use the internal
190 * FIFO space
191 */
192 for (num = 0; num < dwc->num_in_eps; num++) {
193 /* bit0 indicates direction; 1 means IN ep */
194 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
195 int mult = 1;
196 int tmp;
197
198 if (!(dep->flags & DWC3_EP_ENABLED))
199 continue;
200
201 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
202 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
203 mult = 3;
204
205 /*
206 * REVISIT: the following assumes we will always have enough
207 * space available on the FIFO RAM for all possible use cases.
208 * Make sure that's true somehow and change FIFO allocation
209 * accordingly.
210 *
211 * If we have Bulk or Isochronous endpoints, we want
212 * them to be able to be very, very fast. So we're giving
213 * those endpoints a fifo_size which is enough for 3 full
214 * packets
215 */
216 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
217 tmp += mdwidth;
218
219 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
220
221 fifo_size |= (last_fifo_depth << 16);
222
223 dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
224 dep->name, last_fifo_depth, fifo_size & 0xffff);
225
226 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
227 153
228 last_fifo_depth += (fifo_size & 0xffff); 154static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
229 } 155{
156 dep->trb_dequeue++;
157 dep->trb_dequeue %= DWC3_TRB_NUM;
158}
230 159
231 return 0; 160static int dwc3_ep_is_last_trb(unsigned int index)
161{
162 return index == DWC3_TRB_NUM - 1;
232} 163}
233 164
234void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 165void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
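The hunk above replaces the busy_slot/free_slot counters with explicit trb_enqueue/trb_dequeue ring indices. A minimal standalone sketch of that bookkeeping (names and the pool size are illustrative, not the driver's): each index wraps modulo the pool size, and the last slot is always hopped over because it holds the link TRB.

#define TRB_NUM	32			/* assumed pool size (power of two) */

struct trb_ring {
	unsigned int enqueue;
	unsigned int dequeue;
};

static void ring_inc(unsigned int *index)
{
	*index = (*index + 1) % TRB_NUM;
}

/* Advance enqueue, skipping the link-TRB slot at TRB_NUM - 1. */
static void ring_advance_enqueue(struct trb_ring *ring)
{
	ring_inc(&ring->enqueue);
	if (ring->enqueue == TRB_NUM - 1)
		ring_inc(&ring->enqueue);
}

/* The dequeue side does the same when a completed TRB is given back. */
static void ring_advance_dequeue(struct trb_ring *ring)
{
	ring_inc(&ring->dequeue);
	if (ring->dequeue == TRB_NUM - 1)
		ring_inc(&ring->dequeue);
}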
@@ -237,21 +168,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
237 struct dwc3 *dwc = dep->dwc; 168 struct dwc3 *dwc = dep->dwc;
238 int i; 169 int i;
239 170
240 if (req->queued) { 171 if (req->started) {
241 i = 0; 172 i = 0;
242 do { 173 do {
243 dep->busy_slot++; 174 dwc3_ep_inc_deq(dep);
244 /* 175 /*
245 * Skip LINK TRB. We can't use req->trb and check for 176 * Skip LINK TRB. We can't use req->trb and check for
246 * DWC3_TRBCTL_LINK_TRB because it points the TRB we 177 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
247 * just completed (not the LINK TRB). 178 * just completed (not the LINK TRB).
248 */ 179 */
249 if (((dep->busy_slot & DWC3_TRB_MASK) == 180 if (dwc3_ep_is_last_trb(dep->trb_dequeue))
250 DWC3_TRB_NUM- 1) && 181 dwc3_ep_inc_deq(dep);
251 usb_endpoint_xfer_isoc(dep->endpoint.desc))
252 dep->busy_slot++;
253 } while(++i < req->request.num_mapped_sgs); 182 } while(++i < req->request.num_mapped_sgs);
254 req->queued = false; 183 req->started = false;
255 } 184 }
256 list_del(&req->list); 185 list_del(&req->list);
257 req->trb = NULL; 186 req->trb = NULL;
@@ -307,6 +236,8 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
307 } while (1); 236 } while (1);
308} 237}
309 238
239static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
240
310int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, 241int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
311 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) 242 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
312{ 243{
@@ -314,8 +245,40 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
314 u32 timeout = 500; 245 u32 timeout = 500;
315 u32 reg; 246 u32 reg;
316 247
248 int susphy = false;
249 int ret = -EINVAL;
250
317 trace_dwc3_gadget_ep_cmd(dep, cmd, params); 251 trace_dwc3_gadget_ep_cmd(dep, cmd, params);
318 252
253 /*
254 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
255 * we're issuing an endpoint command, we must check if
256 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
257 *
258 * We will also set SUSPHY bit to what it was before returning as stated
259 * by the same section on Synopsys databook.
260 */
261 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
262 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
263 susphy = true;
264 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
265 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
266 }
267
268 if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
269 int needs_wakeup;
270
271 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
272 dwc->link_state == DWC3_LINK_STATE_U2 ||
273 dwc->link_state == DWC3_LINK_STATE_U3);
274
275 if (unlikely(needs_wakeup)) {
276 ret = __dwc3_gadget_wakeup(dwc);
277 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
278 ret);
279 }
280 }
281
319 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0); 282 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
320 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1); 283 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
321 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2); 284 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
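The new SUSPHY handling follows the Databook rule quoted in the hunk: clear GUSB2PHYCFG.SUSPHY before issuing an endpoint command and restore it afterwards. Below is a hedged, standalone sketch of that save/clear/restore pattern over a simulated register; the bit position matches the driver's core.h, and issue_ep_command() is a placeholder, not a real helper.

#include <stdbool.h>
#include <stdint.h>

#define GUSB2PHYCFG_SUSPHY	(1u << 6)	/* SUSPHY bit, as in dwc3 core.h */

static uint32_t gusb2phycfg;			/* stand-in for DWC3_GUSB2PHYCFG(0) */

static int issue_ep_command(void)
{
	/* write DEPCMDPAR0..2 and DEPCMD, then poll CMDACT (elided) */
	return 0;
}

static int send_ep_cmd(void)
{
	bool susphy = false;
	int ret;

	/* Databook 6.3.2.5.x: SUSPHY must be clear while the command runs. */
	if (gusb2phycfg & GUSB2PHYCFG_SUSPHY) {
		susphy = true;
		gusb2phycfg &= ~GUSB2PHYCFG_SUSPHY;
	}

	ret = issue_ep_command();

	/* Put the bit back to whatever it was before we touched it. */
	if (susphy)
		gusb2phycfg |= GUSB2PHYCFG_SUSPHY;

	return ret;
}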
@@ -324,12 +287,40 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
324 do { 287 do {
325 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); 288 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
326 if (!(reg & DWC3_DEPCMD_CMDACT)) { 289 if (!(reg & DWC3_DEPCMD_CMDACT)) {
290 int cmd_status = DWC3_DEPCMD_STATUS(reg);
291
327 dwc3_trace(trace_dwc3_gadget, 292 dwc3_trace(trace_dwc3_gadget,
328 "Command Complete --> %d", 293 "Command Complete --> %d",
329 DWC3_DEPCMD_STATUS(reg)); 294 cmd_status);
330 if (DWC3_DEPCMD_STATUS(reg)) 295
331 return -EINVAL; 296 switch (cmd_status) {
332 return 0; 297 case 0:
298 ret = 0;
299 break;
300 case DEPEVT_TRANSFER_NO_RESOURCE:
301 dwc3_trace(trace_dwc3_gadget, "%s: no resource available", dep->name);
302 ret = -EINVAL;
303 break;
304 case DEPEVT_TRANSFER_BUS_EXPIRY:
305 /*
306 * SW issues START TRANSFER command to
307 * isochronous ep with future frame interval. If
308 * future interval time has already passed when
309 * core receives the command, it will respond
310 * with an error status of 'Bus Expiry'.
311 *
312 * Instead of always returning -EINVAL, let's
313 * give a hint to the gadget driver that this is
314 * the case by returning -EAGAIN.
315 */
316 dwc3_trace(trace_dwc3_gadget, "%s: bus expiry", dep->name);
317 ret = -EAGAIN;
318 break;
319 default:
320 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
321 }
322
323 break;
333 } 324 }
334 325
335 /* 326 /*
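Command completion status is now decoded instead of being collapsed into -EINVAL. A small sketch of that mapping; the DEPEVT_* values here are assumptions for the sketch, chosen only to mirror the driver's naming.

#include <errno.h>

enum {
	DEPEVT_TRANSFER_NO_RESOURCE = 1,	/* assumed value */
	DEPEVT_TRANSFER_BUS_EXPIRY  = 2,	/* assumed value */
};

static int ep_cmd_status_to_errno(int cmd_status)
{
	switch (cmd_status) {
	case 0:
		return 0;
	case DEPEVT_TRANSFER_NO_RESOURCE:
		return -EINVAL;
	case DEPEVT_TRANSFER_BUS_EXPIRY:
		/*
		 * A Start Transfer aimed at a (micro)frame that has already
		 * passed; -EAGAIN lets the gadget driver retry with a future
		 * interval instead of treating it as a hard failure.
		 */
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}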
@@ -340,11 +331,20 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
340 if (!timeout) { 331 if (!timeout) {
341 dwc3_trace(trace_dwc3_gadget, 332 dwc3_trace(trace_dwc3_gadget,
342 "Command Timed Out"); 333 "Command Timed Out");
343 return -ETIMEDOUT; 334 ret = -ETIMEDOUT;
335 break;
344 } 336 }
345 337
346 udelay(1); 338 udelay(1);
347 } while (1); 339 } while (1);
340
341 if (unlikely(susphy)) {
342 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
343 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
344 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
345 }
346
347 return ret;
348} 348}
349 349
350static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 350static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
@@ -464,9 +464,19 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
464 464
465 /* Burst size is only needed in SuperSpeed mode */ 465 /* Burst size is only needed in SuperSpeed mode */
466 if (dwc->gadget.speed >= USB_SPEED_SUPER) { 466 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
467 u32 burst = dep->endpoint.maxburst - 1; 467 u32 burst = dep->endpoint.maxburst;
468 u32 nump;
469 u32 reg;
470
471 /* update NumP */
472 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
473 nump = DWC3_DCFG_NUMP(reg);
474 nump = max(nump, burst);
475 reg &= ~DWC3_DCFG_NUMP_MASK;
476 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
477 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
468 478
469 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst); 479 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
470 } 480 }
471 481
472 if (ignore) 482 if (ignore)
@@ -567,10 +577,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
567 reg |= DWC3_DALEPENA_EP(dep->number); 577 reg |= DWC3_DALEPENA_EP(dep->number);
568 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 578 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
569 579
570 if (!usb_endpoint_xfer_isoc(desc)) 580 if (usb_endpoint_xfer_control(desc))
571 goto out; 581 goto out;
572 582
573 /* Link TRB for ISOC. The HWO bit is never reset */ 583 /* Link TRB. The HWO bit is never reset */
574 trb_st_hw = &dep->trb_pool[0]; 584 trb_st_hw = &dep->trb_pool[0];
575 585
576 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 586 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
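With the link TRB now installed for every non-control endpoint, the TRB pool is always treated as a ring. A hedged sketch of how such a ring is closed; the structure layout, TRBCTL encoding, and helper name are illustrative, not the driver's definitions.

#include <stdint.h>

#define TRB_NUM			32		/* assumed pool size */
#define TRBCTL_LINK_TRB		(8u << 4)	/* assumed encoding */
#define TRB_CTRL_HWO		(1u << 0)

struct trb {
	uint32_t bpl;	/* buffer pointer, low 32 bits */
	uint32_t bph;	/* buffer pointer, high 32 bits */
	uint32_t size;
	uint32_t ctrl;
};

/*
 * Close the ring: the last TRB is a link TRB whose buffer pointer is the DMA
 * address of slot 0, so the controller wraps back to the start of the pool.
 */
static void setup_link_trb(struct trb *pool, uint64_t pool_dma)
{
	struct trb *link = &pool[TRB_NUM - 1];

	link->bpl = (uint32_t)pool_dma;			/* DMA address of pool[0] */
	link->bph = (uint32_t)(pool_dma >> 32);
	link->ctrl = TRBCTL_LINK_TRB | TRB_CTRL_HWO;	/* HWO is never cleared */
}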
@@ -608,19 +618,19 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
608{ 618{
609 struct dwc3_request *req; 619 struct dwc3_request *req;
610 620
611 if (!list_empty(&dep->req_queued)) { 621 if (!list_empty(&dep->started_list)) {
612 dwc3_stop_active_transfer(dwc, dep->number, true); 622 dwc3_stop_active_transfer(dwc, dep->number, true);
613 623
614 /* - giveback all requests to gadget driver */ 624 /* - giveback all requests to gadget driver */
615 while (!list_empty(&dep->req_queued)) { 625 while (!list_empty(&dep->started_list)) {
616 req = next_request(&dep->req_queued); 626 req = next_request(&dep->started_list);
617 627
618 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 628 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
619 } 629 }
620 } 630 }
621 631
622 while (!list_empty(&dep->request_list)) { 632 while (!list_empty(&dep->pending_list)) {
623 req = next_request(&dep->request_list); 633 req = next_request(&dep->pending_list);
624 634
625 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 635 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
626 } 636 }
@@ -783,20 +793,19 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
783 chain ? " chain" : ""); 793 chain ? " chain" : "");
784 794
785 795
786 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 796 trb = &dep->trb_pool[dep->trb_enqueue];
787 797
788 if (!req->trb) { 798 if (!req->trb) {
789 dwc3_gadget_move_request_queued(req); 799 dwc3_gadget_move_started_request(req);
790 req->trb = trb; 800 req->trb = trb;
791 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 801 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
792 req->start_slot = dep->free_slot & DWC3_TRB_MASK; 802 req->first_trb_index = dep->trb_enqueue;
793 } 803 }
794 804
795 dep->free_slot++; 805 dwc3_ep_inc_enq(dep);
796 /* Skip the LINK-TRB on ISOC */ 806 /* Skip the LINK-TRB */
797 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 807 if (dwc3_ep_is_last_trb(dep->trb_enqueue))
798 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 808 dwc3_ep_inc_enq(dep);
799 dep->free_slot++;
800 809
801 trb->size = DWC3_TRB_SIZE_LENGTH(length); 810 trb->size = DWC3_TRB_SIZE_LENGTH(length);
802 trb->bpl = lower_32_bits(dma); 811 trb->bpl = lower_32_bits(dma);
@@ -812,6 +821,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
812 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 821 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
813 else 822 else
814 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 823 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
824
825 /* always enable Interrupt on Missed ISOC */
826 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
815 break; 827 break;
816 828
817 case USB_ENDPOINT_XFER_BULK: 829 case USB_ENDPOINT_XFER_BULK:
@@ -826,15 +838,14 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
826 BUG(); 838 BUG();
827 } 839 }
828 840
841 /* always enable Continue on Short Packet */
842 trb->ctrl |= DWC3_TRB_CTRL_CSP;
843
829 if (!req->request.no_interrupt && !chain) 844 if (!req->request.no_interrupt && !chain)
830 trb->ctrl |= DWC3_TRB_CTRL_IOC; 845 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
831 846
832 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 847 if (last)
833 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
834 trb->ctrl |= DWC3_TRB_CTRL_CSP;
835 } else if (last) {
836 trb->ctrl |= DWC3_TRB_CTRL_LST; 848 trb->ctrl |= DWC3_TRB_CTRL_LST;
837 }
838 849
839 if (chain) 850 if (chain)
840 trb->ctrl |= DWC3_TRB_CTRL_CHN; 851 trb->ctrl |= DWC3_TRB_CTRL_CHN;
@@ -860,55 +871,29 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
860{ 871{
861 struct dwc3_request *req, *n; 872 struct dwc3_request *req, *n;
862 u32 trbs_left; 873 u32 trbs_left;
863 u32 max;
864 unsigned int last_one = 0; 874 unsigned int last_one = 0;
865 875
866 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 876 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
867 877
868 /* the first request must not be queued */ 878 trbs_left = dep->trb_dequeue - dep->trb_enqueue;
869 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
870
871 /* Can't wrap around on a non-isoc EP since there's no link TRB */
872 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
873 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
874 if (trbs_left > max)
875 trbs_left = max;
876 }
877 879
878 /* 880 /*
879 * If busy & slot are equal than it is either full or empty. If we are 881 * If enqueue & dequeue are equal than it is either full or empty. If we
880 * starting to process requests then we are empty. Otherwise we are 882 * are starting to process requests then we are empty. Otherwise we are
881 * full and don't do anything 883 * full and don't do anything
882 */ 884 */
883 if (!trbs_left) { 885 if (!trbs_left) {
884 if (!starting) 886 if (!starting)
885 return; 887 return;
888
886 trbs_left = DWC3_TRB_NUM; 889 trbs_left = DWC3_TRB_NUM;
887 /*
888 * In case we start from scratch, we queue the ISOC requests
889 * starting from slot 1. This is done because we use ring
890 * buffer and have no LST bit to stop us. Instead, we place
891 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
892 * after the first request so we start at slot 1 and have
893 * 7 requests proceed before we hit the first IOC.
894 * Other transfer types don't use the ring buffer and are
895 * processed from the first TRB until the last one. Since we
896 * don't wrap around we have to start at the beginning.
897 */
898 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
899 dep->busy_slot = 1;
900 dep->free_slot = 1;
901 } else {
902 dep->busy_slot = 0;
903 dep->free_slot = 0;
904 }
905 } 890 }
906 891
907 /* The last TRB is a link TRB, not used for xfer */ 892 /* The last TRB is a link TRB, not used for xfer */
908 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc)) 893 if (trbs_left <= 1)
909 return; 894 return;
910 895
911 list_for_each_entry_safe(req, n, &dep->request_list, list) { 896 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
912 unsigned length; 897 unsigned length;
913 dma_addr_t dma; 898 dma_addr_t dma;
914 last_one = false; 899 last_one = false;
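The free-slot computation collapses to a plain difference of the two ring indices; enqueue == dequeue is ambiguous (completely empty or completely full), which is why it is only treated as "empty" when the caller knows it is starting a transfer, and one slot is always lost to the link TRB. A sketch of the intended arithmetic, written with an explicit modulo for clarity (pool size assumed):

#define TRB_NUM	32	/* assumed pool size, power of two */

static unsigned int trbs_left(unsigned int enqueue, unsigned int dequeue,
			      int starting)
{
	/* power-of-two pool size keeps the unsigned wraparound correct */
	unsigned int left = (dequeue - enqueue) % TRB_NUM;

	if (!left)			/* empty or full */
		left = starting ? TRB_NUM : 0;

	return left;
}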
@@ -927,7 +912,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
927 912
928 if (i == (request->num_mapped_sgs - 1) || 913 if (i == (request->num_mapped_sgs - 1) ||
929 sg_is_last(s)) { 914 sg_is_last(s)) {
930 if (list_empty(&dep->request_list)) 915 if (list_empty(&dep->pending_list))
931 last_one = true; 916 last_one = true;
932 chain = false; 917 chain = false;
933 } 918 }
@@ -957,7 +942,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
957 last_one = 1; 942 last_one = 1;
958 943
959 /* Is this the last request? */ 944 /* Is this the last request? */
960 if (list_is_last(&req->list, &dep->request_list)) 945 if (list_is_last(&req->list, &dep->pending_list))
961 last_one = 1; 946 last_one = 1;
962 947
963 dwc3_prepare_one_trb(dep, req, dma, length, 948 dwc3_prepare_one_trb(dep, req, dma, length,
@@ -988,18 +973,18 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
988 * new requests as we try to set the IOC bit only on the last request. 973 * new requests as we try to set the IOC bit only on the last request.
989 */ 974 */
990 if (start_new) { 975 if (start_new) {
991 if (list_empty(&dep->req_queued)) 976 if (list_empty(&dep->started_list))
992 dwc3_prepare_trbs(dep, start_new); 977 dwc3_prepare_trbs(dep, start_new);
993 978
994 /* req points to the first request which will be sent */ 979 /* req points to the first request which will be sent */
995 req = next_request(&dep->req_queued); 980 req = next_request(&dep->started_list);
996 } else { 981 } else {
997 dwc3_prepare_trbs(dep, start_new); 982 dwc3_prepare_trbs(dep, start_new);
998 983
999 /* 984 /*
1000 * req points to the first request where HWO changed from 0 to 1 985 * req points to the first request where HWO changed from 0 to 1
1001 */ 986 */
1002 req = next_request(&dep->req_queued); 987 req = next_request(&dep->started_list);
1003 } 988 }
1004 if (!req) { 989 if (!req) {
1005 dep->flags |= DWC3_EP_PENDING_REQUEST; 990 dep->flags |= DWC3_EP_PENDING_REQUEST;
@@ -1046,7 +1031,7 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1046{ 1031{
1047 u32 uf; 1032 u32 uf;
1048 1033
1049 if (list_empty(&dep->request_list)) { 1034 if (list_empty(&dep->pending_list)) {
1050 dwc3_trace(trace_dwc3_gadget, 1035 dwc3_trace(trace_dwc3_gadget,
1051 "ISOC ep %s run out for requests", 1036 "ISOC ep %s run out for requests",
1052 dep->name); 1037 dep->name);
@@ -1114,7 +1099,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1114 if (ret) 1099 if (ret)
1115 return ret; 1100 return ret;
1116 1101
1117 list_add_tail(&req->list, &dep->request_list); 1102 list_add_tail(&req->list, &dep->pending_list);
1118 1103
1119 /* 1104 /*
1120 * If there are no pending requests and the endpoint isn't already 1105 * If there are no pending requests and the endpoint isn't already
@@ -1149,7 +1134,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1149 * notion of current microframe. 1134 * notion of current microframe.
1150 */ 1135 */
1151 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1136 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1152 if (list_empty(&dep->req_queued)) { 1137 if (list_empty(&dep->started_list)) {
1153 dwc3_stop_active_transfer(dwc, dep->number, true); 1138 dwc3_stop_active_transfer(dwc, dep->number, true);
1154 dep->flags = DWC3_EP_ENABLED; 1139 dep->flags = DWC3_EP_ENABLED;
1155 } 1140 }
@@ -1267,13 +1252,13 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1267 1252
1268 spin_lock_irqsave(&dwc->lock, flags); 1253 spin_lock_irqsave(&dwc->lock, flags);
1269 1254
1270 list_for_each_entry(r, &dep->request_list, list) { 1255 list_for_each_entry(r, &dep->pending_list, list) {
1271 if (r == req) 1256 if (r == req)
1272 break; 1257 break;
1273 } 1258 }
1274 1259
1275 if (r != req) { 1260 if (r != req) {
1276 list_for_each_entry(r, &dep->req_queued, list) { 1261 list_for_each_entry(r, &dep->started_list, list) {
1277 if (r == req) 1262 if (r == req)
1278 break; 1263 break;
1279 } 1264 }
@@ -1313,10 +1298,10 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1313 1298
1314 if (value) { 1299 if (value) {
1315 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || 1300 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1316 (!list_empty(&dep->req_queued) || 1301 (!list_empty(&dep->started_list) ||
1317 !list_empty(&dep->request_list)))) { 1302 !list_empty(&dep->pending_list)))) {
1318 dwc3_trace(trace_dwc3_gadget, 1303 dwc3_trace(trace_dwc3_gadget,
1319 "%s: pending request, cannot halt\n", 1304 "%s: pending request, cannot halt",
1320 dep->name); 1305 dep->name);
1321 return -EAGAIN; 1306 return -EAGAIN;
1322 } 1307 }
@@ -1417,22 +1402,16 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
1417 return DWC3_DSTS_SOFFN(reg); 1402 return DWC3_DSTS_SOFFN(reg);
1418} 1403}
1419 1404
1420static int dwc3_gadget_wakeup(struct usb_gadget *g) 1405static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1421{ 1406{
1422 struct dwc3 *dwc = gadget_to_dwc(g);
1423
1424 unsigned long timeout; 1407 unsigned long timeout;
1425 unsigned long flags;
1426 1408
1409 int ret;
1427 u32 reg; 1410 u32 reg;
1428 1411
1429 int ret = 0;
1430
1431 u8 link_state; 1412 u8 link_state;
1432 u8 speed; 1413 u8 speed;
1433 1414
1434 spin_lock_irqsave(&dwc->lock, flags);
1435
1436 /* 1415 /*
1437 * According to the Databook Remote wakeup request should 1416 * According to the Databook Remote wakeup request should
1438 * be issued only when the device is in early suspend state. 1417 * be issued only when the device is in early suspend state.
@@ -1445,8 +1424,7 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
1445 if ((speed == DWC3_DSTS_SUPERSPEED) || 1424 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1446 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) { 1425 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
1447 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n"); 1426 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n");
1448 ret = -EINVAL; 1427 return -EINVAL;
1449 goto out;
1450 } 1428 }
1451 1429
1452 link_state = DWC3_DSTS_USBLNKST(reg); 1430 link_state = DWC3_DSTS_USBLNKST(reg);
@@ -1459,14 +1437,13 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
1459 dwc3_trace(trace_dwc3_gadget, 1437 dwc3_trace(trace_dwc3_gadget,
1460 "can't wakeup from '%s'\n", 1438 "can't wakeup from '%s'\n",
1461 dwc3_gadget_link_string(link_state)); 1439 dwc3_gadget_link_string(link_state));
1462 ret = -EINVAL; 1440 return -EINVAL;
1463 goto out;
1464 } 1441 }
1465 1442
1466 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1443 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1467 if (ret < 0) { 1444 if (ret < 0) {
1468 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1445 dev_err(dwc->dev, "failed to put link in Recovery\n");
1469 goto out; 1446 return ret;
1470 } 1447 }
1471 1448
1472 /* Recent versions do this automatically */ 1449 /* Recent versions do this automatically */
@@ -1490,10 +1467,20 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
1490 1467
1491 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1468 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1492 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1469 dev_err(dwc->dev, "failed to send remote wakeup\n");
1493 ret = -EINVAL; 1470 return -EINVAL;
1494 } 1471 }
1495 1472
1496out: 1473 return 0;
1474}
1475
1476static int dwc3_gadget_wakeup(struct usb_gadget *g)
1477{
1478 struct dwc3 *dwc = gadget_to_dwc(g);
1479 unsigned long flags;
1480 int ret;
1481
1482 spin_lock_irqsave(&dwc->lock, flags);
1483 ret = __dwc3_gadget_wakeup(dwc);
1497 spin_unlock_irqrestore(&dwc->lock, flags); 1484 spin_unlock_irqrestore(&dwc->lock, flags);
1498 1485
1499 return ret; 1486 return ret;
@@ -1620,7 +1607,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
1620 1607
1621 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1608 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1622 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1609 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1623 IRQF_SHARED, "dwc3", dwc); 1610 IRQF_SHARED, "dwc3", dwc->ev_buf);
1624 if (ret) { 1611 if (ret) {
1625 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1612 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1626 irq, ret); 1613 irq, ret);
@@ -1682,6 +1669,17 @@ static int dwc3_gadget_start(struct usb_gadget *g,
1682 } 1669 }
1683 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1670 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1684 1671
1672 /*
1673 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1674 * field instead of letting dwc3 itself calculate that automatically.
1675 *
1676 * This way, we maximize the chances that we'll be able to get several
1677 * bursts of data without going through any sort of endpoint throttling.
1678 */
1679 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1680 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1681 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1682
1685 /* Start with SuperSpeed Default */ 1683 /* Start with SuperSpeed Default */
1686 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1684 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1687 1685
@@ -1720,7 +1718,7 @@ err2:
1720err1: 1718err1:
1721 spin_unlock_irqrestore(&dwc->lock, flags); 1719 spin_unlock_irqrestore(&dwc->lock, flags);
1722 1720
1723 free_irq(irq, dwc); 1721 free_irq(irq, dwc->ev_buf);
1724 1722
1725err0: 1723err0:
1726 return ret; 1724 return ret;
@@ -1743,7 +1741,7 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
1743 spin_unlock_irqrestore(&dwc->lock, flags); 1741 spin_unlock_irqrestore(&dwc->lock, flags);
1744 1742
1745 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1743 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1746 free_irq(irq, dwc); 1744 free_irq(irq, dwc->ev_buf);
1747 1745
1748 return 0; 1746 return 0;
1749} 1747}
@@ -1815,8 +1813,8 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1815 dep->endpoint.caps.dir_in = !!direction; 1813 dep->endpoint.caps.dir_in = !!direction;
1816 dep->endpoint.caps.dir_out = !direction; 1814 dep->endpoint.caps.dir_out = !direction;
1817 1815
1818 INIT_LIST_HEAD(&dep->request_list); 1816 INIT_LIST_HEAD(&dep->pending_list);
1819 INIT_LIST_HEAD(&dep->req_queued); 1817 INIT_LIST_HEAD(&dep->started_list);
1820 } 1818 }
1821 1819
1822 return 0; 1820 return 0;
@@ -1913,11 +1911,11 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1913 * If there are still queued request 1911 * If there are still queued request
1914 * then wait, do not issue either END 1912 * then wait, do not issue either END
1915 * or UPDATE TRANSFER, just attach next 1913 * or UPDATE TRANSFER, just attach next
1916 * request in request_list during 1914 * request in pending_list during
1917 * giveback.If any future queued request 1915 * giveback.If any future queued request
1918 * is successfully transferred then we 1916 * is successfully transferred then we
1919 * will issue UPDATE TRANSFER for all 1917 * will issue UPDATE TRANSFER for all
1920 * request in the request_list. 1918 * request in the pending_list.
1921 */ 1919 */
1922 dep->flags |= DWC3_EP_MISSED_ISOC; 1920 dep->flags |= DWC3_EP_MISSED_ISOC;
1923 } else { 1921 } else {
@@ -1963,15 +1961,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1963 int ret; 1961 int ret;
1964 1962
1965 do { 1963 do {
1966 req = next_request(&dep->req_queued); 1964 req = next_request(&dep->started_list);
1967 if (WARN_ON_ONCE(!req)) 1965 if (WARN_ON_ONCE(!req))
1968 return 1; 1966 return 1;
1969 1967
1970 i = 0; 1968 i = 0;
1971 do { 1969 do {
1972 slot = req->start_slot + i; 1970 slot = req->first_trb_index + i;
1973 if ((slot == DWC3_TRB_NUM - 1) && 1971 if (slot == DWC3_TRB_NUM - 1)
1974 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1975 slot++; 1972 slot++;
1976 slot %= DWC3_TRB_NUM; 1973 slot %= DWC3_TRB_NUM;
1977 trb = &dep->trb_pool[slot]; 1974 trb = &dep->trb_pool[slot];
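On completion the driver walks each request's TRBs starting from first_trb_index, now skipping the link-TRB slot regardless of transfer type. A small sketch of that index arithmetic (pool size assumed, helper name made up):

#define TRB_NUM	32	/* assumed pool size */

static unsigned int nth_trb_slot(unsigned int first_trb_index, unsigned int i)
{
	unsigned int slot = first_trb_index + i;

	if (slot == TRB_NUM - 1)	/* the link TRB lives here */
		slot++;

	return slot % TRB_NUM;
}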
@@ -1989,8 +1986,8 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1989 } while (1); 1986 } while (1);
1990 1987
1991 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1988 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1992 list_empty(&dep->req_queued)) { 1989 list_empty(&dep->started_list)) {
1993 if (list_empty(&dep->request_list)) { 1990 if (list_empty(&dep->pending_list)) {
1994 /* 1991 /*
1995 * If there is no entry in request list then do 1992 * If there is no entry in request list then do
1996 * not issue END TRANSFER now. Just set PENDING 1993 * not issue END TRANSFER now. Just set PENDING
@@ -2039,7 +2036,7 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
2039 if (!(dep->flags & DWC3_EP_ENABLED)) 2036 if (!(dep->flags & DWC3_EP_ENABLED))
2040 continue; 2037 continue;
2041 2038
2042 if (!list_empty(&dep->req_queued)) 2039 if (!list_empty(&dep->started_list))
2043 return; 2040 return;
2044 } 2041 }
2045 2042
@@ -2686,14 +2683,13 @@ static void dwc3_process_event_entry(struct dwc3 *dwc,
2686 } 2683 }
2687} 2684}
2688 2685
2689static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf) 2686static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
2690{ 2687{
2691 struct dwc3_event_buffer *evt; 2688 struct dwc3 *dwc = evt->dwc;
2692 irqreturn_t ret = IRQ_NONE; 2689 irqreturn_t ret = IRQ_NONE;
2693 int left; 2690 int left;
2694 u32 reg; 2691 u32 reg;
2695 2692
2696 evt = dwc->ev_buffs[buf];
2697 left = evt->count; 2693 left = evt->count;
2698 2694
2699 if (!(evt->flags & DWC3_EVENT_PENDING)) 2695 if (!(evt->flags & DWC3_EVENT_PENDING))
@@ -2718,7 +2714,7 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2718 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; 2714 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2719 left -= 4; 2715 left -= 4;
2720 2716
2721 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4); 2717 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
2722 } 2718 }
2723 2719
2724 evt->count = 0; 2720 evt->count = 0;
@@ -2726,39 +2722,34 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2726 ret = IRQ_HANDLED; 2722 ret = IRQ_HANDLED;
2727 2723
2728 /* Unmask interrupt */ 2724 /* Unmask interrupt */
2729 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2725 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2730 reg &= ~DWC3_GEVNTSIZ_INTMASK; 2726 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2731 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2727 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2732 2728
2733 return ret; 2729 return ret;
2734} 2730}
2735 2731
2736static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc) 2732static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
2737{ 2733{
2738 struct dwc3 *dwc = _dwc; 2734 struct dwc3_event_buffer *evt = _evt;
2735 struct dwc3 *dwc = evt->dwc;
2739 unsigned long flags; 2736 unsigned long flags;
2740 irqreturn_t ret = IRQ_NONE; 2737 irqreturn_t ret = IRQ_NONE;
2741 int i;
2742 2738
2743 spin_lock_irqsave(&dwc->lock, flags); 2739 spin_lock_irqsave(&dwc->lock, flags);
2744 2740 ret = dwc3_process_event_buf(evt);
2745 for (i = 0; i < dwc->num_event_buffers; i++)
2746 ret |= dwc3_process_event_buf(dwc, i);
2747
2748 spin_unlock_irqrestore(&dwc->lock, flags); 2741 spin_unlock_irqrestore(&dwc->lock, flags);
2749 2742
2750 return ret; 2743 return ret;
2751} 2744}
2752 2745
2753static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf) 2746static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
2754{ 2747{
2755 struct dwc3_event_buffer *evt; 2748 struct dwc3 *dwc = evt->dwc;
2756 u32 count; 2749 u32 count;
2757 u32 reg; 2750 u32 reg;
2758 2751
2759 evt = dwc->ev_buffs[buf]; 2752 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2760
2761 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2762 count &= DWC3_GEVNTCOUNT_MASK; 2753 count &= DWC3_GEVNTCOUNT_MASK;
2763 if (!count) 2754 if (!count)
2764 return IRQ_NONE; 2755 return IRQ_NONE;
@@ -2767,28 +2758,18 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2767 evt->flags |= DWC3_EVENT_PENDING; 2758 evt->flags |= DWC3_EVENT_PENDING;
2768 2759
2769 /* Mask interrupt */ 2760 /* Mask interrupt */
2770 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2761 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2771 reg |= DWC3_GEVNTSIZ_INTMASK; 2762 reg |= DWC3_GEVNTSIZ_INTMASK;
2772 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2763 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2773 2764
2774 return IRQ_WAKE_THREAD; 2765 return IRQ_WAKE_THREAD;
2775} 2766}
2776 2767
2777static irqreturn_t dwc3_interrupt(int irq, void *_dwc) 2768static irqreturn_t dwc3_interrupt(int irq, void *_evt)
2778{ 2769{
2779 struct dwc3 *dwc = _dwc; 2770 struct dwc3_event_buffer *evt = _evt;
2780 int i;
2781 irqreturn_t ret = IRQ_NONE;
2782
2783 for (i = 0; i < dwc->num_event_buffers; i++) {
2784 irqreturn_t status;
2785 2771
2786 status = dwc3_check_event_buf(dwc, i); 2772 return dwc3_check_event_buf(evt);
2787 if (status == IRQ_WAKE_THREAD)
2788 ret = status;
2789 }
2790
2791 return ret;
2792} 2773}
2793 2774
2794/** 2775/**
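The final hunks drop the per-buffer iteration: there is a single event buffer, and it, rather than the dwc3 struct, is the IRQ cookie. A hedged sketch of the resulting two-stage flow; the types and IRQ_* constants are stand-ins for this sketch, not the kernel API. The hard IRQ only latches the pending state and masks the interrupt; the threaded handler drains the events under the lock and unmasks again.

enum irqreturn { IRQ_NONE, IRQ_HANDLED, IRQ_WAKE_THREAD };

struct event_buffer {
	unsigned int count;	/* pending bytes, mirrors GEVNTCOUNT */
	unsigned int pending;	/* DWC3_EVENT_PENDING equivalent */
};

static enum irqreturn hard_irq(struct event_buffer *evt)
{
	if (!evt->count)
		return IRQ_NONE;

	evt->pending = 1;
	/* mask the interrupt here (GEVNTSIZ.INTMASK) before waking the thread */
	return IRQ_WAKE_THREAD;
}

static enum irqreturn thread_irq(struct event_buffer *evt)
{
	if (!evt->pending)
		return IRQ_NONE;

	/* process events, acknowledge GEVNTCOUNT, then unmask the interrupt */
	evt->count = 0;
	evt->pending = 0;
	return IRQ_HANDLED;
}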