aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>2016-03-04 21:43:07 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2016-03-04 21:43:07 -0500
commit3d0712deb0a416021e55febc7ec7f6c24f460e06 (patch)
treecf56aba54ac85516fc753b662a1880e8a50ee5d9
parent83253b3b0286ab6111e0266da74fb10b7b92746b (diff)
parent0561f77e2db9e72dc32e4f82b56fca8ba6b31171 (diff)
Merge tag 'usb-for-v4.6' of http://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-next
Felipe writes: usb changes for v4.6 merge window This is almost all under drivers/usb/dwc2/. Many changes to the host side implementation of dwc2 have been done by Douglas Anderson. We also have USB 3.1 support added to the Gadget Framework and, because of that work, dwc3 got support to Synopsys new DWC_usb31 IP core. Other than these 2 important series, we also have the usual collection of non-critical fixes, Documentation updates, and minor changes all over the place.
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt2
-rw-r--r--Documentation/usb/chipidea.txt9
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h198
-rw-r--r--arch/arm/mach-pxa/include/mach/pxa25x-udc.h163
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/chipidea/otg_fsm.c29
-rw-r--r--drivers/usb/chipidea/otg_fsm.h2
-rw-r--r--drivers/usb/chipidea/udc.c3
-rw-r--r--drivers/usb/common/usb-otg-fsm.c87
-rw-r--r--drivers/usb/dwc2/core.c1884
-rw-r--r--drivers/usb/dwc2/core.h151
-rw-r--r--drivers/usb/dwc2/gadget.c102
-rw-r--r--drivers/usb/dwc2/hcd.c2255
-rw-r--r--drivers/usb/dwc2/hcd.h134
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c49
-rw-r--r--drivers/usb/dwc2/hcd_intr.c174
-rw-r--r--drivers/usb/dwc2/hcd_queue.c1941
-rw-r--r--drivers/usb/dwc2/platform.c38
-rw-r--r--drivers/usb/dwc3/core.c31
-rw-r--r--drivers/usb/dwc3/core.h11
-rw-r--r--drivers/usb/dwc3/ep0.c9
-rw-r--r--drivers/usb/dwc3/gadget.c30
-rw-r--r--drivers/usb/gadget/composite.c150
-rw-r--r--drivers/usb/gadget/config.c9
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_acm.c6
-rw-r--r--drivers/usb/gadget/function/f_ecm.c2
-rw-r--r--drivers/usb/gadget/function/f_eem.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c155
-rw-r--r--drivers/usb/gadget/function/f_hid.c2
-rw-r--r--drivers/usb/gadget/function/f_loopback.c2
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/function/f_midi.c200
-rw-r--r--drivers/usb/gadget/function/f_ncm.c2
-rw-r--r--drivers/usb/gadget/function/f_obex.c3
-rw-r--r--drivers/usb/gadget/function/f_phonet.c2
-rw-r--r--drivers/usb/gadget/function/f_printer.c2
-rw-r--r--drivers/usb/gadget/function/f_rndis.c2
-rw-r--r--drivers/usb/gadget/function/f_serial.c2
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c2
-rw-r--r--drivers/usb/gadget/function/f_subset.c2
-rw-r--r--drivers/usb/gadget/function/f_tcm.c2
-rw-r--r--drivers/usb/gadget/function/f_uac1.c3
-rw-r--r--drivers/usb/gadget/function/f_uac2.c3
-rw-r--r--drivers/usb/gadget/legacy/inode.c28
-rw-r--r--drivers/usb/gadget/udc/Kconfig3
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c5
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c103
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c530
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h11
-rw-r--r--drivers/usb/gadget/udc/udc-core.c30
-rw-r--r--drivers/usb/host/Kconfig4
-rw-r--r--drivers/usb/host/Makefile3
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musbhsdma.c8
-rw-r--r--drivers/usb/musb/sunxi.c1
-rw-r--r--drivers/usb/musb/tusb6010_omap.c4
-rw-r--r--drivers/usb/musb/ux500_dma.c3
-rw-r--r--drivers/usb/phy/phy-am335x.c1
-rw-r--r--drivers/usb/phy/phy-generic.c11
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--drivers/usb/renesas_usbhs/Makefile2
-rw-r--r--drivers/usb/renesas_usbhs/common.c14
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c2
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c6
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c54
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.h3
-rw-r--r--include/linux/usb/composite.h6
-rw-r--r--include/linux/usb/gadget.h20
-rw-r--r--include/linux/usb/musb.h2
-rw-r--r--include/linux/usb/otg-fsm.h15
-rw-r--r--include/linux/usb/renesas_usbhs.h1
-rw-r--r--include/uapi/linux/usb/ch9.h7
74 files changed, 5264 insertions, 3479 deletions
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 221368207ca4..20a68bf2b4e7 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -8,6 +8,8 @@ Required properties:
8 - rockchip,rk3066-usb: The DWC2 USB controller instance in the rk3066 Soc; 8 - rockchip,rk3066-usb: The DWC2 USB controller instance in the rk3066 Soc;
9 - "rockchip,rk3188-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3188 Soc; 9 - "rockchip,rk3188-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3188 Soc;
10 - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc; 10 - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
11 - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
12 - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
11 - snps,dwc2: A generic DWC2 USB controller with default parameters. 13 - snps,dwc2: A generic DWC2 USB controller with default parameters.
12- reg : Should contain 1 register range (address and length) 14- reg : Should contain 1 register range (address and length)
13- interrupts : Should contain 1 interrupt 15- interrupts : Should contain 1 interrupt
diff --git a/Documentation/usb/chipidea.txt b/Documentation/usb/chipidea.txt
index 05f735a1b5a5..678741b0f213 100644
--- a/Documentation/usb/chipidea.txt
+++ b/Documentation/usb/chipidea.txt
@@ -26,16 +26,17 @@ cat /sys/kernel/debug/ci_hdrc.0/registers
26 On B-device: 26 On B-device:
27 echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req 27 echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
28 28
29 if HNP polling is not supported, also need:
30 On A-device:
31 echo 0 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
32
33 B-device should take host role and enumrate A-device. 29 B-device should take host role and enumrate A-device.
34 30
354) A-device switch back to host. 314) A-device switch back to host.
36 On B-device: 32 On B-device:
37 echo 0 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req 33 echo 0 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
38 34
35 or, by introducing HNP polling, B-Host can know when A-peripheral wish
36 to be host role, so this role switch also can be trigged in A-peripheral
37 side by answering the polling from B-Host, this can be done on A-device:
38 echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
39
39 A-device should switch back to host and enumrate B-device. 40 A-device should switch back to host and enumrate B-device.
40 41
415) Remove B-device(unplug micro B plug) and insert again in 10 seconds, 425) Remove B-device(unplug micro B plug) and insert again in 10 seconds,
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
index c5bae9c035d5..b7ddd27419c2 100644
--- a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
+++ b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
@@ -395,204 +395,6 @@
395#define CRP_AD_CBE_BESL 20 395#define CRP_AD_CBE_BESL 20
396#define CRP_AD_CBE_WRITE 0x00010000 396#define CRP_AD_CBE_WRITE 0x00010000
397 397
398
399/*
400 * USB Device Controller
401 *
402 * These are used by the USB gadget driver, so they don't follow the
403 * IXP4XX_ naming convetions.
404 *
405 */
406# define IXP4XX_USB_REG(x) (*((volatile u32 *)(x)))
407
408/* UDC Undocumented - Reserved1 */
409#define UDC_RES1 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0004)
410/* UDC Undocumented - Reserved2 */
411#define UDC_RES2 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0008)
412/* UDC Undocumented - Reserved3 */
413#define UDC_RES3 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x000C)
414/* UDC Control Register */
415#define UDCCR IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0000)
416/* UDC Endpoint 0 Control/Status Register */
417#define UDCCS0 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0010)
418/* UDC Endpoint 1 (IN) Control/Status Register */
419#define UDCCS1 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0014)
420/* UDC Endpoint 2 (OUT) Control/Status Register */
421#define UDCCS2 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0018)
422/* UDC Endpoint 3 (IN) Control/Status Register */
423#define UDCCS3 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x001C)
424/* UDC Endpoint 4 (OUT) Control/Status Register */
425#define UDCCS4 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0020)
426/* UDC Endpoint 5 (Interrupt) Control/Status Register */
427#define UDCCS5 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0024)
428/* UDC Endpoint 6 (IN) Control/Status Register */
429#define UDCCS6 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0028)
430/* UDC Endpoint 7 (OUT) Control/Status Register */
431#define UDCCS7 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x002C)
432/* UDC Endpoint 8 (IN) Control/Status Register */
433#define UDCCS8 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0030)
434/* UDC Endpoint 9 (OUT) Control/Status Register */
435#define UDCCS9 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0034)
436/* UDC Endpoint 10 (Interrupt) Control/Status Register */
437#define UDCCS10 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0038)
438/* UDC Endpoint 11 (IN) Control/Status Register */
439#define UDCCS11 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x003C)
440/* UDC Endpoint 12 (OUT) Control/Status Register */
441#define UDCCS12 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0040)
442/* UDC Endpoint 13 (IN) Control/Status Register */
443#define UDCCS13 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0044)
444/* UDC Endpoint 14 (OUT) Control/Status Register */
445#define UDCCS14 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0048)
446/* UDC Endpoint 15 (Interrupt) Control/Status Register */
447#define UDCCS15 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x004C)
448/* UDC Frame Number Register High */
449#define UFNRH IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0060)
450/* UDC Frame Number Register Low */
451#define UFNRL IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0064)
452/* UDC Byte Count Reg 2 */
453#define UBCR2 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0068)
454/* UDC Byte Count Reg 4 */
455#define UBCR4 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x006c)
456/* UDC Byte Count Reg 7 */
457#define UBCR7 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0070)
458/* UDC Byte Count Reg 9 */
459#define UBCR9 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0074)
460/* UDC Byte Count Reg 12 */
461#define UBCR12 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0078)
462/* UDC Byte Count Reg 14 */
463#define UBCR14 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x007c)
464/* UDC Endpoint 0 Data Register */
465#define UDDR0 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0080)
466/* UDC Endpoint 1 Data Register */
467#define UDDR1 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0100)
468/* UDC Endpoint 2 Data Register */
469#define UDDR2 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0180)
470/* UDC Endpoint 3 Data Register */
471#define UDDR3 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0200)
472/* UDC Endpoint 4 Data Register */
473#define UDDR4 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0400)
474/* UDC Endpoint 5 Data Register */
475#define UDDR5 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x00A0)
476/* UDC Endpoint 6 Data Register */
477#define UDDR6 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0600)
478/* UDC Endpoint 7 Data Register */
479#define UDDR7 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0680)
480/* UDC Endpoint 8 Data Register */
481#define UDDR8 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0700)
482/* UDC Endpoint 9 Data Register */
483#define UDDR9 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0900)
484/* UDC Endpoint 10 Data Register */
485#define UDDR10 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x00C0)
486/* UDC Endpoint 11 Data Register */
487#define UDDR11 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0B00)
488/* UDC Endpoint 12 Data Register */
489#define UDDR12 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0B80)
490/* UDC Endpoint 13 Data Register */
491#define UDDR13 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0C00)
492/* UDC Endpoint 14 Data Register */
493#define UDDR14 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0E00)
494/* UDC Endpoint 15 Data Register */
495#define UDDR15 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x00E0)
496/* UDC Interrupt Control Register 0 */
497#define UICR0 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0050)
498/* UDC Interrupt Control Register 1 */
499#define UICR1 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0054)
500/* UDC Status Interrupt Register 0 */
501#define USIR0 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0058)
502/* UDC Status Interrupt Register 1 */
503#define USIR1 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x005C)
504
505#define UDCCR_UDE (1 << 0) /* UDC enable */
506#define UDCCR_UDA (1 << 1) /* UDC active */
507#define UDCCR_RSM (1 << 2) /* Device resume */
508#define UDCCR_RESIR (1 << 3) /* Resume interrupt request */
509#define UDCCR_SUSIR (1 << 4) /* Suspend interrupt request */
510#define UDCCR_SRM (1 << 5) /* Suspend/resume interrupt mask */
511#define UDCCR_RSTIR (1 << 6) /* Reset interrupt request */
512#define UDCCR_REM (1 << 7) /* Reset interrupt mask */
513
514#define UDCCS0_OPR (1 << 0) /* OUT packet ready */
515#define UDCCS0_IPR (1 << 1) /* IN packet ready */
516#define UDCCS0_FTF (1 << 2) /* Flush Tx FIFO */
517#define UDCCS0_DRWF (1 << 3) /* Device remote wakeup feature */
518#define UDCCS0_SST (1 << 4) /* Sent stall */
519#define UDCCS0_FST (1 << 5) /* Force stall */
520#define UDCCS0_RNE (1 << 6) /* Receive FIFO no empty */
521#define UDCCS0_SA (1 << 7) /* Setup active */
522
523#define UDCCS_BI_TFS (1 << 0) /* Transmit FIFO service */
524#define UDCCS_BI_TPC (1 << 1) /* Transmit packet complete */
525#define UDCCS_BI_FTF (1 << 2) /* Flush Tx FIFO */
526#define UDCCS_BI_TUR (1 << 3) /* Transmit FIFO underrun */
527#define UDCCS_BI_SST (1 << 4) /* Sent stall */
528#define UDCCS_BI_FST (1 << 5) /* Force stall */
529#define UDCCS_BI_TSP (1 << 7) /* Transmit short packet */
530
531#define UDCCS_BO_RFS (1 << 0) /* Receive FIFO service */
532#define UDCCS_BO_RPC (1 << 1) /* Receive packet complete */
533#define UDCCS_BO_DME (1 << 3) /* DMA enable */
534#define UDCCS_BO_SST (1 << 4) /* Sent stall */
535#define UDCCS_BO_FST (1 << 5) /* Force stall */
536#define UDCCS_BO_RNE (1 << 6) /* Receive FIFO not empty */
537#define UDCCS_BO_RSP (1 << 7) /* Receive short packet */
538
539#define UDCCS_II_TFS (1 << 0) /* Transmit FIFO service */
540#define UDCCS_II_TPC (1 << 1) /* Transmit packet complete */
541#define UDCCS_II_FTF (1 << 2) /* Flush Tx FIFO */
542#define UDCCS_II_TUR (1 << 3) /* Transmit FIFO underrun */
543#define UDCCS_II_TSP (1 << 7) /* Transmit short packet */
544
545#define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */
546#define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */
547#define UDCCS_IO_ROF (1 << 3) /* Receive overflow */
548#define UDCCS_IO_DME (1 << 3) /* DMA enable */
549#define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */
550#define UDCCS_IO_RSP (1 << 7) /* Receive short packet */
551
552#define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */
553#define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */
554#define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */
555#define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */
556#define UDCCS_INT_SST (1 << 4) /* Sent stall */
557#define UDCCS_INT_FST (1 << 5) /* Force stall */
558#define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */
559
560#define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */
561#define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */
562#define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */
563#define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */
564#define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */
565#define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */
566#define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */
567#define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */
568
569#define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */
570#define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */
571#define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */
572#define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */
573#define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */
574#define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */
575#define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */
576#define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */
577
578#define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */
579#define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */
580#define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */
581#define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */
582#define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */
583#define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */
584#define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */
585#define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */
586
587#define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */
588#define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */
589#define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */
590#define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */
591#define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */
592#define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */
593#define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */
594#define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */
595
596#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ 398#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
597 399
598/* "fuse" bits of IXP_EXP_CFG2 */ 400/* "fuse" bits of IXP_EXP_CFG2 */
diff --git a/arch/arm/mach-pxa/include/mach/pxa25x-udc.h b/arch/arm/mach-pxa/include/mach/pxa25x-udc.h
index 1b80a4805a60..e69de29bb2d1 100644
--- a/arch/arm/mach-pxa/include/mach/pxa25x-udc.h
+++ b/arch/arm/mach-pxa/include/mach/pxa25x-udc.h
@@ -1,163 +0,0 @@
1#ifndef _ASM_ARCH_PXA25X_UDC_H
2#define _ASM_ARCH_PXA25X_UDC_H
3
4#ifdef _ASM_ARCH_PXA27X_UDC_H
5#error "You can't include both PXA25x and PXA27x UDC support"
6#endif
7
8#define UDC_RES1 __REG(0x40600004) /* UDC Undocumented - Reserved1 */
9#define UDC_RES2 __REG(0x40600008) /* UDC Undocumented - Reserved2 */
10#define UDC_RES3 __REG(0x4060000C) /* UDC Undocumented - Reserved3 */
11
12#define UDCCR __REG(0x40600000) /* UDC Control Register */
13#define UDCCR_UDE (1 << 0) /* UDC enable */
14#define UDCCR_UDA (1 << 1) /* UDC active */
15#define UDCCR_RSM (1 << 2) /* Device resume */
16#define UDCCR_RESIR (1 << 3) /* Resume interrupt request */
17#define UDCCR_SUSIR (1 << 4) /* Suspend interrupt request */
18#define UDCCR_SRM (1 << 5) /* Suspend/resume interrupt mask */
19#define UDCCR_RSTIR (1 << 6) /* Reset interrupt request */
20#define UDCCR_REM (1 << 7) /* Reset interrupt mask */
21
22#define UDCCS0 __REG(0x40600010) /* UDC Endpoint 0 Control/Status Register */
23#define UDCCS0_OPR (1 << 0) /* OUT packet ready */
24#define UDCCS0_IPR (1 << 1) /* IN packet ready */
25#define UDCCS0_FTF (1 << 2) /* Flush Tx FIFO */
26#define UDCCS0_DRWF (1 << 3) /* Device remote wakeup feature */
27#define UDCCS0_SST (1 << 4) /* Sent stall */
28#define UDCCS0_FST (1 << 5) /* Force stall */
29#define UDCCS0_RNE (1 << 6) /* Receive FIFO no empty */
30#define UDCCS0_SA (1 << 7) /* Setup active */
31
32/* Bulk IN - Endpoint 1,6,11 */
33#define UDCCS1 __REG(0x40600014) /* UDC Endpoint 1 (IN) Control/Status Register */
34#define UDCCS6 __REG(0x40600028) /* UDC Endpoint 6 (IN) Control/Status Register */
35#define UDCCS11 __REG(0x4060003C) /* UDC Endpoint 11 (IN) Control/Status Register */
36
37#define UDCCS_BI_TFS (1 << 0) /* Transmit FIFO service */
38#define UDCCS_BI_TPC (1 << 1) /* Transmit packet complete */
39#define UDCCS_BI_FTF (1 << 2) /* Flush Tx FIFO */
40#define UDCCS_BI_TUR (1 << 3) /* Transmit FIFO underrun */
41#define UDCCS_BI_SST (1 << 4) /* Sent stall */
42#define UDCCS_BI_FST (1 << 5) /* Force stall */
43#define UDCCS_BI_TSP (1 << 7) /* Transmit short packet */
44
45/* Bulk OUT - Endpoint 2,7,12 */
46#define UDCCS2 __REG(0x40600018) /* UDC Endpoint 2 (OUT) Control/Status Register */
47#define UDCCS7 __REG(0x4060002C) /* UDC Endpoint 7 (OUT) Control/Status Register */
48#define UDCCS12 __REG(0x40600040) /* UDC Endpoint 12 (OUT) Control/Status Register */
49
50#define UDCCS_BO_RFS (1 << 0) /* Receive FIFO service */
51#define UDCCS_BO_RPC (1 << 1) /* Receive packet complete */
52#define UDCCS_BO_DME (1 << 3) /* DMA enable */
53#define UDCCS_BO_SST (1 << 4) /* Sent stall */
54#define UDCCS_BO_FST (1 << 5) /* Force stall */
55#define UDCCS_BO_RNE (1 << 6) /* Receive FIFO not empty */
56#define UDCCS_BO_RSP (1 << 7) /* Receive short packet */
57
58/* Isochronous IN - Endpoint 3,8,13 */
59#define UDCCS3 __REG(0x4060001C) /* UDC Endpoint 3 (IN) Control/Status Register */
60#define UDCCS8 __REG(0x40600030) /* UDC Endpoint 8 (IN) Control/Status Register */
61#define UDCCS13 __REG(0x40600044) /* UDC Endpoint 13 (IN) Control/Status Register */
62
63#define UDCCS_II_TFS (1 << 0) /* Transmit FIFO service */
64#define UDCCS_II_TPC (1 << 1) /* Transmit packet complete */
65#define UDCCS_II_FTF (1 << 2) /* Flush Tx FIFO */
66#define UDCCS_II_TUR (1 << 3) /* Transmit FIFO underrun */
67#define UDCCS_II_TSP (1 << 7) /* Transmit short packet */
68
69/* Isochronous OUT - Endpoint 4,9,14 */
70#define UDCCS4 __REG(0x40600020) /* UDC Endpoint 4 (OUT) Control/Status Register */
71#define UDCCS9 __REG(0x40600034) /* UDC Endpoint 9 (OUT) Control/Status Register */
72#define UDCCS14 __REG(0x40600048) /* UDC Endpoint 14 (OUT) Control/Status Register */
73
74#define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */
75#define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */
76#define UDCCS_IO_ROF (1 << 2) /* Receive overflow */
77#define UDCCS_IO_DME (1 << 3) /* DMA enable */
78#define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */
79#define UDCCS_IO_RSP (1 << 7) /* Receive short packet */
80
81/* Interrupt IN - Endpoint 5,10,15 */
82#define UDCCS5 __REG(0x40600024) /* UDC Endpoint 5 (Interrupt) Control/Status Register */
83#define UDCCS10 __REG(0x40600038) /* UDC Endpoint 10 (Interrupt) Control/Status Register */
84#define UDCCS15 __REG(0x4060004C) /* UDC Endpoint 15 (Interrupt) Control/Status Register */
85
86#define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */
87#define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */
88#define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */
89#define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */
90#define UDCCS_INT_SST (1 << 4) /* Sent stall */
91#define UDCCS_INT_FST (1 << 5) /* Force stall */
92#define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */
93
94#define UFNRH __REG(0x40600060) /* UDC Frame Number Register High */
95#define UFNRL __REG(0x40600064) /* UDC Frame Number Register Low */
96#define UBCR2 __REG(0x40600068) /* UDC Byte Count Reg 2 */
97#define UBCR4 __REG(0x4060006c) /* UDC Byte Count Reg 4 */
98#define UBCR7 __REG(0x40600070) /* UDC Byte Count Reg 7 */
99#define UBCR9 __REG(0x40600074) /* UDC Byte Count Reg 9 */
100#define UBCR12 __REG(0x40600078) /* UDC Byte Count Reg 12 */
101#define UBCR14 __REG(0x4060007c) /* UDC Byte Count Reg 14 */
102#define UDDR0 __REG(0x40600080) /* UDC Endpoint 0 Data Register */
103#define UDDR1 __REG(0x40600100) /* UDC Endpoint 1 Data Register */
104#define UDDR2 __REG(0x40600180) /* UDC Endpoint 2 Data Register */
105#define UDDR3 __REG(0x40600200) /* UDC Endpoint 3 Data Register */
106#define UDDR4 __REG(0x40600400) /* UDC Endpoint 4 Data Register */
107#define UDDR5 __REG(0x406000A0) /* UDC Endpoint 5 Data Register */
108#define UDDR6 __REG(0x40600600) /* UDC Endpoint 6 Data Register */
109#define UDDR7 __REG(0x40600680) /* UDC Endpoint 7 Data Register */
110#define UDDR8 __REG(0x40600700) /* UDC Endpoint 8 Data Register */
111#define UDDR9 __REG(0x40600900) /* UDC Endpoint 9 Data Register */
112#define UDDR10 __REG(0x406000C0) /* UDC Endpoint 10 Data Register */
113#define UDDR11 __REG(0x40600B00) /* UDC Endpoint 11 Data Register */
114#define UDDR12 __REG(0x40600B80) /* UDC Endpoint 12 Data Register */
115#define UDDR13 __REG(0x40600C00) /* UDC Endpoint 13 Data Register */
116#define UDDR14 __REG(0x40600E00) /* UDC Endpoint 14 Data Register */
117#define UDDR15 __REG(0x406000E0) /* UDC Endpoint 15 Data Register */
118
119#define UICR0 __REG(0x40600050) /* UDC Interrupt Control Register 0 */
120
121#define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */
122#define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */
123#define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */
124#define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */
125#define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */
126#define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */
127#define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */
128#define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */
129
130#define UICR1 __REG(0x40600054) /* UDC Interrupt Control Register 1 */
131
132#define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */
133#define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */
134#define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */
135#define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */
136#define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */
137#define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */
138#define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */
139#define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */
140
141#define USIR0 __REG(0x40600058) /* UDC Status Interrupt Register 0 */
142
143#define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */
144#define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */
145#define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */
146#define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */
147#define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */
148#define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */
149#define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */
150#define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */
151
152#define USIR1 __REG(0x4060005C) /* UDC Status Interrupt Register 1 */
153
154#define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */
155#define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */
156#define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */
157#define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */
158#define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */
159#define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */
160#define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */
161#define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */
162
163#endif
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index d5c57f1e98fd..dca78565eb55 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -26,7 +26,7 @@ obj-$(CONFIG_USB_U132_HCD) += host/
26obj-$(CONFIG_USB_R8A66597_HCD) += host/ 26obj-$(CONFIG_USB_R8A66597_HCD) += host/
27obj-$(CONFIG_USB_HWA_HCD) += host/ 27obj-$(CONFIG_USB_HWA_HCD) += host/
28obj-$(CONFIG_USB_IMX21_HCD) += host/ 28obj-$(CONFIG_USB_IMX21_HCD) += host/
29obj-$(CONFIG_USB_FSL_MPH_DR_OF) += host/ 29obj-$(CONFIG_USB_FSL_USB2) += host/
30obj-$(CONFIG_USB_FOTG210_HCD) += host/ 30obj-$(CONFIG_USB_FOTG210_HCD) += host/
31obj-$(CONFIG_USB_MAX3421_HCD) += host/ 31obj-$(CONFIG_USB_MAX3421_HCD) += host/
32 32
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index ba90dc66703d..de8e22ec3902 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -66,6 +66,11 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr,
66 return count; 66 return count;
67 } 67 }
68 ci->fsm.a_bus_req = 1; 68 ci->fsm.a_bus_req = 1;
69 if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
70 ci->gadget.host_request_flag = 1;
71 mutex_unlock(&ci->fsm.lock);
72 return count;
73 }
69 } 74 }
70 75
71 ci_otg_queue_work(ci); 76 ci_otg_queue_work(ci);
@@ -144,8 +149,14 @@ set_b_bus_req(struct device *dev, struct device_attribute *attr,
144 mutex_lock(&ci->fsm.lock); 149 mutex_lock(&ci->fsm.lock);
145 if (buf[0] == '0') 150 if (buf[0] == '0')
146 ci->fsm.b_bus_req = 0; 151 ci->fsm.b_bus_req = 0;
147 else if (buf[0] == '1') 152 else if (buf[0] == '1') {
148 ci->fsm.b_bus_req = 1; 153 ci->fsm.b_bus_req = 1;
154 if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
155 ci->gadget.host_request_flag = 1;
156 mutex_unlock(&ci->fsm.lock);
157 return count;
158 }
159 }
149 160
150 ci_otg_queue_work(ci); 161 ci_otg_queue_work(ci);
151 mutex_unlock(&ci->fsm.lock); 162 mutex_unlock(&ci->fsm.lock);
@@ -198,6 +209,7 @@ static unsigned otg_timer_ms[] = {
198 TA_AIDL_BDIS, 209 TA_AIDL_BDIS,
199 TB_ASE0_BRST, 210 TB_ASE0_BRST,
200 TA_BIDL_ADIS, 211 TA_BIDL_ADIS,
212 TB_AIDL_BDIS,
201 TB_SE0_SRP, 213 TB_SE0_SRP,
202 TB_SRP_FAIL, 214 TB_SRP_FAIL,
203 0, 215 0,
@@ -309,6 +321,12 @@ static int a_bidl_adis_tmout(struct ci_hdrc *ci)
309 return 0; 321 return 0;
310} 322}
311 323
324static int b_aidl_bdis_tmout(struct ci_hdrc *ci)
325{
326 ci->fsm.a_bus_suspend = 1;
327 return 0;
328}
329
312static int b_se0_srp_tmout(struct ci_hdrc *ci) 330static int b_se0_srp_tmout(struct ci_hdrc *ci)
313{ 331{
314 ci->fsm.b_se0_srp = 1; 332 ci->fsm.b_se0_srp = 1;
@@ -353,6 +371,7 @@ static int (*otg_timer_handlers[])(struct ci_hdrc *) = {
353 a_aidl_bdis_tmout, /* A_AIDL_BDIS */ 371 a_aidl_bdis_tmout, /* A_AIDL_BDIS */
354 b_ase0_brst_tmout, /* B_ASE0_BRST */ 372 b_ase0_brst_tmout, /* B_ASE0_BRST */
355 a_bidl_adis_tmout, /* A_BIDL_ADIS */ 373 a_bidl_adis_tmout, /* A_BIDL_ADIS */
374 b_aidl_bdis_tmout, /* B_AIDL_BDIS */
356 b_se0_srp_tmout, /* B_SE0_SRP */ 375 b_se0_srp_tmout, /* B_SE0_SRP */
357 b_srp_fail_tmout, /* B_SRP_FAIL */ 376 b_srp_fail_tmout, /* B_SRP_FAIL */
358 NULL, /* A_WAIT_ENUM */ 377 NULL, /* A_WAIT_ENUM */
@@ -644,9 +663,9 @@ static void ci_otg_fsm_event(struct ci_hdrc *ci)
644 break; 663 break;
645 case OTG_STATE_B_PERIPHERAL: 664 case OTG_STATE_B_PERIPHERAL:
646 if ((intr_sts & USBi_SLI) && port_conn && otg_bsess_vld) { 665 if ((intr_sts & USBi_SLI) && port_conn && otg_bsess_vld) {
647 fsm->a_bus_suspend = 1; 666 ci_otg_add_timer(ci, B_AIDL_BDIS);
648 ci_otg_queue_work(ci);
649 } else if (intr_sts & USBi_PCI) { 667 } else if (intr_sts & USBi_PCI) {
668 ci_otg_del_timer(ci, B_AIDL_BDIS);
650 if (fsm->a_bus_suspend == 1) 669 if (fsm->a_bus_suspend == 1)
651 fsm->a_bus_suspend = 0; 670 fsm->a_bus_suspend = 0;
652 } 671 }
@@ -786,6 +805,10 @@ int ci_hdrc_otg_fsm_init(struct ci_hdrc *ci)
786 ci->fsm.id = hw_read_otgsc(ci, OTGSC_ID) ? 1 : 0; 805 ci->fsm.id = hw_read_otgsc(ci, OTGSC_ID) ? 1 : 0;
787 ci->fsm.otg->state = OTG_STATE_UNDEFINED; 806 ci->fsm.otg->state = OTG_STATE_UNDEFINED;
788 ci->fsm.ops = &ci_otg_ops; 807 ci->fsm.ops = &ci_otg_ops;
808 ci->gadget.hnp_polling_support = 1;
809 ci->fsm.host_req_flag = devm_kzalloc(ci->dev, 1, GFP_KERNEL);
810 if (!ci->fsm.host_req_flag)
811 return -ENOMEM;
789 812
790 mutex_init(&ci->fsm.lock); 813 mutex_init(&ci->fsm.lock);
791 814
diff --git a/drivers/usb/chipidea/otg_fsm.h b/drivers/usb/chipidea/otg_fsm.h
index 262d6ef8df7c..6366fe398ba6 100644
--- a/drivers/usb/chipidea/otg_fsm.h
+++ b/drivers/usb/chipidea/otg_fsm.h
@@ -62,6 +62,8 @@
62/* SSEND time before SRP */ 62/* SSEND time before SRP */
63#define TB_SSEND_SRP (1500) /* minimum 1.5 sec, section:5.1.2 */ 63#define TB_SSEND_SRP (1500) /* minimum 1.5 sec, section:5.1.2 */
64 64
65#define TB_AIDL_BDIS (20) /* 4ms ~ 150ms, section 5.2.1 */
66
65#if IS_ENABLED(CONFIG_USB_OTG_FSM) 67#if IS_ENABLED(CONFIG_USB_OTG_FSM)
66 68
67int ci_hdrc_otg_fsm_init(struct ci_hdrc *ci); 69int ci_hdrc_otg_fsm_init(struct ci_hdrc *ci);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 00250ab38ddb..065f5d97aa67 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1067,7 +1067,8 @@ __acquires(ci->lock)
1067 } 1067 }
1068 break; 1068 break;
1069 case USB_REQ_GET_STATUS: 1069 case USB_REQ_GET_STATUS:
1070 if (type != (USB_DIR_IN|USB_RECIP_DEVICE) && 1070 if ((type != (USB_DIR_IN|USB_RECIP_DEVICE) ||
1071 le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
1071 type != (USB_DIR_IN|USB_RECIP_ENDPOINT) && 1072 type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
1072 type != (USB_DIR_IN|USB_RECIP_INTERFACE)) 1073 type != (USB_DIR_IN|USB_RECIP_INTERFACE))
1073 goto delegate; 1074 goto delegate;
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index 61d538aa2346..504708f59b93 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -78,6 +78,8 @@ static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
78 fsm->b_srp_done = 0; 78 fsm->b_srp_done = 0;
79 break; 79 break;
80 case OTG_STATE_B_PERIPHERAL: 80 case OTG_STATE_B_PERIPHERAL:
81 if (fsm->otg->gadget)
82 fsm->otg->gadget->host_request_flag = 0;
81 break; 83 break;
82 case OTG_STATE_B_WAIT_ACON: 84 case OTG_STATE_B_WAIT_ACON:
83 otg_del_timer(fsm, B_ASE0_BRST); 85 otg_del_timer(fsm, B_ASE0_BRST);
@@ -107,6 +109,8 @@ static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
107 case OTG_STATE_A_PERIPHERAL: 109 case OTG_STATE_A_PERIPHERAL:
108 otg_del_timer(fsm, A_BIDL_ADIS); 110 otg_del_timer(fsm, A_BIDL_ADIS);
109 fsm->a_bidl_adis_tmout = 0; 111 fsm->a_bidl_adis_tmout = 0;
112 if (fsm->otg->gadget)
113 fsm->otg->gadget->host_request_flag = 0;
110 break; 114 break;
111 case OTG_STATE_A_WAIT_VFALL: 115 case OTG_STATE_A_WAIT_VFALL:
112 otg_del_timer(fsm, A_WAIT_VFALL); 116 otg_del_timer(fsm, A_WAIT_VFALL);
@@ -120,6 +124,87 @@ static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
120 } 124 }
121} 125}
122 126
127static void otg_hnp_polling_work(struct work_struct *work)
128{
129 struct otg_fsm *fsm = container_of(to_delayed_work(work),
130 struct otg_fsm, hnp_polling_work);
131 struct usb_device *udev;
132 enum usb_otg_state state = fsm->otg->state;
133 u8 flag;
134 int retval;
135
136 if (state != OTG_STATE_A_HOST && state != OTG_STATE_B_HOST)
137 return;
138
139 udev = usb_hub_find_child(fsm->otg->host->root_hub, 1);
140 if (!udev) {
141 dev_err(fsm->otg->host->controller,
142 "no usb dev connected, can't start HNP polling\n");
143 return;
144 }
145
146 *fsm->host_req_flag = 0;
147 /* Get host request flag from connected USB device */
148 retval = usb_control_msg(udev,
149 usb_rcvctrlpipe(udev, 0),
150 USB_REQ_GET_STATUS,
151 USB_DIR_IN | USB_RECIP_DEVICE,
152 0,
153 OTG_STS_SELECTOR,
154 fsm->host_req_flag,
155 1,
156 USB_CTRL_GET_TIMEOUT);
157 if (retval != 1) {
158 dev_err(&udev->dev, "Get one byte OTG status failed\n");
159 return;
160 }
161
162 flag = *fsm->host_req_flag;
163 if (flag == 0) {
164 /* Continue HNP polling */
165 schedule_delayed_work(&fsm->hnp_polling_work,
166 msecs_to_jiffies(T_HOST_REQ_POLL));
167 return;
168 } else if (flag != HOST_REQUEST_FLAG) {
169 dev_err(&udev->dev, "host request flag %d is invalid\n", flag);
170 return;
171 }
172
173 /* Host request flag is set */
174 if (state == OTG_STATE_A_HOST) {
175 /* Set b_hnp_enable */
176 if (!fsm->otg->host->b_hnp_enable) {
177 retval = usb_control_msg(udev,
178 usb_sndctrlpipe(udev, 0),
179 USB_REQ_SET_FEATURE, 0,
180 USB_DEVICE_B_HNP_ENABLE,
181 0, NULL, 0,
182 USB_CTRL_SET_TIMEOUT);
183 if (retval >= 0)
184 fsm->otg->host->b_hnp_enable = 1;
185 }
186 fsm->a_bus_req = 0;
187 } else if (state == OTG_STATE_B_HOST) {
188 fsm->b_bus_req = 0;
189 }
190
191 otg_statemachine(fsm);
192}
193
194static void otg_start_hnp_polling(struct otg_fsm *fsm)
195{
196 /*
197 * The memory of host_req_flag should be allocated by
198 * controller driver, otherwise, hnp polling is not started.
199 */
200 if (!fsm->host_req_flag)
201 return;
202
203 INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
204 schedule_delayed_work(&fsm->hnp_polling_work,
205 msecs_to_jiffies(T_HOST_REQ_POLL));
206}
207
123/* Called when entering a state */ 208/* Called when entering a state */
124static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state) 209static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
125{ 210{
@@ -169,6 +254,7 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
169 otg_set_protocol(fsm, PROTO_HOST); 254 otg_set_protocol(fsm, PROTO_HOST);
170 usb_bus_start_enum(fsm->otg->host, 255 usb_bus_start_enum(fsm->otg->host,
171 fsm->otg->host->otg_port); 256 fsm->otg->host->otg_port);
257 otg_start_hnp_polling(fsm);
172 break; 258 break;
173 case OTG_STATE_A_IDLE: 259 case OTG_STATE_A_IDLE:
174 otg_drv_vbus(fsm, 0); 260 otg_drv_vbus(fsm, 0);
@@ -203,6 +289,7 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
203 */ 289 */
204 if (!fsm->a_bus_req || fsm->a_suspend_req_inf) 290 if (!fsm->a_bus_req || fsm->a_suspend_req_inf)
205 otg_add_timer(fsm, A_WAIT_ENUM); 291 otg_add_timer(fsm, A_WAIT_ENUM);
292 otg_start_hnp_polling(fsm);
206 break; 293 break;
207 case OTG_STATE_A_SUSPEND: 294 case OTG_STATE_A_SUSPEND:
208 otg_drv_vbus(fsm, 1); 295 otg_drv_vbus(fsm, 1);
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 46c4ba75dc2a..4135a5ff67ca 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -56,189 +56,6 @@
56#include "core.h" 56#include "core.h"
57#include "hcd.h" 57#include "hcd.h"
58 58
59#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
60/**
61 * dwc2_backup_host_registers() - Backup controller host registers.
62 * When suspending usb bus, registers needs to be backuped
63 * if controller power is disabled once suspended.
64 *
65 * @hsotg: Programming view of the DWC_otg controller
66 */
67static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
68{
69 struct dwc2_hregs_backup *hr;
70 int i;
71
72 dev_dbg(hsotg->dev, "%s\n", __func__);
73
74 /* Backup Host regs */
75 hr = &hsotg->hr_backup;
76 hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
77 hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
78 for (i = 0; i < hsotg->core_params->host_channels; ++i)
79 hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
80
81 hr->hprt0 = dwc2_read_hprt0(hsotg);
82 hr->hfir = dwc2_readl(hsotg->regs + HFIR);
83 hr->valid = true;
84
85 return 0;
86}
87
88/**
89 * dwc2_restore_host_registers() - Restore controller host registers.
90 * When resuming usb bus, device registers needs to be restored
91 * if controller power were disabled.
92 *
93 * @hsotg: Programming view of the DWC_otg controller
94 */
95static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
96{
97 struct dwc2_hregs_backup *hr;
98 int i;
99
100 dev_dbg(hsotg->dev, "%s\n", __func__);
101
102 /* Restore host regs */
103 hr = &hsotg->hr_backup;
104 if (!hr->valid) {
105 dev_err(hsotg->dev, "%s: no host registers to restore\n",
106 __func__);
107 return -EINVAL;
108 }
109 hr->valid = false;
110
111 dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
112 dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
113
114 for (i = 0; i < hsotg->core_params->host_channels; ++i)
115 dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
116
117 dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
118 dwc2_writel(hr->hfir, hsotg->regs + HFIR);
119 hsotg->frame_number = 0;
120
121 return 0;
122}
123#else
124static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
125{ return 0; }
126
127static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
128{ return 0; }
129#endif
130
131#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
132 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
133/**
134 * dwc2_backup_device_registers() - Backup controller device registers.
135 * When suspending usb bus, registers needs to be backuped
136 * if controller power is disabled once suspended.
137 *
138 * @hsotg: Programming view of the DWC_otg controller
139 */
140static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
141{
142 struct dwc2_dregs_backup *dr;
143 int i;
144
145 dev_dbg(hsotg->dev, "%s\n", __func__);
146
147 /* Backup dev regs */
148 dr = &hsotg->dr_backup;
149
150 dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
151 dr->dctl = dwc2_readl(hsotg->regs + DCTL);
152 dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
153 dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
154 dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
155
156 for (i = 0; i < hsotg->num_of_eps; i++) {
157 /* Backup IN EPs */
158 dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));
159
160 /* Ensure DATA PID is correctly configured */
161 if (dr->diepctl[i] & DXEPCTL_DPID)
162 dr->diepctl[i] |= DXEPCTL_SETD1PID;
163 else
164 dr->diepctl[i] |= DXEPCTL_SETD0PID;
165
166 dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
167 dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));
168
169 /* Backup OUT EPs */
170 dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));
171
172 /* Ensure DATA PID is correctly configured */
173 if (dr->doepctl[i] & DXEPCTL_DPID)
174 dr->doepctl[i] |= DXEPCTL_SETD1PID;
175 else
176 dr->doepctl[i] |= DXEPCTL_SETD0PID;
177
178 dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
179 dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
180 }
181 dr->valid = true;
182 return 0;
183}
184
185/**
186 * dwc2_restore_device_registers() - Restore controller device registers.
187 * When resuming usb bus, device registers needs to be restored
188 * if controller power were disabled.
189 *
190 * @hsotg: Programming view of the DWC_otg controller
191 */
192static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
193{
194 struct dwc2_dregs_backup *dr;
195 u32 dctl;
196 int i;
197
198 dev_dbg(hsotg->dev, "%s\n", __func__);
199
200 /* Restore dev regs */
201 dr = &hsotg->dr_backup;
202 if (!dr->valid) {
203 dev_err(hsotg->dev, "%s: no device registers to restore\n",
204 __func__);
205 return -EINVAL;
206 }
207 dr->valid = false;
208
209 dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
210 dwc2_writel(dr->dctl, hsotg->regs + DCTL);
211 dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
212 dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
213 dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);
214
215 for (i = 0; i < hsotg->num_of_eps; i++) {
216 /* Restore IN EPs */
217 dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
218 dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
219 dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));
220
221 /* Restore OUT EPs */
222 dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
223 dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
224 dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
225 }
226
227 /* Set the Power-On Programming done bit */
228 dctl = dwc2_readl(hsotg->regs + DCTL);
229 dctl |= DCTL_PWRONPRGDONE;
230 dwc2_writel(dctl, hsotg->regs + DCTL);
231
232 return 0;
233}
234#else
235static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
236{ return 0; }
237
238static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
239{ return 0; }
240#endif
241
242/** 59/**
243 * dwc2_backup_global_registers() - Backup global controller registers. 60 * dwc2_backup_global_registers() - Backup global controller registers.
244 * When suspending usb bus, registers needs to be backuped 61 * When suspending usb bus, registers needs to be backuped
@@ -421,62 +238,6 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
421 return ret; 238 return ret;
422} 239}
423 240
424/**
425 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts,
426 * used in both device and host modes
427 *
428 * @hsotg: Programming view of the DWC_otg controller
429 */
430static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
431{
432 u32 intmsk;
433
434 /* Clear any pending OTG Interrupts */
435 dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
436
437 /* Clear any pending interrupts */
438 dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
439
440 /* Enable the interrupts in the GINTMSK */
441 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
442
443 if (hsotg->core_params->dma_enable <= 0)
444 intmsk |= GINTSTS_RXFLVL;
445 if (hsotg->core_params->external_id_pin_ctl <= 0)
446 intmsk |= GINTSTS_CONIDSTSCHNG;
447
448 intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
449 GINTSTS_SESSREQINT;
450
451 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
452}
453
454/*
455 * Initializes the FSLSPClkSel field of the HCFG register depending on the
456 * PHY type
457 */
458static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
459{
460 u32 hcfg, val;
461
462 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
463 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
464 hsotg->core_params->ulpi_fs_ls > 0) ||
465 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
466 /* Full speed PHY */
467 val = HCFG_FSLSPCLKSEL_48_MHZ;
468 } else {
469 /* High speed PHY running at full speed or high speed */
470 val = HCFG_FSLSPCLKSEL_30_60_MHZ;
471 }
472
473 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
474 hcfg = dwc2_readl(hsotg->regs + HCFG);
475 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
476 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
477 dwc2_writel(hcfg, hsotg->regs + HCFG);
478}
479
480/* 241/*
481 * Do core a soft reset of the core. Be careful with this because it 242 * Do core a soft reset of the core. Be careful with this because it
482 * resets all the internal state machines of the core. 243 * resets all the internal state machines of the core.
@@ -646,1644 +407,6 @@ int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg)
646 return 0; 407 return 0;
647} 408}
648 409
649static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
650{
651 u32 usbcfg, i2cctl;
652 int retval = 0;
653
654 /*
655 * core_init() is now called on every switch so only call the
656 * following for the first time through
657 */
658 if (select_phy) {
659 dev_dbg(hsotg->dev, "FS PHY selected\n");
660
661 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
662 if (!(usbcfg & GUSBCFG_PHYSEL)) {
663 usbcfg |= GUSBCFG_PHYSEL;
664 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
665
666 /* Reset after a PHY select */
667 retval = dwc2_core_reset_and_force_dr_mode(hsotg);
668
669 if (retval) {
670 dev_err(hsotg->dev,
671 "%s: Reset failed, aborting", __func__);
672 return retval;
673 }
674 }
675 }
676
677 /*
678 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
679 * do this on HNP Dev/Host mode switches (done in dev_init and
680 * host_init).
681 */
682 if (dwc2_is_host_mode(hsotg))
683 dwc2_init_fs_ls_pclk_sel(hsotg);
684
685 if (hsotg->core_params->i2c_enable > 0) {
686 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
687
688 /* Program GUSBCFG.OtgUtmiFsSel to I2C */
689 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
690 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
691 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
692
693 /* Program GI2CCTL.I2CEn */
694 i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
695 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
696 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
697 i2cctl &= ~GI2CCTL_I2CEN;
698 dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
699 i2cctl |= GI2CCTL_I2CEN;
700 dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
701 }
702
703 return retval;
704}
705
706static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
707{
708 u32 usbcfg, usbcfg_old;
709 int retval = 0;
710
711 if (!select_phy)
712 return 0;
713
714 usbcfg = usbcfg_old = dwc2_readl(hsotg->regs + GUSBCFG);
715
716 /*
717 * HS PHY parameters. These parameters are preserved during soft reset
718 * so only program the first time. Do a soft reset immediately after
719 * setting phyif.
720 */
721 switch (hsotg->core_params->phy_type) {
722 case DWC2_PHY_TYPE_PARAM_ULPI:
723 /* ULPI interface */
724 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
725 usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
726 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
727 if (hsotg->core_params->phy_ulpi_ddr > 0)
728 usbcfg |= GUSBCFG_DDRSEL;
729 break;
730 case DWC2_PHY_TYPE_PARAM_UTMI:
731 /* UTMI+ interface */
732 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
733 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
734 if (hsotg->core_params->phy_utmi_width == 16)
735 usbcfg |= GUSBCFG_PHYIF16;
736 break;
737 default:
738 dev_err(hsotg->dev, "FS PHY selected at HS!\n");
739 break;
740 }
741
742 if (usbcfg != usbcfg_old) {
743 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
744
745 /* Reset after setting the PHY parameters */
746 retval = dwc2_core_reset_and_force_dr_mode(hsotg);
747 if (retval) {
748 dev_err(hsotg->dev,
749 "%s: Reset failed, aborting", __func__);
750 return retval;
751 }
752 }
753
754 return retval;
755}
756
757static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
758{
759 u32 usbcfg;
760 int retval = 0;
761
762 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
763 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
764 /* If FS mode with FS PHY */
765 retval = dwc2_fs_phy_init(hsotg, select_phy);
766 if (retval)
767 return retval;
768 } else {
769 /* High speed PHY */
770 retval = dwc2_hs_phy_init(hsotg, select_phy);
771 if (retval)
772 return retval;
773 }
774
775 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
776 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
777 hsotg->core_params->ulpi_fs_ls > 0) {
778 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
779 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
780 usbcfg |= GUSBCFG_ULPI_FS_LS;
781 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
782 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
783 } else {
784 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
785 usbcfg &= ~GUSBCFG_ULPI_FS_LS;
786 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
787 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
788 }
789
790 return retval;
791}
792
793static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
794{
795 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
796
797 switch (hsotg->hw_params.arch) {
798 case GHWCFG2_EXT_DMA_ARCH:
799 dev_err(hsotg->dev, "External DMA Mode not supported\n");
800 return -EINVAL;
801
802 case GHWCFG2_INT_DMA_ARCH:
803 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
804 if (hsotg->core_params->ahbcfg != -1) {
805 ahbcfg &= GAHBCFG_CTRL_MASK;
806 ahbcfg |= hsotg->core_params->ahbcfg &
807 ~GAHBCFG_CTRL_MASK;
808 }
809 break;
810
811 case GHWCFG2_SLAVE_ONLY_ARCH:
812 default:
813 dev_dbg(hsotg->dev, "Slave Only Mode\n");
814 break;
815 }
816
817 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
818 hsotg->core_params->dma_enable,
819 hsotg->core_params->dma_desc_enable);
820
821 if (hsotg->core_params->dma_enable > 0) {
822 if (hsotg->core_params->dma_desc_enable > 0)
823 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
824 else
825 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
826 } else {
827 dev_dbg(hsotg->dev, "Using Slave mode\n");
828 hsotg->core_params->dma_desc_enable = 0;
829 }
830
831 if (hsotg->core_params->dma_enable > 0)
832 ahbcfg |= GAHBCFG_DMA_EN;
833
834 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
835
836 return 0;
837}
838
839static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
840{
841 u32 usbcfg;
842
843 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
844 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
845
846 switch (hsotg->hw_params.op_mode) {
847 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
848 if (hsotg->core_params->otg_cap ==
849 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
850 usbcfg |= GUSBCFG_HNPCAP;
851 if (hsotg->core_params->otg_cap !=
852 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
853 usbcfg |= GUSBCFG_SRPCAP;
854 break;
855
856 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
857 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
858 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
859 if (hsotg->core_params->otg_cap !=
860 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
861 usbcfg |= GUSBCFG_SRPCAP;
862 break;
863
864 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
865 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
866 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
867 default:
868 break;
869 }
870
871 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
872}
873
874/**
875 * dwc2_core_init() - Initializes the DWC_otg controller registers and
876 * prepares the core for device mode or host mode operation
877 *
878 * @hsotg: Programming view of the DWC_otg controller
879 * @initial_setup: If true then this is the first init for this instance.
880 */
881int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
882{
883 u32 usbcfg, otgctl;
884 int retval;
885
886 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
887
888 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
889
890 /* Set ULPI External VBUS bit if needed */
891 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
892 if (hsotg->core_params->phy_ulpi_ext_vbus ==
893 DWC2_PHY_ULPI_EXTERNAL_VBUS)
894 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
895
896 /* Set external TS Dline pulsing bit if needed */
897 usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
898 if (hsotg->core_params->ts_dline > 0)
899 usbcfg |= GUSBCFG_TERMSELDLPULSE;
900
901 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
902
903 /*
904 * Reset the Controller
905 *
906 * We only need to reset the controller if this is a re-init.
907 * For the first init we know for sure that earlier code reset us (it
908 * needed to in order to properly detect various parameters).
909 */
910 if (!initial_setup) {
911 retval = dwc2_core_reset_and_force_dr_mode(hsotg);
912 if (retval) {
913 dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
914 __func__);
915 return retval;
916 }
917 }
918
919 /*
920 * This needs to happen in FS mode before any other programming occurs
921 */
922 retval = dwc2_phy_init(hsotg, initial_setup);
923 if (retval)
924 return retval;
925
926 /* Program the GAHBCFG Register */
927 retval = dwc2_gahbcfg_init(hsotg);
928 if (retval)
929 return retval;
930
931 /* Program the GUSBCFG register */
932 dwc2_gusbcfg_init(hsotg);
933
934 /* Program the GOTGCTL register */
935 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
936 otgctl &= ~GOTGCTL_OTGVER;
937 if (hsotg->core_params->otg_ver > 0)
938 otgctl |= GOTGCTL_OTGVER;
939 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
940 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
941
942 /* Clear the SRP success bit for FS-I2c */
943 hsotg->srp_success = 0;
944
945 /* Enable common interrupts */
946 dwc2_enable_common_interrupts(hsotg);
947
948 /*
949 * Do device or host initialization based on mode during PCD and
950 * HCD initialization
951 */
952 if (dwc2_is_host_mode(hsotg)) {
953 dev_dbg(hsotg->dev, "Host Mode\n");
954 hsotg->op_state = OTG_STATE_A_HOST;
955 } else {
956 dev_dbg(hsotg->dev, "Device Mode\n");
957 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
958 }
959
960 return 0;
961}
962
963/**
964 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
965 *
966 * @hsotg: Programming view of DWC_otg controller
967 */
968void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
969{
970 u32 intmsk;
971
972 dev_dbg(hsotg->dev, "%s()\n", __func__);
973
974 /* Disable all interrupts */
975 dwc2_writel(0, hsotg->regs + GINTMSK);
976 dwc2_writel(0, hsotg->regs + HAINTMSK);
977
978 /* Enable the common interrupts */
979 dwc2_enable_common_interrupts(hsotg);
980
981 /* Enable host mode interrupts without disturbing common interrupts */
982 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
983 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
984 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
985}
986
987/**
988 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
989 *
990 * @hsotg: Programming view of DWC_otg controller
991 */
992void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
993{
994 u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
995
996 /* Disable host mode interrupts without disturbing common interrupts */
997 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
998 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
999 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
1000}
1001
1002/*
1003 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
1004 * For system that have a total fifo depth that is smaller than the default
1005 * RX + TX fifo size.
1006 *
1007 * @hsotg: Programming view of DWC_otg controller
1008 */
1009static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
1010{
1011 struct dwc2_core_params *params = hsotg->core_params;
1012 struct dwc2_hw_params *hw = &hsotg->hw_params;
1013 u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
1014
1015 total_fifo_size = hw->total_fifo_size;
1016 rxfsiz = params->host_rx_fifo_size;
1017 nptxfsiz = params->host_nperio_tx_fifo_size;
1018 ptxfsiz = params->host_perio_tx_fifo_size;
1019
1020 /*
1021 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
1022 * allocation with support for high bandwidth endpoints. Synopsys
1023 * defines MPS(Max Packet size) for a periodic EP=1024, and for
1024 * non-periodic as 512.
1025 */
1026 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
1027 /*
1028 * For Buffer DMA mode/Scatter Gather DMA mode
1029 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
1030 * with n = number of host channel.
1031 * 2 * ((1024/4) + 2) = 516
1032 */
1033 rxfsiz = 516 + hw->host_channels;
1034
1035 /*
1036 * min non-periodic tx fifo depth
1037 * 2 * (largest non-periodic USB packet used / 4)
1038 * 2 * (512/4) = 256
1039 */
1040 nptxfsiz = 256;
1041
1042 /*
1043 * min periodic tx fifo depth
1044 * (largest packet size*MC)/4
1045 * (1024 * 3)/4 = 768
1046 */
1047 ptxfsiz = 768;
1048
1049 params->host_rx_fifo_size = rxfsiz;
1050 params->host_nperio_tx_fifo_size = nptxfsiz;
1051 params->host_perio_tx_fifo_size = ptxfsiz;
1052 }
1053
1054 /*
1055 * If the summation of RX, NPTX and PTX fifo sizes is still
1056 * bigger than the total_fifo_size, then we have a problem.
1057 *
1058 * We won't be able to allocate as many endpoints. Right now,
1059 * we're just printing an error message, but ideally this FIFO
1060 * allocation algorithm would be improved in the future.
1061 *
1062 * FIXME improve this FIFO allocation algorithm.
1063 */
1064 if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
1065 dev_err(hsotg->dev, "invalid fifo sizes\n");
1066}
1067
1068static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
1069{
1070 struct dwc2_core_params *params = hsotg->core_params;
1071 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
1072
1073 if (!params->enable_dynamic_fifo)
1074 return;
1075
1076 dwc2_calculate_dynamic_fifo(hsotg);
1077
1078 /* Rx FIFO */
1079 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
1080 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
1081 grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
1082 grxfsiz |= params->host_rx_fifo_size <<
1083 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
1084 dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
1085 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
1086 dwc2_readl(hsotg->regs + GRXFSIZ));
1087
1088 /* Non-periodic Tx FIFO */
1089 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
1090 dwc2_readl(hsotg->regs + GNPTXFSIZ));
1091 nptxfsiz = params->host_nperio_tx_fifo_size <<
1092 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
1093 nptxfsiz |= params->host_rx_fifo_size <<
1094 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
1095 dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
1096 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
1097 dwc2_readl(hsotg->regs + GNPTXFSIZ));
1098
1099 /* Periodic Tx FIFO */
1100 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
1101 dwc2_readl(hsotg->regs + HPTXFSIZ));
1102 hptxfsiz = params->host_perio_tx_fifo_size <<
1103 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
1104 hptxfsiz |= (params->host_rx_fifo_size +
1105 params->host_nperio_tx_fifo_size) <<
1106 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
1107 dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
1108 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
1109 dwc2_readl(hsotg->regs + HPTXFSIZ));
1110
1111 if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
1112 hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
1113 /*
1114 * Global DFIFOCFG calculation for Host mode -
1115 * include RxFIFO, NPTXFIFO and HPTXFIFO
1116 */
1117 dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
1118 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
1119 dfifocfg |= (params->host_rx_fifo_size +
1120 params->host_nperio_tx_fifo_size +
1121 params->host_perio_tx_fifo_size) <<
1122 GDFIFOCFG_EPINFOBASE_SHIFT &
1123 GDFIFOCFG_EPINFOBASE_MASK;
1124 dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
1125 }
1126}
1127
1128/**
1129 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
1130 * Host mode
1131 *
1132 * @hsotg: Programming view of DWC_otg controller
1133 *
1134 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
1135 * request queues. Host channels are reset to ensure that they are ready for
1136 * performing transfers.
1137 */
1138void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
1139{
1140 u32 hcfg, hfir, otgctl;
1141
1142 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
1143
1144 /* Restart the Phy Clock */
1145 dwc2_writel(0, hsotg->regs + PCGCTL);
1146
1147 /* Initialize Host Configuration Register */
1148 dwc2_init_fs_ls_pclk_sel(hsotg);
1149 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
1150 hcfg = dwc2_readl(hsotg->regs + HCFG);
1151 hcfg |= HCFG_FSLSSUPP;
1152 dwc2_writel(hcfg, hsotg->regs + HCFG);
1153 }
1154
1155 /*
1156 * This bit allows dynamic reloading of the HFIR register during
1157 * runtime. This bit needs to be programmed during initial configuration
1158 * and its value must not be changed during runtime.
1159 */
1160 if (hsotg->core_params->reload_ctl > 0) {
1161 hfir = dwc2_readl(hsotg->regs + HFIR);
1162 hfir |= HFIR_RLDCTRL;
1163 dwc2_writel(hfir, hsotg->regs + HFIR);
1164 }
1165
1166 if (hsotg->core_params->dma_desc_enable > 0) {
1167 u32 op_mode = hsotg->hw_params.op_mode;
1168 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
1169 !hsotg->hw_params.dma_desc_enable ||
1170 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
1171 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
1172 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
1173 dev_err(hsotg->dev,
1174 "Hardware does not support descriptor DMA mode -\n");
1175 dev_err(hsotg->dev,
1176 "falling back to buffer DMA mode.\n");
1177 hsotg->core_params->dma_desc_enable = 0;
1178 } else {
1179 hcfg = dwc2_readl(hsotg->regs + HCFG);
1180 hcfg |= HCFG_DESCDMA;
1181 dwc2_writel(hcfg, hsotg->regs + HCFG);
1182 }
1183 }
1184
1185 /* Configure data FIFO sizes */
1186 dwc2_config_fifos(hsotg);
1187
1188 /* TODO - check this */
1189 /* Clear Host Set HNP Enable in the OTG Control Register */
1190 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
1191 otgctl &= ~GOTGCTL_HSTSETHNPEN;
1192 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
1193
1194 /* Make sure the FIFOs are flushed */
1195 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
1196 dwc2_flush_rx_fifo(hsotg);
1197
1198 /* Clear Host Set HNP Enable in the OTG Control Register */
1199 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
1200 otgctl &= ~GOTGCTL_HSTSETHNPEN;
1201 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
1202
1203 if (hsotg->core_params->dma_desc_enable <= 0) {
1204 int num_channels, i;
1205 u32 hcchar;
1206
1207 /* Flush out any leftover queued requests */
1208 num_channels = hsotg->core_params->host_channels;
1209 for (i = 0; i < num_channels; i++) {
1210 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
1211 hcchar &= ~HCCHAR_CHENA;
1212 hcchar |= HCCHAR_CHDIS;
1213 hcchar &= ~HCCHAR_EPDIR;
1214 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
1215 }
1216
1217 /* Halt all channels to put them into a known state */
1218 for (i = 0; i < num_channels; i++) {
1219 int count = 0;
1220
1221 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
1222 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
1223 hcchar &= ~HCCHAR_EPDIR;
1224 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
1225 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
1226 __func__, i);
1227 do {
1228 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
1229 if (++count > 1000) {
1230 dev_err(hsotg->dev,
1231 "Unable to clear enable on channel %d\n",
1232 i);
1233 break;
1234 }
1235 udelay(1);
1236 } while (hcchar & HCCHAR_CHENA);
1237 }
1238 }
1239
1240 /* Turn on the vbus power */
1241 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
1242 if (hsotg->op_state == OTG_STATE_A_HOST) {
1243 u32 hprt0 = dwc2_read_hprt0(hsotg);
1244
1245 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
1246 !!(hprt0 & HPRT0_PWR));
1247 if (!(hprt0 & HPRT0_PWR)) {
1248 hprt0 |= HPRT0_PWR;
1249 dwc2_writel(hprt0, hsotg->regs + HPRT0);
1250 }
1251 }
1252
1253 dwc2_enable_host_interrupts(hsotg);
1254}
1255
1256static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
1257 struct dwc2_host_chan *chan)
1258{
1259 u32 hcintmsk = HCINTMSK_CHHLTD;
1260
1261 switch (chan->ep_type) {
1262 case USB_ENDPOINT_XFER_CONTROL:
1263 case USB_ENDPOINT_XFER_BULK:
1264 dev_vdbg(hsotg->dev, "control/bulk\n");
1265 hcintmsk |= HCINTMSK_XFERCOMPL;
1266 hcintmsk |= HCINTMSK_STALL;
1267 hcintmsk |= HCINTMSK_XACTERR;
1268 hcintmsk |= HCINTMSK_DATATGLERR;
1269 if (chan->ep_is_in) {
1270 hcintmsk |= HCINTMSK_BBLERR;
1271 } else {
1272 hcintmsk |= HCINTMSK_NAK;
1273 hcintmsk |= HCINTMSK_NYET;
1274 if (chan->do_ping)
1275 hcintmsk |= HCINTMSK_ACK;
1276 }
1277
1278 if (chan->do_split) {
1279 hcintmsk |= HCINTMSK_NAK;
1280 if (chan->complete_split)
1281 hcintmsk |= HCINTMSK_NYET;
1282 else
1283 hcintmsk |= HCINTMSK_ACK;
1284 }
1285
1286 if (chan->error_state)
1287 hcintmsk |= HCINTMSK_ACK;
1288 break;
1289
1290 case USB_ENDPOINT_XFER_INT:
1291 if (dbg_perio())
1292 dev_vdbg(hsotg->dev, "intr\n");
1293 hcintmsk |= HCINTMSK_XFERCOMPL;
1294 hcintmsk |= HCINTMSK_NAK;
1295 hcintmsk |= HCINTMSK_STALL;
1296 hcintmsk |= HCINTMSK_XACTERR;
1297 hcintmsk |= HCINTMSK_DATATGLERR;
1298 hcintmsk |= HCINTMSK_FRMOVRUN;
1299
1300 if (chan->ep_is_in)
1301 hcintmsk |= HCINTMSK_BBLERR;
1302 if (chan->error_state)
1303 hcintmsk |= HCINTMSK_ACK;
1304 if (chan->do_split) {
1305 if (chan->complete_split)
1306 hcintmsk |= HCINTMSK_NYET;
1307 else
1308 hcintmsk |= HCINTMSK_ACK;
1309 }
1310 break;
1311
1312 case USB_ENDPOINT_XFER_ISOC:
1313 if (dbg_perio())
1314 dev_vdbg(hsotg->dev, "isoc\n");
1315 hcintmsk |= HCINTMSK_XFERCOMPL;
1316 hcintmsk |= HCINTMSK_FRMOVRUN;
1317 hcintmsk |= HCINTMSK_ACK;
1318
1319 if (chan->ep_is_in) {
1320 hcintmsk |= HCINTMSK_XACTERR;
1321 hcintmsk |= HCINTMSK_BBLERR;
1322 }
1323 break;
1324 default:
1325 dev_err(hsotg->dev, "## Unknown EP type ##\n");
1326 break;
1327 }
1328
1329 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1330 if (dbg_hc(chan))
1331 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
1332}
1333
1334static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
1335 struct dwc2_host_chan *chan)
1336{
1337 u32 hcintmsk = HCINTMSK_CHHLTD;
1338
1339 /*
1340 * For Descriptor DMA mode core halts the channel on AHB error.
1341 * Interrupt is not required.
1342 */
1343 if (hsotg->core_params->dma_desc_enable <= 0) {
1344 if (dbg_hc(chan))
1345 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1346 hcintmsk |= HCINTMSK_AHBERR;
1347 } else {
1348 if (dbg_hc(chan))
1349 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
1350 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1351 hcintmsk |= HCINTMSK_XFERCOMPL;
1352 }
1353
1354 if (chan->error_state && !chan->do_split &&
1355 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1356 if (dbg_hc(chan))
1357 dev_vdbg(hsotg->dev, "setting ACK\n");
1358 hcintmsk |= HCINTMSK_ACK;
1359 if (chan->ep_is_in) {
1360 hcintmsk |= HCINTMSK_DATATGLERR;
1361 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
1362 hcintmsk |= HCINTMSK_NAK;
1363 }
1364 }
1365
1366 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1367 if (dbg_hc(chan))
1368 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
1369}
1370
1371static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
1372 struct dwc2_host_chan *chan)
1373{
1374 u32 intmsk;
1375
1376 if (hsotg->core_params->dma_enable > 0) {
1377 if (dbg_hc(chan))
1378 dev_vdbg(hsotg->dev, "DMA enabled\n");
1379 dwc2_hc_enable_dma_ints(hsotg, chan);
1380 } else {
1381 if (dbg_hc(chan))
1382 dev_vdbg(hsotg->dev, "DMA disabled\n");
1383 dwc2_hc_enable_slave_ints(hsotg, chan);
1384 }
1385
1386 /* Enable the top level host channel interrupt */
1387 intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
1388 intmsk |= 1 << chan->hc_num;
1389 dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
1390 if (dbg_hc(chan))
1391 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
1392
1393 /* Make sure host channel interrupts are enabled */
1394 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
1395 intmsk |= GINTSTS_HCHINT;
1396 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
1397 if (dbg_hc(chan))
1398 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
1399}
1400
/**
 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
 * a specific endpoint
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The HCCHARn register is set up with the characteristics specified in chan.
 * Host channel interrupts that may need to be serviced while this transfer is
 * in progress are enabled.
 */
void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u8 hc_num = chan->hc_num;
	u32 hcintmsk;
	u32 hcchar;
	u32 hcsplt = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/*
	 * Clear old interrupt conditions for this host channel by writing
	 * all valid (non-reserved) bits to HCINT before unmasking anything
	 */
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));

	/* Enable channel interrupts required for this transfer */
	dwc2_hc_enable_ints(hsotg, chan);

	/*
	 * Program the HCCHARn register with the endpoint characteristics for
	 * the current transfer: device address, EP number/direction/type,
	 * device speed and max packet size
	 */
	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
	if (chan->ep_is_in)
		hcchar |= HCCHAR_EPDIR;
	if (chan->speed == USB_SPEED_LOW)
		hcchar |= HCCHAR_LSPDDEV;
	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
	dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
			 hc_num, hcchar);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
			 __func__, hc_num);
		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
			 chan->dev_addr);
		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
			 chan->ep_num);
		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
			 chan->ep_is_in);
		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
			 chan->speed == USB_SPEED_LOW);
		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
			 chan->ep_type);
		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
			 chan->max_packet);
	}

	/* Program the HCSPLT register for SPLITs */
	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev,
				 "Programming HC %d with split --> %s\n",
				 hc_num,
				 chan->complete_split ? "CSPLIT" : "SSPLIT");
		if (chan->complete_split)
			hcsplt |= HCSPLT_COMPSPLT;
		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
			  HCSPLT_XACTPOS_MASK;
		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
			  HCSPLT_HUBADDR_MASK;
		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
			  HCSPLT_PRTADDR_MASK;
		if (dbg_hc(chan)) {
			dev_vdbg(hsotg->dev, "	  comp split %d\n",
				 chan->complete_split);
			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
				 chan->xact_pos);
			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
				 chan->hub_addr);
			dev_vdbg(hsotg->dev, "	  hub port %d\n",
				 chan->hub_port);
			dev_vdbg(hsotg->dev, "	  is_in %d\n",
				 chan->ep_is_in);
			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
				 chan->max_packet);
			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
				 chan->xfer_len);
		}
	}

	/* hcsplt stays 0 when not doing a split, clearing the register */
	dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
}
1498
/**
 * dwc2_hc_halt() - Attempts to halt a host channel
 *
 * @hsotg: Controller register interface
 * @chan: Host channel to halt
 * @halt_status: Reason for halting the channel
 *
 * This function should only be called in Slave mode or to abort a transfer in
 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
 * controller halts the channel when the transfer is complete or a condition
 * occurs that requires application intervention.
 *
 * In slave mode, checks for a free request queue entry, then sets the Channel
 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt. If there is no free
 * request queue entry, sets only the Channel Disable bit of the HCCHARn
 * register to flush requests for this channel. In the latter case, sets a
 * flag to indicate that the host channel needs to be halted when a request
 * queue slot is open.
 *
 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
 * HCCHARn register. The controller ensures there is space in the request
 * queue before submitting the halt request.
 *
 * Some time may elapse before the core flushes any posted requests for this
 * host channel and halts. The Channel Halted interrupt handler completes the
 * deactivation of the host channel.
 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);
	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		dev_vdbg(hsotg->dev, "dequeue/error\n");
		dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}
	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev,
			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	/* Channel Disable requests the halt */
	hcchar |= HCCHAR_CHDIS;

	if (hsotg->core_params->dma_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/*
		 * Check for space in the request queue to issue the halt.
		 * If the queue is full, only CHDIS is left set, and the
		 * halt is deferred via halt_on_queue below.
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}

	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	chan->halt_status = halt_status;

	if (hcchar & HCCHAR_CHENA) {
		/* Halt was issued; completion arrives via Channel Halted irq */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel enabled\n");
		chan->halt_pending = 1;
		chan->halt_on_queue = 0;
	} else {
		/* No queue space; halt when a request queue slot opens */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel disabled\n");
		chan->halt_on_queue = 1;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
			 hcchar);
		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
			 chan->halt_pending);
		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
			 chan->halt_on_queue);
		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
			 chan->halt_status);
	}
}
1663
1664/**
1665 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1666 *
1667 * @hsotg: Programming view of DWC_otg controller
1668 * @chan: Identifies the host channel to clean up
1669 *
1670 * This function is normally called after a transfer is done and the host
1671 * channel is being released
1672 */
1673void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1674{
1675 u32 hcintmsk;
1676
1677 chan->xfer_started = 0;
1678
1679 /*
1680 * Clear channel interrupt enables and any unhandled channel interrupt
1681 * conditions
1682 */
1683 dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1684 hcintmsk = 0xffffffff;
1685 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1686 dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1687}
1688
1689/**
1690 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1691 * which frame a periodic transfer should occur
1692 *
1693 * @hsotg: Programming view of DWC_otg controller
1694 * @chan: Identifies the host channel to set up and its properties
1695 * @hcchar: Current value of the HCCHAR register for the specified host channel
1696 *
1697 * This function has no effect on non-periodic transfers
1698 */
1699static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1700 struct dwc2_host_chan *chan, u32 *hcchar)
1701{
1702 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1703 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1704 /* 1 if _next_ frame is odd, 0 if it's even */
1705 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1706 *hcchar |= HCCHAR_ODDFRM;
1707 }
1708}
1709
1710static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1711{
1712 /* Set up the initial PID for the transfer */
1713 if (chan->speed == USB_SPEED_HIGH) {
1714 if (chan->ep_is_in) {
1715 if (chan->multi_count == 1)
1716 chan->data_pid_start = DWC2_HC_PID_DATA0;
1717 else if (chan->multi_count == 2)
1718 chan->data_pid_start = DWC2_HC_PID_DATA1;
1719 else
1720 chan->data_pid_start = DWC2_HC_PID_DATA2;
1721 } else {
1722 if (chan->multi_count == 1)
1723 chan->data_pid_start = DWC2_HC_PID_DATA0;
1724 else
1725 chan->data_pid_start = DWC2_HC_PID_MDATA;
1726 }
1727 } else {
1728 chan->data_pid_start = DWC2_HC_PID_DATA0;
1729 }
1730}
1731
1732/**
1733 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1734 * the Host Channel
1735 *
1736 * @hsotg: Programming view of DWC_otg controller
1737 * @chan: Information needed to initialize the host channel
1738 *
1739 * This function should only be called in Slave mode. For a channel associated
1740 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1741 * associated with a periodic EP, the periodic Tx FIFO is written.
1742 *
1743 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1744 * the number of bytes written to the Tx FIFO.
1745 */
1746static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1747 struct dwc2_host_chan *chan)
1748{
1749 u32 i;
1750 u32 remaining_count;
1751 u32 byte_count;
1752 u32 dword_count;
1753 u32 __iomem *data_fifo;
1754 u32 *data_buf = (u32 *)chan->xfer_buf;
1755
1756 if (dbg_hc(chan))
1757 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1758
1759 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1760
1761 remaining_count = chan->xfer_len - chan->xfer_count;
1762 if (remaining_count > chan->max_packet)
1763 byte_count = chan->max_packet;
1764 else
1765 byte_count = remaining_count;
1766
1767 dword_count = (byte_count + 3) / 4;
1768
1769 if (((unsigned long)data_buf & 0x3) == 0) {
1770 /* xfer_buf is DWORD aligned */
1771 for (i = 0; i < dword_count; i++, data_buf++)
1772 dwc2_writel(*data_buf, data_fifo);
1773 } else {
1774 /* xfer_buf is not DWORD aligned */
1775 for (i = 0; i < dword_count; i++, data_buf++) {
1776 u32 data = data_buf[0] | data_buf[1] << 8 |
1777 data_buf[2] << 16 | data_buf[3] << 24;
1778 dwc2_writel(data, data_fifo);
1779 }
1780 }
1781
1782 chan->xfer_count += byte_count;
1783 chan->xfer_buf += byte_count;
1784}
1785
/**
 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
 * channel and starts the transfer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel. The xfer_len value
 *        may be reduced to accommodate the max widths of the XferSize and
 *        PktCnt fields in the HCTSIZn register. The multi_count value may be
 *        changed to reflect the final xfer_len value.
 *
 * This function may be called in either Slave mode or DMA mode. In Slave mode,
 * the caller must ensure that there is sufficient space in the request queue
 * and Tx Data FIFO.
 *
 * For an OUT transfer in Slave mode, it loads a data packet into the
 * appropriate FIFO. If necessary, additional data packets are loaded in the
 * Host ISR.
 *
 * For an IN transfer in Slave mode, a data packet is requested. The data
 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
 * additional data packets are requested in the Host ISR.
 *
 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
 * register along with a packet count of 1 and the channel is enabled. This
 * causes a single PING transaction to occur. Other fields in HCTSIZ are
 * simply set to 0 since no data transfer occurs in this case.
 *
 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
 * all the information required to perform the subsequent data transfer. In
 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
 * controller performs the entire PING protocol, then starts the data
 * transfer.
 */
void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
			    struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets;
	u32 ec_mc;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		if (hsotg->core_params->dma_enable <= 0) {
			/* Slave mode PING is a standalone transaction */
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, no DMA\n");
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		} else {
			/* DMA mode: core runs PING then the data phase */
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, DMA\n");
			hctsiz |= TSIZ_DOPNG;
		}
	}

	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "split\n");
		/* A split carries at most one packet per (micro)frame */
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			/*
			 * SSPLIT OUT payload capped at 188 bytes — one
			 * microframe's worth of full-speed data per the
			 * USB 2.0 split-transaction rules
			 */
			chan->xfer_len = 188;

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;

		/* For split set ec_mc for immediate retries */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			ec_mc = 3;
		else
			ec_mc = 1;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			/* Round up to whole packets, then clip to PktCnt width */
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
					chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets * chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_is_in)
			/*
			 * Always program an integral # of max packets for IN
			 * transfers
			 */
			chan->xfer_len = num_packets * chan->max_packet;

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;

		/* The ec_mc gets the multi_count for non-split */
		ec_mc = chan->multi_count;
	}

	chan->start_pkt_count = num_packets;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;
	dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
			 hctsiz, chan->hc_num);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
			 TSIZ_XFERSIZE_SHIFT);
		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
			 (hctsiz & TSIZ_PKTCNT_MASK) >>
			 TSIZ_PKTCNT_SHIFT);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
			 TSIZ_SC_MC_PID_SHIFT);
	}

	if (hsotg->core_params->dma_enable > 0) {
		dma_addr_t dma_addr;

		/* Use the bounce buffer if one was set up for this transfer */
		if (chan->align_buf) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "align_buf\n");
			dma_addr = chan->align_buf;
		} else {
			dma_addr = chan->xfer_dma;
		}
		dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
				 (unsigned long)dma_addr, chan->hc_num);
	}

	/* Start the split */
	if (chan->do_split) {
		u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));

		hcsplt |= HCSPLT_SPLTENA;
		dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
	}

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;

	if (hsotg->core_params->dma_enable <= 0 &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}
2016
/**
 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
 * host channel and starts the transfer in Descriptor DMA mode
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
 * with micro-frame bitmap.
 *
 * Initializes HCDMA register with descriptor list address and CTD value then
 * starts the transfer via enabling the channel.
 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
	}

	dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	/* Flush CPU writes to the descriptor list before the core reads it */
	dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
				   chan->desc_list_sz, DMA_TO_DEVICE);

	dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num));

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
			 &chan->desc_list_addr, chan->hc_num);

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}
2099
/**
 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
 * a previous call to dwc2_hc_start_transfer()
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The caller must ensure there is sufficient space in the request queue and Tx
 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
 * the controller acts autonomously to complete transfers programmed to a host
 * channel.
 *
 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
 * if there is any data remaining to be queued. For an IN transfer, another
 * data packet is always requested. For the SETUP phase of a control transfer,
 * this function does nothing.
 *
 * Return: 1 if a new request is queued, 0 if no more requests are required
 * for this transfer
 */
int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));

		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
		/* Re-enable the channel to request the next packet */
		hcchar |= HCCHAR_CHENA;
		hcchar &= ~HCCHAR_CHDIS;
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "	IN xfer: hcchar = 0x%08x\n",
				 hcchar);
		dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
		chan->requests++;
		return 1;
	}

	/* OUT transfers */

	if (chan->xfer_count < chan->xfer_len) {
		/* Periodic OUT packets must target the correct (micro)frame */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			u32 hcchar = dwc2_readl(hsotg->regs +
						HCCHAR(chan->hc_num));

			dwc2_hc_set_even_odd_frame(hsotg, chan,
						   &hcchar);
		}

		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
		chan->requests++;
		return 1;
	}

	return 0;
}
2181
2182/**
2183 * dwc2_hc_do_ping() - Starts a PING transfer
2184 *
2185 * @hsotg: Programming view of DWC_otg controller
2186 * @chan: Information needed to initialize the host channel
2187 *
2188 * This function should only be called in Slave mode. The Do Ping bit is set in
2189 * the HCTSIZ register, then the channel is enabled.
2190 */
2191void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
2192{
2193 u32 hcchar;
2194 u32 hctsiz;
2195
2196 if (dbg_hc(chan))
2197 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2198 chan->hc_num);
2199
2200
2201 hctsiz = TSIZ_DOPNG;
2202 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
2203 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
2204
2205 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
2206 hcchar |= HCCHAR_CHENA;
2207 hcchar &= ~HCCHAR_CHDIS;
2208 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
2209}
2210
2211/**
2212 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
2213 * the HFIR register according to PHY type and speed
2214 *
2215 * @hsotg: Programming view of DWC_otg controller
2216 *
2217 * NOTE: The caller can modify the value of the HFIR register only after the
2218 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
2219 * has been set
2220 */
2221u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
2222{
2223 u32 usbcfg;
2224 u32 hprt0;
2225 int clock = 60; /* default value */
2226
2227 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
2228 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
2229
2230 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
2231 !(usbcfg & GUSBCFG_PHYIF16))
2232 clock = 60;
2233 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
2234 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
2235 clock = 48;
2236 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2237 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2238 clock = 30;
2239 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2240 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
2241 clock = 60;
2242 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2243 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2244 clock = 48;
2245 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
2246 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
2247 clock = 48;
2248 if ((usbcfg & GUSBCFG_PHYSEL) &&
2249 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2250 clock = 48;
2251
2252 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
2253 /* High speed case */
2254 return 125 * clock;
2255 else
2256 /* FS/LS case */
2257 return 1000 * clock;
2258}
2259
/**
 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
 * buffer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @dest: Destination buffer for the packet
 * @bytes: Number of bytes to copy to the destination
 *
 * Reads whole 32-bit words from the FIFO, so up to 3 bytes past @bytes may
 * be written to @dest (the word count is rounded up). Currently assumes
 * @dest is DWORD aligned — see the Todo below.
 */
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
	/* Rx data is popped through the channel 0 FIFO address window */
	u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
	u32 *data_buf = (u32 *)dest;
	int word_count = (bytes + 3) / 4;
	int i;

	/*
	 * Todo: Account for the case where dest is not dword aligned. This
	 * requires reading data from the FIFO into a u32 temp buffer, then
	 * moving it into the data buffer.
	 */

	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);

	for (i = 0; i < word_count; i++, data_buf++)
		*data_buf = dwc2_readl(fifo);
}
2286
2287/** 410/**
2288 * dwc2_dump_host_registers() - Prints the host registers 411 * dwc2_dump_host_registers() - Prints the host registers
2289 * 412 *
@@ -3355,13 +1478,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
3355 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >> 1478 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
3356 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; 1479 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
3357 hw->max_transfer_size = (1 << (width + 11)) - 1; 1480 hw->max_transfer_size = (1 << (width + 11)) - 1;
3358 /*
3359 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
3360 * coherent buffers with this size, and if it's too large we can
3361 * exhaust the coherent DMA pool.
3362 */
3363 if (hw->max_transfer_size > 65535)
3364 hw->max_transfer_size = 65535;
3365 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >> 1481 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
3366 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; 1482 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
3367 hw->max_packet_count = (1 << (width + 4)) - 1; 1483 hw->max_packet_count = (1 << (width + 4)) - 1;
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 7fb6434f4639..3c58d633ce80 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -44,6 +44,26 @@
44#include <linux/usb/phy.h> 44#include <linux/usb/phy.h>
45#include "hw.h" 45#include "hw.h"
46 46
47/*
48 * Suggested defines for tracers:
49 * - no_printk: Disable tracing
50 * - pr_info: Print this info to the console
51 * - trace_printk: Print this info to trace buffer (good for verbose logging)
52 */
53
54#define DWC2_TRACE_SCHEDULER no_printk
55#define DWC2_TRACE_SCHEDULER_VB no_printk
56
57/* Detailed scheduler tracing, but won't overwhelm console */
58#define dwc2_sch_dbg(hsotg, fmt, ...) \
59 DWC2_TRACE_SCHEDULER(pr_fmt("%s: SCH: " fmt), \
60 dev_name(hsotg->dev), ##__VA_ARGS__)
61
62/* Verbose scheduler tracing */
63#define dwc2_sch_vdbg(hsotg, fmt, ...) \
64 DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \
65 dev_name(hsotg->dev), ##__VA_ARGS__)
66
47static inline u32 dwc2_readl(const void __iomem *addr) 67static inline u32 dwc2_readl(const void __iomem *addr)
48{ 68{
49 u32 value = __raw_readl(addr); 69 u32 value = __raw_readl(addr);
@@ -572,6 +592,84 @@ struct dwc2_hregs_backup {
572 bool valid; 592 bool valid;
573}; 593};
574 594
595/*
596 * Constants related to high speed periodic scheduling
597 *
598 * We have a periodic schedule that is DWC2_HS_SCHEDULE_UFRAMES long. From a
599 * reservation point of view it's assumed that the schedule goes right back to
600 * the beginning after the end of the schedule.
601 *
602 * What does that mean for scheduling things with a long interval? It means
603 * we'll reserve time for them in every possible microframe that they could
604 * ever be scheduled in. ...but we'll still only actually schedule them as
605 * often as they were requested.
606 *
607 * We keep our schedule in a "bitmap" structure. This simplifies having
608 * to keep track of and merge intervals: we just let the bitmap code do most
609 * of the heavy lifting. In a way scheduling is much like memory allocation.
610 *
611 * We schedule 100us per uframe or 80% of 125us (the maximum amount you're
612 * supposed to schedule for periodic transfers). That's according to spec.
613 *
614 * Note that though we only schedule 80% of each microframe, the bitmap that we
615 * keep the schedule in is tightly packed (AKA it doesn't have 100us worth of
616 * space for each uFrame).
617 *
618 * Requirements:
619 * - DWC2_HS_SCHEDULE_UFRAMES must even divide 0x4000 (HFNUM_MAX_FRNUM + 1)
620 * - DWC2_HS_SCHEDULE_UFRAMES must be 8 times DWC2_LS_SCHEDULE_FRAMES (probably
621 * could be any multiple of 8 times DWC2_LS_SCHEDULE_FRAMES, but there might
622 * be bugs). The 8 comes from the USB spec: number of microframes per frame.
623 */
624#define DWC2_US_PER_UFRAME 125
625#define DWC2_HS_PERIODIC_US_PER_UFRAME 100
626
627#define DWC2_HS_SCHEDULE_UFRAMES 8
628#define DWC2_HS_SCHEDULE_US (DWC2_HS_SCHEDULE_UFRAMES * \
629 DWC2_HS_PERIODIC_US_PER_UFRAME)
630
631/*
632 * Constants related to low speed scheduling
633 *
634 * For high speed we schedule every 1us. For low speed that's a bit overkill,
635 * so we make up a unit called a "slice" that's worth 25us. There are 40
636 * slices in a full frame and we can schedule 36 of those (90%) for periodic
637 * transfers.
638 *
639 * Our low speed schedule can be as short as 1 frame or could be longer. When
640 * we only schedule 1 frame it means that we'll need to reserve a time every
641 * frame even for things that only transfer very rarely, so something that runs
642 * every 2048 frames will get time reserved in every frame. Our low speed
643 * schedule can be longer and we'll be able to handle more overlap, but that
644 * will come at increased memory cost and increased time to schedule.
645 *
646 * Note: one other advantage of a short low speed schedule is that if we mess
647 * up and miss scheduling we can jump in and use any of the slots that we
648 * happened to reserve.
649 *
650 * With 25 us per slice and 1 frame in the schedule, we only need 4 bytes for
651 * the schedule. There will be one schedule per TT.
652 *
653 * Requirements:
654 * - DWC2_US_PER_SLICE must evenly divide DWC2_LS_PERIODIC_US_PER_FRAME.
655 */
656#define DWC2_US_PER_SLICE 25
657#define DWC2_SLICES_PER_UFRAME (DWC2_US_PER_UFRAME / DWC2_US_PER_SLICE)
658
659#define DWC2_ROUND_US_TO_SLICE(us) \
660 (DIV_ROUND_UP((us), DWC2_US_PER_SLICE) * \
661 DWC2_US_PER_SLICE)
662
663#define DWC2_LS_PERIODIC_US_PER_FRAME \
664 900
665#define DWC2_LS_PERIODIC_SLICES_PER_FRAME \
666 (DWC2_LS_PERIODIC_US_PER_FRAME / \
667 DWC2_US_PER_SLICE)
668
669#define DWC2_LS_SCHEDULE_FRAMES 1
670#define DWC2_LS_SCHEDULE_SLICES (DWC2_LS_SCHEDULE_FRAMES * \
671 DWC2_LS_PERIODIC_SLICES_PER_FRAME)
672
575/** 673/**
576 * struct dwc2_hsotg - Holds the state of the driver, including the non-periodic 674 * struct dwc2_hsotg - Holds the state of the driver, including the non-periodic
577 * and periodic schedules 675 * and periodic schedules
@@ -657,11 +755,14 @@ struct dwc2_hregs_backup {
657 * periodic_sched_ready because it must be rescheduled for 755 * periodic_sched_ready because it must be rescheduled for
658 * the next frame. Otherwise, the item moves to 756 * the next frame. Otherwise, the item moves to
659 * periodic_sched_inactive. 757 * periodic_sched_inactive.
758 * @split_order: List keeping track of channels doing splits, in order.
660 * @periodic_usecs: Total bandwidth claimed so far for periodic transfers. 759 * @periodic_usecs: Total bandwidth claimed so far for periodic transfers.
661 * This value is in microseconds per (micro)frame. The 760 * This value is in microseconds per (micro)frame. The
662 * assumption is that all periodic transfers may occur in 761 * assumption is that all periodic transfers may occur in
663 * the same (micro)frame. 762 * the same (micro)frame.
664 * @frame_usecs: Internal variable used by the microframe scheduler 763 * @hs_periodic_bitmap: Bitmap used by the microframe scheduler any time the
764 * host is in high speed mode; low speed schedules are
765 * stored elsewhere since we need one per TT.
665 * @frame_number: Frame number read from the core at SOF. The value ranges 766 * @frame_number: Frame number read from the core at SOF. The value ranges
666 * from 0 to HFNUM_MAX_FRNUM. 767 * from 0 to HFNUM_MAX_FRNUM.
667 * @periodic_qh_count: Count of periodic QHs, if using several eps. Used for 768 * @periodic_qh_count: Count of periodic QHs, if using several eps. Used for
@@ -780,16 +881,19 @@ struct dwc2_hsotg {
780 struct list_head periodic_sched_ready; 881 struct list_head periodic_sched_ready;
781 struct list_head periodic_sched_assigned; 882 struct list_head periodic_sched_assigned;
782 struct list_head periodic_sched_queued; 883 struct list_head periodic_sched_queued;
884 struct list_head split_order;
783 u16 periodic_usecs; 885 u16 periodic_usecs;
784 u16 frame_usecs[8]; 886 unsigned long hs_periodic_bitmap[
887 DIV_ROUND_UP(DWC2_HS_SCHEDULE_US, BITS_PER_LONG)];
785 u16 frame_number; 888 u16 frame_number;
786 u16 periodic_qh_count; 889 u16 periodic_qh_count;
787 bool bus_suspended; 890 bool bus_suspended;
788 bool new_connection; 891 bool new_connection;
789 892
893 u16 last_frame_num;
894
790#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS 895#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
791#define FRAME_NUM_ARRAY_SIZE 1000 896#define FRAME_NUM_ARRAY_SIZE 1000
792 u16 last_frame_num;
793 u16 *frame_num_array; 897 u16 *frame_num_array;
794 u16 *last_frame_num_array; 898 u16 *last_frame_num_array;
795 int frame_num_idx; 899 int frame_num_idx;
@@ -885,34 +989,11 @@ enum dwc2_halt_status {
885 */ 989 */
886extern int dwc2_core_reset(struct dwc2_hsotg *hsotg); 990extern int dwc2_core_reset(struct dwc2_hsotg *hsotg);
887extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg); 991extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg);
888extern void dwc2_core_host_init(struct dwc2_hsotg *hsotg);
889extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg); 992extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg);
890extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore); 993extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore);
891 994
892void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg); 995void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
893 996
894/*
895 * Host core Functions.
896 * The following functions support managing the DWC_otg controller in host
897 * mode.
898 */
899extern void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan);
900extern void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
901 enum dwc2_halt_status halt_status);
902extern void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg,
903 struct dwc2_host_chan *chan);
904extern void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
905 struct dwc2_host_chan *chan);
906extern void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
907 struct dwc2_host_chan *chan);
908extern int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
909 struct dwc2_host_chan *chan);
910extern void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
911 struct dwc2_host_chan *chan);
912extern void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg);
913extern void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg);
914
915extern u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);
916extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg); 997extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
917 998
918/* 999/*
@@ -924,7 +1005,6 @@ extern void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes);
924extern void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num); 1005extern void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num);
925extern void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg); 1006extern void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg);
926 1007
927extern int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup);
928extern void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd); 1008extern void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd);
929extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd); 1009extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd);
930 1010
@@ -1191,6 +1271,8 @@ extern void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
1191extern void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); 1271extern void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
1192extern int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); 1272extern int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
1193#define dwc2_is_device_connected(hsotg) (hsotg->connected) 1273#define dwc2_is_device_connected(hsotg) (hsotg->connected)
1274int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
1275int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg);
1194#else 1276#else
1195static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2) 1277static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2)
1196{ return 0; } 1278{ return 0; }
@@ -1208,22 +1290,37 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
1208 int testmode) 1290 int testmode)
1209{ return 0; } 1291{ return 0; }
1210#define dwc2_is_device_connected(hsotg) (0) 1292#define dwc2_is_device_connected(hsotg) (0)
1293static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
1294{ return 0; }
1295static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
1296{ return 0; }
1211#endif 1297#endif
1212 1298
1213#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 1299#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
1214extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg); 1300extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg);
1301extern int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us);
1215extern void dwc2_hcd_connect(struct dwc2_hsotg *hsotg); 1302extern void dwc2_hcd_connect(struct dwc2_hsotg *hsotg);
1216extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force); 1303extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force);
1217extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg); 1304extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg);
1305int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg);
1306int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg);
1218#else 1307#else
1219static inline int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg) 1308static inline int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
1220{ return 0; } 1309{ return 0; }
1310static inline int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg,
1311 int us)
1312{ return 0; }
1221static inline void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) {} 1313static inline void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) {}
1222static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) {} 1314static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) {}
1223static inline void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {} 1315static inline void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {}
1224static inline void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) {} 1316static inline void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) {}
1225static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) 1317static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
1226{ return 0; } 1318{ return 0; }
1319static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
1320{ return 0; }
1321static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
1322{ return 0; }
1323
1227#endif 1324#endif
1228 1325
1229#endif /* __DWC2_CORE_H__ */ 1326#endif /* __DWC2_CORE_H__ */
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 422ab7da4eb5..e9940dd004e4 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3668,3 +3668,105 @@ int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
3668 3668
3669 return 0; 3669 return 0;
3670} 3670}
3671
3672/**
3673 * dwc2_backup_device_registers() - Backup controller device registers.
3674 * When suspending usb bus, registers needs to be backuped
3675 * if controller power is disabled once suspended.
3676 *
3677 * @hsotg: Programming view of the DWC_otg controller
3678 */
3679int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
3680{
3681 struct dwc2_dregs_backup *dr;
3682 int i;
3683
3684 dev_dbg(hsotg->dev, "%s\n", __func__);
3685
3686 /* Backup dev regs */
3687 dr = &hsotg->dr_backup;
3688
3689 dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
3690 dr->dctl = dwc2_readl(hsotg->regs + DCTL);
3691 dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
3692 dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
3693 dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
3694
3695 for (i = 0; i < hsotg->num_of_eps; i++) {
3696 /* Backup IN EPs */
3697 dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));
3698
3699 /* Ensure DATA PID is correctly configured */
3700 if (dr->diepctl[i] & DXEPCTL_DPID)
3701 dr->diepctl[i] |= DXEPCTL_SETD1PID;
3702 else
3703 dr->diepctl[i] |= DXEPCTL_SETD0PID;
3704
3705 dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
3706 dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));
3707
3708 /* Backup OUT EPs */
3709 dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));
3710
3711 /* Ensure DATA PID is correctly configured */
3712 if (dr->doepctl[i] & DXEPCTL_DPID)
3713 dr->doepctl[i] |= DXEPCTL_SETD1PID;
3714 else
3715 dr->doepctl[i] |= DXEPCTL_SETD0PID;
3716
3717 dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
3718 dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
3719 }
3720 dr->valid = true;
3721 return 0;
3722}
3723
3724/**
3725 * dwc2_restore_device_registers() - Restore controller device registers.
3726 * When resuming usb bus, device registers needs to be restored
3727 * if controller power were disabled.
3728 *
3729 * @hsotg: Programming view of the DWC_otg controller
3730 */
3731int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
3732{
3733 struct dwc2_dregs_backup *dr;
3734 u32 dctl;
3735 int i;
3736
3737 dev_dbg(hsotg->dev, "%s\n", __func__);
3738
3739 /* Restore dev regs */
3740 dr = &hsotg->dr_backup;
3741 if (!dr->valid) {
3742 dev_err(hsotg->dev, "%s: no device registers to restore\n",
3743 __func__);
3744 return -EINVAL;
3745 }
3746 dr->valid = false;
3747
3748 dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
3749 dwc2_writel(dr->dctl, hsotg->regs + DCTL);
3750 dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
3751 dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
3752 dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);
3753
3754 for (i = 0; i < hsotg->num_of_eps; i++) {
3755 /* Restore IN EPs */
3756 dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
3757 dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
3758 dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));
3759
3760 /* Restore OUT EPs */
3761 dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
3762 dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
3763 dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
3764 }
3765
3766 /* Set the Power-On Programming done bit */
3767 dctl = dwc2_readl(hsotg->regs + DCTL);
3768 dctl |= DCTL_PWRONPRGDONE;
3769 dwc2_writel(dctl, hsotg->regs + DCTL);
3770
3771 return 0;
3772}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 8847c72e55f6..1f6255131857 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -54,6 +54,535 @@
54#include "core.h" 54#include "core.h"
55#include "hcd.h" 55#include "hcd.h"
56 56
57/*
58 * =========================================================================
59 * Host Core Layer Functions
60 * =========================================================================
61 */
62
63/**
64 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts,
65 * used in both device and host modes
66 *
67 * @hsotg: Programming view of the DWC_otg controller
68 */
69static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
70{
71 u32 intmsk;
72
73 /* Clear any pending OTG Interrupts */
74 dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
75
76 /* Clear any pending interrupts */
77 dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
78
79 /* Enable the interrupts in the GINTMSK */
80 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
81
82 if (hsotg->core_params->dma_enable <= 0)
83 intmsk |= GINTSTS_RXFLVL;
84 if (hsotg->core_params->external_id_pin_ctl <= 0)
85 intmsk |= GINTSTS_CONIDSTSCHNG;
86
87 intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
88 GINTSTS_SESSREQINT;
89
90 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
91}
92
93/*
94 * Initializes the FSLSPClkSel field of the HCFG register depending on the
95 * PHY type
96 */
97static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
98{
99 u32 hcfg, val;
100
101 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
102 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
103 hsotg->core_params->ulpi_fs_ls > 0) ||
104 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
105 /* Full speed PHY */
106 val = HCFG_FSLSPCLKSEL_48_MHZ;
107 } else {
108 /* High speed PHY running at full speed or high speed */
109 val = HCFG_FSLSPCLKSEL_30_60_MHZ;
110 }
111
112 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
113 hcfg = dwc2_readl(hsotg->regs + HCFG);
114 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
115 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
116 dwc2_writel(hcfg, hsotg->regs + HCFG);
117}
118
119static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
120{
121 u32 usbcfg, i2cctl;
122 int retval = 0;
123
124 /*
125 * core_init() is now called on every switch so only call the
126 * following for the first time through
127 */
128 if (select_phy) {
129 dev_dbg(hsotg->dev, "FS PHY selected\n");
130
131 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
132 if (!(usbcfg & GUSBCFG_PHYSEL)) {
133 usbcfg |= GUSBCFG_PHYSEL;
134 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
135
136 /* Reset after a PHY select */
137 retval = dwc2_core_reset_and_force_dr_mode(hsotg);
138
139 if (retval) {
140 dev_err(hsotg->dev,
141 "%s: Reset failed, aborting", __func__);
142 return retval;
143 }
144 }
145 }
146
147 /*
148 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
149 * do this on HNP Dev/Host mode switches (done in dev_init and
150 * host_init).
151 */
152 if (dwc2_is_host_mode(hsotg))
153 dwc2_init_fs_ls_pclk_sel(hsotg);
154
155 if (hsotg->core_params->i2c_enable > 0) {
156 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
157
158 /* Program GUSBCFG.OtgUtmiFsSel to I2C */
159 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
160 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
161 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
162
163 /* Program GI2CCTL.I2CEn */
164 i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
165 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
166 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
167 i2cctl &= ~GI2CCTL_I2CEN;
168 dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
169 i2cctl |= GI2CCTL_I2CEN;
170 dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
171 }
172
173 return retval;
174}
175
176static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
177{
178 u32 usbcfg, usbcfg_old;
179 int retval = 0;
180
181 if (!select_phy)
182 return 0;
183
184 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
185 usbcfg_old = usbcfg;
186
187 /*
188 * HS PHY parameters. These parameters are preserved during soft reset
189 * so only program the first time. Do a soft reset immediately after
190 * setting phyif.
191 */
192 switch (hsotg->core_params->phy_type) {
193 case DWC2_PHY_TYPE_PARAM_ULPI:
194 /* ULPI interface */
195 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
196 usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
197 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
198 if (hsotg->core_params->phy_ulpi_ddr > 0)
199 usbcfg |= GUSBCFG_DDRSEL;
200 break;
201 case DWC2_PHY_TYPE_PARAM_UTMI:
202 /* UTMI+ interface */
203 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
204 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
205 if (hsotg->core_params->phy_utmi_width == 16)
206 usbcfg |= GUSBCFG_PHYIF16;
207 break;
208 default:
209 dev_err(hsotg->dev, "FS PHY selected at HS!\n");
210 break;
211 }
212
213 if (usbcfg != usbcfg_old) {
214 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
215
216 /* Reset after setting the PHY parameters */
217 retval = dwc2_core_reset_and_force_dr_mode(hsotg);
218 if (retval) {
219 dev_err(hsotg->dev,
220 "%s: Reset failed, aborting", __func__);
221 return retval;
222 }
223 }
224
225 return retval;
226}
227
228static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
229{
230 u32 usbcfg;
231 int retval = 0;
232
233 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
234 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
235 /* If FS mode with FS PHY */
236 retval = dwc2_fs_phy_init(hsotg, select_phy);
237 if (retval)
238 return retval;
239 } else {
240 /* High speed PHY */
241 retval = dwc2_hs_phy_init(hsotg, select_phy);
242 if (retval)
243 return retval;
244 }
245
246 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
247 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
248 hsotg->core_params->ulpi_fs_ls > 0) {
249 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
250 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
251 usbcfg |= GUSBCFG_ULPI_FS_LS;
252 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
253 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
254 } else {
255 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
256 usbcfg &= ~GUSBCFG_ULPI_FS_LS;
257 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
258 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
259 }
260
261 return retval;
262}
263
264static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
265{
266 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
267
268 switch (hsotg->hw_params.arch) {
269 case GHWCFG2_EXT_DMA_ARCH:
270 dev_err(hsotg->dev, "External DMA Mode not supported\n");
271 return -EINVAL;
272
273 case GHWCFG2_INT_DMA_ARCH:
274 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
275 if (hsotg->core_params->ahbcfg != -1) {
276 ahbcfg &= GAHBCFG_CTRL_MASK;
277 ahbcfg |= hsotg->core_params->ahbcfg &
278 ~GAHBCFG_CTRL_MASK;
279 }
280 break;
281
282 case GHWCFG2_SLAVE_ONLY_ARCH:
283 default:
284 dev_dbg(hsotg->dev, "Slave Only Mode\n");
285 break;
286 }
287
288 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
289 hsotg->core_params->dma_enable,
290 hsotg->core_params->dma_desc_enable);
291
292 if (hsotg->core_params->dma_enable > 0) {
293 if (hsotg->core_params->dma_desc_enable > 0)
294 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
295 else
296 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
297 } else {
298 dev_dbg(hsotg->dev, "Using Slave mode\n");
299 hsotg->core_params->dma_desc_enable = 0;
300 }
301
302 if (hsotg->core_params->dma_enable > 0)
303 ahbcfg |= GAHBCFG_DMA_EN;
304
305 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
306
307 return 0;
308}
309
310static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
311{
312 u32 usbcfg;
313
314 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
315 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
316
317 switch (hsotg->hw_params.op_mode) {
318 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
319 if (hsotg->core_params->otg_cap ==
320 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
321 usbcfg |= GUSBCFG_HNPCAP;
322 if (hsotg->core_params->otg_cap !=
323 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
324 usbcfg |= GUSBCFG_SRPCAP;
325 break;
326
327 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
328 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
329 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
330 if (hsotg->core_params->otg_cap !=
331 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
332 usbcfg |= GUSBCFG_SRPCAP;
333 break;
334
335 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
336 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
337 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
338 default:
339 break;
340 }
341
342 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
343}
344
345/**
346 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
347 *
348 * @hsotg: Programming view of DWC_otg controller
349 */
350static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
351{
352 u32 intmsk;
353
354 dev_dbg(hsotg->dev, "%s()\n", __func__);
355
356 /* Disable all interrupts */
357 dwc2_writel(0, hsotg->regs + GINTMSK);
358 dwc2_writel(0, hsotg->regs + HAINTMSK);
359
360 /* Enable the common interrupts */
361 dwc2_enable_common_interrupts(hsotg);
362
363 /* Enable host mode interrupts without disturbing common interrupts */
364 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
365 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
366 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
367}
368
369/**
370 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
371 *
372 * @hsotg: Programming view of DWC_otg controller
373 */
374static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
375{
376 u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
377
378 /* Disable host mode interrupts without disturbing common interrupts */
379 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
380 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
381 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
382}
383
384/*
385 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
386 * For system that have a total fifo depth that is smaller than the default
387 * RX + TX fifo size.
388 *
389 * @hsotg: Programming view of DWC_otg controller
390 */
391static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
392{
393 struct dwc2_core_params *params = hsotg->core_params;
394 struct dwc2_hw_params *hw = &hsotg->hw_params;
395 u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
396
397 total_fifo_size = hw->total_fifo_size;
398 rxfsiz = params->host_rx_fifo_size;
399 nptxfsiz = params->host_nperio_tx_fifo_size;
400 ptxfsiz = params->host_perio_tx_fifo_size;
401
402 /*
403 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
404 * allocation with support for high bandwidth endpoints. Synopsys
405 * defines MPS(Max Packet size) for a periodic EP=1024, and for
406 * non-periodic as 512.
407 */
408 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
409 /*
410 * For Buffer DMA mode/Scatter Gather DMA mode
411 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
412 * with n = number of host channel.
413 * 2 * ((1024/4) + 2) = 516
414 */
415 rxfsiz = 516 + hw->host_channels;
416
417 /*
418 * min non-periodic tx fifo depth
419 * 2 * (largest non-periodic USB packet used / 4)
420 * 2 * (512/4) = 256
421 */
422 nptxfsiz = 256;
423
424 /*
425 * min periodic tx fifo depth
426 * (largest packet size*MC)/4
427 * (1024 * 3)/4 = 768
428 */
429 ptxfsiz = 768;
430
431 params->host_rx_fifo_size = rxfsiz;
432 params->host_nperio_tx_fifo_size = nptxfsiz;
433 params->host_perio_tx_fifo_size = ptxfsiz;
434 }
435
436 /*
437 * If the summation of RX, NPTX and PTX fifo sizes is still
438 * bigger than the total_fifo_size, then we have a problem.
439 *
440 * We won't be able to allocate as many endpoints. Right now,
441 * we're just printing an error message, but ideally this FIFO
442 * allocation algorithm would be improved in the future.
443 *
444 * FIXME improve this FIFO allocation algorithm.
445 */
446 if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
447 dev_err(hsotg->dev, "invalid fifo sizes\n");
448}
449
450static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
451{
452 struct dwc2_core_params *params = hsotg->core_params;
453 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
454
455 if (!params->enable_dynamic_fifo)
456 return;
457
458 dwc2_calculate_dynamic_fifo(hsotg);
459
460 /* Rx FIFO */
461 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
462 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
463 grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
464 grxfsiz |= params->host_rx_fifo_size <<
465 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
466 dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
467 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
468 dwc2_readl(hsotg->regs + GRXFSIZ));
469
470 /* Non-periodic Tx FIFO */
471 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
472 dwc2_readl(hsotg->regs + GNPTXFSIZ));
473 nptxfsiz = params->host_nperio_tx_fifo_size <<
474 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
475 nptxfsiz |= params->host_rx_fifo_size <<
476 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
477 dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
478 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
479 dwc2_readl(hsotg->regs + GNPTXFSIZ));
480
481 /* Periodic Tx FIFO */
482 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
483 dwc2_readl(hsotg->regs + HPTXFSIZ));
484 hptxfsiz = params->host_perio_tx_fifo_size <<
485 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
486 hptxfsiz |= (params->host_rx_fifo_size +
487 params->host_nperio_tx_fifo_size) <<
488 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
489 dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
490 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
491 dwc2_readl(hsotg->regs + HPTXFSIZ));
492
493 if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
494 hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
495 /*
496 * Global DFIFOCFG calculation for Host mode -
497 * include RxFIFO, NPTXFIFO and HPTXFIFO
498 */
499 dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
500 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
501 dfifocfg |= (params->host_rx_fifo_size +
502 params->host_nperio_tx_fifo_size +
503 params->host_perio_tx_fifo_size) <<
504 GDFIFOCFG_EPINFOBASE_SHIFT &
505 GDFIFOCFG_EPINFOBASE_MASK;
506 dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
507 }
508}
509
510/**
511 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
512 * the HFIR register according to PHY type and speed
513 *
514 * @hsotg: Programming view of DWC_otg controller
515 *
516 * NOTE: The caller can modify the value of the HFIR register only after the
517 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
518 * has been set
519 */
520u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
521{
522 u32 usbcfg;
523 u32 hprt0;
524 int clock = 60; /* default value */
525
526 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
527 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
528
529 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
530 !(usbcfg & GUSBCFG_PHYIF16))
531 clock = 60;
532 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
533 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
534 clock = 48;
535 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
536 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
537 clock = 30;
538 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
539 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
540 clock = 60;
541 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
542 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
543 clock = 48;
544 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
545 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
546 clock = 48;
547 if ((usbcfg & GUSBCFG_PHYSEL) &&
548 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
549 clock = 48;
550
551 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
552 /* High speed case */
553 return 125 * clock - 1;
554
555 /* FS/LS case */
556 return 1000 * clock - 1;
557}
558
559/**
560 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
561 * buffer
562 *
563 * @core_if: Programming view of DWC_otg controller
564 * @dest: Destination buffer for the packet
565 * @bytes: Number of bytes to copy to the destination
566 */
567void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
568{
569 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
570 u32 *data_buf = (u32 *)dest;
571 int word_count = (bytes + 3) / 4;
572 int i;
573
574 /*
575 * Todo: Account for the case where dest is not dword aligned. This
576 * requires reading data from the FIFO into a u32 temp buffer, then
577 * moving it into the data buffer.
578 */
579
580 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
581
582 for (i = 0; i < word_count; i++, data_buf++)
583 *data_buf = dwc2_readl(fifo);
584}
585
57/** 586/**
58 * dwc2_dump_channel_info() - Prints the state of a host channel 587 * dwc2_dump_channel_info() - Prints the state of a host channel
59 * 588 *
@@ -77,7 +606,7 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
77 u32 hc_dma; 606 u32 hc_dma;
78 int i; 607 int i;
79 608
80 if (chan == NULL) 609 if (!chan)
81 return; 610 return;
82 611
83 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); 612 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
@@ -120,6 +649,1056 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
120} 649}
121 650
122/* 651/*
652 * =========================================================================
653 * Low Level Host Channel Access Functions
654 * =========================================================================
655 */
656
657static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
658 struct dwc2_host_chan *chan)
659{
660 u32 hcintmsk = HCINTMSK_CHHLTD;
661
662 switch (chan->ep_type) {
663 case USB_ENDPOINT_XFER_CONTROL:
664 case USB_ENDPOINT_XFER_BULK:
665 dev_vdbg(hsotg->dev, "control/bulk\n");
666 hcintmsk |= HCINTMSK_XFERCOMPL;
667 hcintmsk |= HCINTMSK_STALL;
668 hcintmsk |= HCINTMSK_XACTERR;
669 hcintmsk |= HCINTMSK_DATATGLERR;
670 if (chan->ep_is_in) {
671 hcintmsk |= HCINTMSK_BBLERR;
672 } else {
673 hcintmsk |= HCINTMSK_NAK;
674 hcintmsk |= HCINTMSK_NYET;
675 if (chan->do_ping)
676 hcintmsk |= HCINTMSK_ACK;
677 }
678
679 if (chan->do_split) {
680 hcintmsk |= HCINTMSK_NAK;
681 if (chan->complete_split)
682 hcintmsk |= HCINTMSK_NYET;
683 else
684 hcintmsk |= HCINTMSK_ACK;
685 }
686
687 if (chan->error_state)
688 hcintmsk |= HCINTMSK_ACK;
689 break;
690
691 case USB_ENDPOINT_XFER_INT:
692 if (dbg_perio())
693 dev_vdbg(hsotg->dev, "intr\n");
694 hcintmsk |= HCINTMSK_XFERCOMPL;
695 hcintmsk |= HCINTMSK_NAK;
696 hcintmsk |= HCINTMSK_STALL;
697 hcintmsk |= HCINTMSK_XACTERR;
698 hcintmsk |= HCINTMSK_DATATGLERR;
699 hcintmsk |= HCINTMSK_FRMOVRUN;
700
701 if (chan->ep_is_in)
702 hcintmsk |= HCINTMSK_BBLERR;
703 if (chan->error_state)
704 hcintmsk |= HCINTMSK_ACK;
705 if (chan->do_split) {
706 if (chan->complete_split)
707 hcintmsk |= HCINTMSK_NYET;
708 else
709 hcintmsk |= HCINTMSK_ACK;
710 }
711 break;
712
713 case USB_ENDPOINT_XFER_ISOC:
714 if (dbg_perio())
715 dev_vdbg(hsotg->dev, "isoc\n");
716 hcintmsk |= HCINTMSK_XFERCOMPL;
717 hcintmsk |= HCINTMSK_FRMOVRUN;
718 hcintmsk |= HCINTMSK_ACK;
719
720 if (chan->ep_is_in) {
721 hcintmsk |= HCINTMSK_XACTERR;
722 hcintmsk |= HCINTMSK_BBLERR;
723 }
724 break;
725 default:
726 dev_err(hsotg->dev, "## Unknown EP type ##\n");
727 break;
728 }
729
730 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
731 if (dbg_hc(chan))
732 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
733}
734
735static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
736 struct dwc2_host_chan *chan)
737{
738 u32 hcintmsk = HCINTMSK_CHHLTD;
739
740 /*
741 * For Descriptor DMA mode core halts the channel on AHB error.
742 * Interrupt is not required.
743 */
744 if (hsotg->core_params->dma_desc_enable <= 0) {
745 if (dbg_hc(chan))
746 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
747 hcintmsk |= HCINTMSK_AHBERR;
748 } else {
749 if (dbg_hc(chan))
750 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
751 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
752 hcintmsk |= HCINTMSK_XFERCOMPL;
753 }
754
755 if (chan->error_state && !chan->do_split &&
756 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
757 if (dbg_hc(chan))
758 dev_vdbg(hsotg->dev, "setting ACK\n");
759 hcintmsk |= HCINTMSK_ACK;
760 if (chan->ep_is_in) {
761 hcintmsk |= HCINTMSK_DATATGLERR;
762 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
763 hcintmsk |= HCINTMSK_NAK;
764 }
765 }
766
767 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
768 if (dbg_hc(chan))
769 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
770}
771
772static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
773 struct dwc2_host_chan *chan)
774{
775 u32 intmsk;
776
777 if (hsotg->core_params->dma_enable > 0) {
778 if (dbg_hc(chan))
779 dev_vdbg(hsotg->dev, "DMA enabled\n");
780 dwc2_hc_enable_dma_ints(hsotg, chan);
781 } else {
782 if (dbg_hc(chan))
783 dev_vdbg(hsotg->dev, "DMA disabled\n");
784 dwc2_hc_enable_slave_ints(hsotg, chan);
785 }
786
787 /* Enable the top level host channel interrupt */
788 intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
789 intmsk |= 1 << chan->hc_num;
790 dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
791 if (dbg_hc(chan))
792 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
793
794 /* Make sure host channel interrupts are enabled */
795 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
796 intmsk |= GINTSTS_HCHINT;
797 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
798 if (dbg_hc(chan))
799 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
800}
801
/**
 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
 * a specific endpoint
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The HCCHARn register is set up with the characteristics specified in chan.
 * Host channel interrupts that may need to be serviced while this transfer is
 * in progress are enabled.
 */
static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u8 hc_num = chan->hc_num;
	u32 hcintmsk;
	u32 hcchar;
	u32 hcsplt = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/*
	 * Clear old interrupt conditions for this host channel by writing
	 * 1 to every defined HCINT bit (writing reserved bits is avoided)
	 */
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));

	/* Enable channel interrupts required for this transfer */
	dwc2_hc_enable_ints(hsotg, chan);

	/*
	 * Program the HCCHARn register with the endpoint characteristics for
	 * the current transfer
	 */
	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
	if (chan->ep_is_in)
		hcchar |= HCCHAR_EPDIR;
	if (chan->speed == USB_SPEED_LOW)
		hcchar |= HCCHAR_LSPDDEV;
	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
	dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
			 hc_num, hcchar);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
			 __func__, hc_num);
		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
			 chan->dev_addr);
		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
			 chan->ep_num);
		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
			 chan->ep_is_in);
		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
			 chan->speed == USB_SPEED_LOW);
		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
			 chan->ep_type);
		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
			 chan->max_packet);
	}

	/* Program the HCSPLT register for SPLITs */
	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev,
				 "Programming HC %d with split --> %s\n",
				 hc_num,
				 chan->complete_split ? "CSPLIT" : "SSPLIT");
		if (chan->complete_split)
			hcsplt |= HCSPLT_COMPSPLT;
		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
			  HCSPLT_XACTPOS_MASK;
		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
			  HCSPLT_HUBADDR_MASK;
		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
			  HCSPLT_PRTADDR_MASK;
		if (dbg_hc(chan)) {
			dev_vdbg(hsotg->dev, "	  comp split %d\n",
				 chan->complete_split);
			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
				 chan->xact_pos);
			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
				 chan->hub_addr);
			dev_vdbg(hsotg->dev, "	  hub port %d\n",
				 chan->hub_port);
			dev_vdbg(hsotg->dev, "	  is_in %d\n",
				 chan->ep_is_in);
			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
				 chan->max_packet);
			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
				 chan->xfer_len);
		}
	}

	/* HCSPLT is written unconditionally; 0 disables split handling */
	dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
}
899
/**
 * dwc2_hc_halt() - Attempts to halt a host channel
 *
 * @hsotg: Controller register interface
 * @chan: Host channel to halt
 * @halt_status: Reason for halting the channel
 *
 * This function should only be called in Slave mode or to abort a transfer in
 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
 * controller halts the channel when the transfer is complete or a condition
 * occurs that requires application intervention.
 *
 * In slave mode, checks for a free request queue entry, then sets the Channel
 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt. If there is no free
 * request queue entry, sets only the Channel Disable bit of the HCCHARn
 * register to flush requests for this channel. In the latter case, sets a
 * flag to indicate that the host channel needs to be halted when a request
 * queue slot is open.
 *
 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
 * HCCHARn register. The controller ensures there is space in the request
 * queue before submitting the halt request.
 *
 * Some time may elapse before the core flushes any posted requests for this
 * host channel and halts. The Channel Halted interrupt handler completes the
 * deactivation of the host channel.
 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);
	/* NO_HALT_STATUS is a caller bug; log loudly but continue */
	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		dev_vdbg(hsotg->dev, "dequeue/error\n");
		dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}
	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev,
			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	hcchar |= HCCHAR_CHDIS;

	if (hsotg->core_params->dma_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/* Check for space in the request queue to issue the halt */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				/* No queue space: flush-only disable */
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}

	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	chan->halt_status = halt_status;

	if (hcchar & HCCHAR_CHENA) {
		/* Halt request was posted; wait for Channel Halted intr */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel enabled\n");
		chan->halt_pending = 1;
		chan->halt_on_queue = 0;
	} else {
		/* Halt must be re-issued once a queue slot frees up */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel disabled\n");
		chan->halt_on_queue = 1;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
			 hcchar);
		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
			 chan->halt_pending);
		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
			 chan->halt_on_queue);
		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
			 chan->halt_status);
	}
}
1064
1065/**
1066 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1067 *
1068 * @hsotg: Programming view of DWC_otg controller
1069 * @chan: Identifies the host channel to clean up
1070 *
1071 * This function is normally called after a transfer is done and the host
1072 * channel is being released
1073 */
1074void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1075{
1076 u32 hcintmsk;
1077
1078 chan->xfer_started = 0;
1079
1080 list_del_init(&chan->split_order_list_entry);
1081
1082 /*
1083 * Clear channel interrupt enables and any unhandled channel interrupt
1084 * conditions
1085 */
1086 dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1087 hcintmsk = 0xffffffff;
1088 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1089 dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1090}
1091
/**
 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
 * which frame a periodic transfer should occur
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Identifies the host channel to set up and its properties
 * @hcchar: Current value of the HCCHAR register for the specified host channel
 *
 * This function has no effect on non-periodic transfers
 */
static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, u32 *hcchar)
{
	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		int host_speed;
		int xfer_ns;
		int xfer_us;
		int bytes_in_fifo;
		u16 fifo_space;
		u16 frame_number;
		u16 wire_frame;

		/*
		 * Try to figure out if we're an even or odd frame. If we set
		 * even and the current frame number is even then the transfer
		 * will happen immediately. Similar if both are odd. If one is
		 * even and the other is odd then the transfer will happen when
		 * the frame number ticks.
		 *
		 * There's a bit of a balancing act to get this right.
		 * Sometimes we may want to send data in the current frame (AK
		 * right away). We might want to do this if the frame number
		 * _just_ ticked, but we might also want to do this in order
		 * to continue a split transaction that happened late in a
		 * microframe (so we didn't know to queue the next transfer
		 * until the frame number had ticked). The problem is that we
		 * need a lot of knowledge to know if there's actually still
		 * time to send things or if it would be better to wait until
		 * the next frame.
		 *
		 * We can look at how much time is left in the current frame
		 * and make a guess about whether we'll have time to transfer.
		 * We'll do that.
		 */

		/* Get speed host is running at */
		host_speed = (chan->speed != USB_SPEED_HIGH &&
			      !chan->do_split) ? chan->speed : USB_SPEED_HIGH;

		/* See how many bytes are in the periodic FIFO right now */
		fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) &
			      TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
		bytes_in_fifo = sizeof(u32) *
				(hsotg->core_params->host_perio_tx_fifo_size -
				 fifo_space);

		/*
		 * Roughly estimate bus time for everything in the periodic
		 * queue + our new transfer. This is "rough" because we're
		 * using a function that takes into account IN/OUT
		 * and INT/ISO and we're just slamming in one value for all
		 * transfers. This should be an over-estimate and that should
		 * be OK, but we can probably tighten it.
		 */
		xfer_ns = usb_calc_bus_time(host_speed, false, false,
					    chan->xfer_len + bytes_in_fifo);
		xfer_us = NS_TO_US(xfer_ns);

		/* See what frame number we'll be at by the time we finish */
		frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us);

		/* This is when we were scheduled to be on the wire */
		wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1);

		/*
		 * If we'd finish _after_ the frame we're scheduled in then
		 * it's hopeless. Just schedule right away and hope for the
		 * best. Note that it _might_ be wise to call back into the
		 * scheduler to pick a better frame, but this is better than
		 * nothing.
		 */
		if (dwc2_frame_num_gt(frame_number, wire_frame)) {
			dwc2_sch_vdbg(hsotg,
				      "QH=%p EO MISS fr=%04x=>%04x (%+d)\n",
				      chan->qh, wire_frame, frame_number,
				      dwc2_frame_num_dec(frame_number,
							 wire_frame));
			wire_frame = frame_number;

			/*
			 * We picked a different frame number; communicate this
			 * back to the scheduler so it doesn't try to schedule
			 * another in the same frame.
			 *
			 * Remember that next_active_frame is 1 before the wire
			 * frame.
			 */
			chan->qh->next_active_frame =
				dwc2_frame_num_dec(frame_number, 1);
		}

		/* Odd target frame -> set ODDFRM, even -> clear it */
		if (wire_frame & 1)
			*hcchar |= HCCHAR_ODDFRM;
		else
			*hcchar &= ~HCCHAR_ODDFRM;
	}
}
1200
1201static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1202{
1203 /* Set up the initial PID for the transfer */
1204 if (chan->speed == USB_SPEED_HIGH) {
1205 if (chan->ep_is_in) {
1206 if (chan->multi_count == 1)
1207 chan->data_pid_start = DWC2_HC_PID_DATA0;
1208 else if (chan->multi_count == 2)
1209 chan->data_pid_start = DWC2_HC_PID_DATA1;
1210 else
1211 chan->data_pid_start = DWC2_HC_PID_DATA2;
1212 } else {
1213 if (chan->multi_count == 1)
1214 chan->data_pid_start = DWC2_HC_PID_DATA0;
1215 else
1216 chan->data_pid_start = DWC2_HC_PID_MDATA;
1217 }
1218 } else {
1219 chan->data_pid_start = DWC2_HC_PID_DATA0;
1220 }
1221}
1222
1223/**
1224 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1225 * the Host Channel
1226 *
1227 * @hsotg: Programming view of DWC_otg controller
1228 * @chan: Information needed to initialize the host channel
1229 *
1230 * This function should only be called in Slave mode. For a channel associated
1231 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1232 * associated with a periodic EP, the periodic Tx FIFO is written.
1233 *
1234 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1235 * the number of bytes written to the Tx FIFO.
1236 */
1237static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1238 struct dwc2_host_chan *chan)
1239{
1240 u32 i;
1241 u32 remaining_count;
1242 u32 byte_count;
1243 u32 dword_count;
1244 u32 __iomem *data_fifo;
1245 u32 *data_buf = (u32 *)chan->xfer_buf;
1246
1247 if (dbg_hc(chan))
1248 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1249
1250 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1251
1252 remaining_count = chan->xfer_len - chan->xfer_count;
1253 if (remaining_count > chan->max_packet)
1254 byte_count = chan->max_packet;
1255 else
1256 byte_count = remaining_count;
1257
1258 dword_count = (byte_count + 3) / 4;
1259
1260 if (((unsigned long)data_buf & 0x3) == 0) {
1261 /* xfer_buf is DWORD aligned */
1262 for (i = 0; i < dword_count; i++, data_buf++)
1263 dwc2_writel(*data_buf, data_fifo);
1264 } else {
1265 /* xfer_buf is not DWORD aligned */
1266 for (i = 0; i < dword_count; i++, data_buf++) {
1267 u32 data = data_buf[0] | data_buf[1] << 8 |
1268 data_buf[2] << 16 | data_buf[3] << 24;
1269 dwc2_writel(data, data_fifo);
1270 }
1271 }
1272
1273 chan->xfer_count += byte_count;
1274 chan->xfer_buf += byte_count;
1275}
1276
1277/**
1278 * dwc2_hc_do_ping() - Starts a PING transfer
1279 *
1280 * @hsotg: Programming view of DWC_otg controller
1281 * @chan: Information needed to initialize the host channel
1282 *
1283 * This function should only be called in Slave mode. The Do Ping bit is set in
1284 * the HCTSIZ register, then the channel is enabled.
1285 */
1286static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
1287 struct dwc2_host_chan *chan)
1288{
1289 u32 hcchar;
1290 u32 hctsiz;
1291
1292 if (dbg_hc(chan))
1293 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1294 chan->hc_num);
1295
1296 hctsiz = TSIZ_DOPNG;
1297 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1298 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1299
1300 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1301 hcchar |= HCCHAR_CHENA;
1302 hcchar &= ~HCCHAR_CHDIS;
1303 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1304}
1305
/**
 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
 * channel and starts the transfer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel. The xfer_len value
 *        may be reduced to accommodate the max widths of the XferSize and
 *        PktCnt fields in the HCTSIZn register. The multi_count value may be
 *        changed to reflect the final xfer_len value.
 *
 * This function may be called in either Slave mode or DMA mode. In Slave mode,
 * the caller must ensure that there is sufficient space in the request queue
 * and Tx Data FIFO.
 *
 * For an OUT transfer in Slave mode, it loads a data packet into the
 * appropriate FIFO. If necessary, additional data packets are loaded in the
 * Host ISR.
 *
 * For an IN transfer in Slave mode, a data packet is requested. The data
 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
 * additional data packets are requested in the Host ISR.
 *
 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
 * register along with a packet count of 1 and the channel is enabled. This
 * causes a single PING transaction to occur. Other fields in HCTSIZ are
 * simply set to 0 since no data transfer occurs in this case.
 *
 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
 * all the information required to perform the subsequent data transfer. In
 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
 * controller performs the entire PING protocol, then starts the data
 * transfer.
 */
static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
				   struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets;
	u32 ec_mc;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		/* Slave-mode PING is a standalone transaction; done here */
		if (hsotg->core_params->dma_enable <= 0) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, no DMA\n");
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		}

		/* DMA mode: core runs PING then the data phase itself */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "ping, DMA\n");

		hctsiz |= TSIZ_DOPNG;
	}

	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "split\n");
		/* A split carries at most one packet per (micro)frame */
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			/* 188 bytes: presumably the SSPLIT per-microframe
			 * payload limit — TODO confirm against USB 2.0 spec */
			chan->xfer_len = 188;

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;

		/* For split set ec_mc for immediate retries */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			ec_mc = 3;
		else
			ec_mc = 1;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
					chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets * chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_is_in)
			/*
			 * Always program an integral # of max packets for IN
			 * transfers
			 */
			chan->xfer_len = num_packets * chan->max_packet;

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;

		/* The ec_mc gets the multi_count for non-split */
		ec_mc = chan->multi_count;
	}

	chan->start_pkt_count = num_packets;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;
	dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
			 hctsiz, chan->hc_num);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
			 TSIZ_XFERSIZE_SHIFT);
		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
			 (hctsiz & TSIZ_PKTCNT_MASK) >>
			 TSIZ_PKTCNT_SHIFT);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
			 TSIZ_SC_MC_PID_SHIFT);
	}

	if (hsotg->core_params->dma_enable > 0) {
		dwc2_writel((u32)chan->xfer_dma,
			    hsotg->regs + HCDMA(chan->hc_num));
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
				 (unsigned long)chan->xfer_dma, chan->hc_num);
	}

	/* Start the split */
	if (chan->do_split) {
		u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));

		hcsplt |= HCSPLT_SPLTENA;
		dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
	}

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;

	if (hsotg->core_params->dma_enable <= 0 &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}
1529
/**
 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
 * host channel and starts the transfer in Descriptor DMA mode
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
 * with micro-frame bitmap.
 *
 * Initializes HCDMA register with descriptor list address and CTD value then
 * starts the transfer via enabling the channel.
 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
	}

	dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	/* Flush the descriptor list to memory before the core reads it */
	dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
				   chan->desc_list_sz, DMA_TO_DEVICE);

	dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num));

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
			 &chan->desc_list_addr, chan->hc_num);

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	/* CHDIS should never be set here; warn if it is */
	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}
1612
1613/**
1614 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1615 * a previous call to dwc2_hc_start_transfer()
1616 *
1617 * @hsotg: Programming view of DWC_otg controller
1618 * @chan: Information needed to initialize the host channel
1619 *
1620 * The caller must ensure there is sufficient space in the request queue and Tx
1621 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1622 * the controller acts autonomously to complete transfers programmed to a host
1623 * channel.
1624 *
1625 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1626 * if there is any data remaining to be queued. For an IN transfer, another
1627 * data packet is always requested. For the SETUP phase of a control transfer,
1628 * this function does nothing.
1629 *
1630 * Return: 1 if a new request is queued, 0 if no more requests are required
1631 * for this transfer
1632 */
1633static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1634 struct dwc2_host_chan *chan)
1635{
1636 if (dbg_hc(chan))
1637 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1638 chan->hc_num);
1639
1640 if (chan->do_split)
1641 /* SPLITs always queue just once per channel */
1642 return 0;
1643
1644 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1645 /* SETUPs are queued only once since they can't be NAK'd */
1646 return 0;
1647
1648 if (chan->ep_is_in) {
1649 /*
1650 * Always queue another request for other IN transfers. If
1651 * back-to-back INs are issued and NAKs are received for both,
1652 * the driver may still be processing the first NAK when the
1653 * second NAK is received. When the interrupt handler clears
1654 * the NAK interrupt for the first NAK, the second NAK will
1655 * not be seen. So we can't depend on the NAK interrupt
1656 * handler to requeue a NAK'd request. Instead, IN requests
1657 * are issued each time this function is called. When the
1658 * transfer completes, the extra requests for the channel will
1659 * be flushed.
1660 */
1661 u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1662
1663 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1664 hcchar |= HCCHAR_CHENA;
1665 hcchar &= ~HCCHAR_CHDIS;
1666 if (dbg_hc(chan))
1667 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
1668 hcchar);
1669 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1670 chan->requests++;
1671 return 1;
1672 }
1673
1674 /* OUT transfers */
1675
1676 if (chan->xfer_count < chan->xfer_len) {
1677 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1678 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1679 u32 hcchar = dwc2_readl(hsotg->regs +
1680 HCCHAR(chan->hc_num));
1681
1682 dwc2_hc_set_even_odd_frame(hsotg, chan,
1683 &hcchar);
1684 }
1685
1686 /* Load OUT packet into the appropriate Tx FIFO */
1687 dwc2_hc_write_packet(hsotg, chan);
1688 chan->requests++;
1689 return 1;
1690 }
1691
1692 return 0;
1693}
1694
1695/*
1696 * =========================================================================
1697 * HCD
1698 * =========================================================================
1699 */
1700
1701/*
123 * Processes all the URBs in a single list of QHs. Completes them with 1702 * Processes all the URBs in a single list of QHs. Completes them with
124 * -ETIMEDOUT and frees the QTD. 1703 * -ETIMEDOUT and frees the QTD.
125 * 1704 *
@@ -164,6 +1743,9 @@ static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
164 qtd_list_entry) 1743 qtd_list_entry)
165 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); 1744 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
166 1745
1746 if (qh->channel && qh->channel->qh == qh)
1747 qh->channel->qh = NULL;
1748
167 spin_unlock_irqrestore(&hsotg->lock, flags); 1749 spin_unlock_irqrestore(&hsotg->lock, flags);
168 dwc2_hcd_qh_free(hsotg, qh); 1750 dwc2_hcd_qh_free(hsotg, qh);
169 spin_lock_irqsave(&hsotg->lock, flags); 1751 spin_lock_irqsave(&hsotg->lock, flags);
@@ -554,7 +2136,12 @@ static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
554 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); 2136 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
555 2137
556 ep->hcpriv = NULL; 2138 ep->hcpriv = NULL;
2139
2140 if (qh->channel && qh->channel->qh == qh)
2141 qh->channel->qh = NULL;
2142
557 spin_unlock_irqrestore(&hsotg->lock, flags); 2143 spin_unlock_irqrestore(&hsotg->lock, flags);
2144
558 dwc2_hcd_qh_free(hsotg, qh); 2145 dwc2_hcd_qh_free(hsotg, qh);
559 2146
560 return 0; 2147 return 0;
@@ -580,6 +2167,224 @@ static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
580 return 0; 2167 return 0;
581} 2168}
582 2169
2170/**
2171 * dwc2_core_init() - Initializes the DWC_otg controller registers and
2172 * prepares the core for device mode or host mode operation
2173 *
2174 * @hsotg: Programming view of the DWC_otg controller
2175 * @initial_setup: If true then this is the first init for this instance.
2176 */
2177static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
2178{
2179 u32 usbcfg, otgctl;
2180 int retval;
2181
2182 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
2183
2184 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
2185
2186 /* Set ULPI External VBUS bit if needed */
2187 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
2188 if (hsotg->core_params->phy_ulpi_ext_vbus ==
2189 DWC2_PHY_ULPI_EXTERNAL_VBUS)
2190 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
2191
2192 /* Set external TS Dline pulsing bit if needed */
2193 usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
2194 if (hsotg->core_params->ts_dline > 0)
2195 usbcfg |= GUSBCFG_TERMSELDLPULSE;
2196
2197 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
2198
2199 /*
2200 * Reset the Controller
2201 *
2202 * We only need to reset the controller if this is a re-init.
2203 * For the first init we know for sure that earlier code reset us (it
2204 * needed to in order to properly detect various parameters).
2205 */
2206 if (!initial_setup) {
2207 retval = dwc2_core_reset_and_force_dr_mode(hsotg);
2208 if (retval) {
2209 dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
2210 __func__);
2211 return retval;
2212 }
2213 }
2214
2215 /*
2216 * This needs to happen in FS mode before any other programming occurs
2217 */
2218 retval = dwc2_phy_init(hsotg, initial_setup);
2219 if (retval)
2220 return retval;
2221
2222 /* Program the GAHBCFG Register */
2223 retval = dwc2_gahbcfg_init(hsotg);
2224 if (retval)
2225 return retval;
2226
2227 /* Program the GUSBCFG register */
2228 dwc2_gusbcfg_init(hsotg);
2229
2230 /* Program the GOTGCTL register */
2231 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
2232 otgctl &= ~GOTGCTL_OTGVER;
2233 if (hsotg->core_params->otg_ver > 0)
2234 otgctl |= GOTGCTL_OTGVER;
2235 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
2236 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
2237
2238 /* Clear the SRP success bit for FS-I2c */
2239 hsotg->srp_success = 0;
2240
2241 /* Enable common interrupts */
2242 dwc2_enable_common_interrupts(hsotg);
2243
2244 /*
2245 * Do device or host initialization based on mode during PCD and
2246 * HCD initialization
2247 */
2248 if (dwc2_is_host_mode(hsotg)) {
2249 dev_dbg(hsotg->dev, "Host Mode\n");
2250 hsotg->op_state = OTG_STATE_A_HOST;
2251 } else {
2252 dev_dbg(hsotg->dev, "Device Mode\n");
2253 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
2254 }
2255
2256 return 0;
2257}
2258
/**
 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
 * Host mode
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
 * request queues. Host channels are reset to ensure that they are ready for
 * performing transfers.
 */
static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, hfir, otgctl;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Restart the Phy Clock */
	dwc2_writel(0, hsotg->regs + PCGCTL);

	/* Initialize Host Configuration Register */
	dwc2_init_fs_ls_pclk_sel(hsotg);
	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
		/* Force FS/LS-only support when configured for full speed */
		hcfg = dwc2_readl(hsotg->regs + HCFG);
		hcfg |= HCFG_FSLSSUPP;
		dwc2_writel(hcfg, hsotg->regs + HCFG);
	}

	/*
	 * This bit allows dynamic reloading of the HFIR register during
	 * runtime. This bit needs to be programmed during initial configuration
	 * and its value must not be changed during runtime.
	 */
	if (hsotg->core_params->reload_ctl > 0) {
		hfir = dwc2_readl(hsotg->regs + HFIR);
		hfir |= HFIR_RLDCTRL;
		dwc2_writel(hfir, hsotg->regs + HFIR);
	}

	if (hsotg->core_params->dma_desc_enable > 0) {
		u32 op_mode = hsotg->hw_params.op_mode;

		/*
		 * Descriptor DMA requires core rev >= 2.90a, hardware
		 * support for it, and a host-capable operating mode;
		 * otherwise silently fall back to buffer DMA.
		 */
		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
		    !hsotg->hw_params.dma_desc_enable ||
		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
			dev_err(hsotg->dev,
				"Hardware does not support descriptor DMA mode -\n");
			dev_err(hsotg->dev,
				"falling back to buffer DMA mode.\n");
			hsotg->core_params->dma_desc_enable = 0;
		} else {
			hcfg = dwc2_readl(hsotg->regs + HCFG);
			hcfg |= HCFG_DESCDMA;
			dwc2_writel(hcfg, hsotg->regs + HCFG);
		}
	}

	/* Configure data FIFO sizes */
	dwc2_config_fifos(hsotg);

	/* TODO - check this */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);

	/* Make sure the FIFOs are flushed */
	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
	dwc2_flush_rx_fifo(hsotg);

	/*
	 * NOTE(review): HSTSETHNPEN is cleared a second time here, after the
	 * FIFO flush — presumably belt-and-braces; confirm whether the first
	 * clear above (marked TODO) is actually needed.
	 */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);

	if (hsotg->core_params->dma_desc_enable <= 0) {
		int num_channels, i;
		u32 hcchar;

		/* Flush out any leftover queued requests */
		num_channels = hsotg->core_params->host_channels;
		for (i = 0; i < num_channels; i++) {
			hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
			hcchar &= ~HCCHAR_CHENA;
			hcchar |= HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
		}

		/* Halt all channels to put them into a known state */
		for (i = 0; i < num_channels; i++) {
			int count = 0;

			hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
				__func__, i);
			/* Poll for CHENA to clear, ~1 ms max (1000 x 1 us) */
			do {
				hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
				if (++count > 1000) {
					dev_err(hsotg->dev,
						"Unable to clear enable on channel %d\n",
						i);
					break;
				}
				udelay(1);
			} while (hcchar & HCCHAR_CHENA);
		}
	}

	/* Turn on the vbus power */
	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
	if (hsotg->op_state == OTG_STATE_A_HOST) {
		u32 hprt0 = dwc2_read_hprt0(hsotg);

		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
			!!(hprt0 & HPRT0_PWR));
		/* Only write HPRT0 if power is not already on */
		if (!(hprt0 & HPRT0_PWR)) {
			hprt0 |= HPRT0_PWR;
			dwc2_writel(hprt0, hsotg->regs + HPRT0);
		}
	}

	dwc2_enable_host_interrupts(hsotg);
}
2387
583/* 2388/*
584 * Initializes dynamic portions of the DWC_otg HCD state 2389 * Initializes dynamic portions of the DWC_otg HCD state
585 * 2390 *
@@ -635,9 +2440,9 @@ static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg,
635 chan->hub_port = (u8)hub_port; 2440 chan->hub_port = (u8)hub_port;
636} 2441}
637 2442
638static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, 2443static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
639 struct dwc2_host_chan *chan, 2444 struct dwc2_host_chan *chan,
640 struct dwc2_qtd *qtd, void *bufptr) 2445 struct dwc2_qtd *qtd)
641{ 2446{
642 struct dwc2_hcd_urb *urb = qtd->urb; 2447 struct dwc2_hcd_urb *urb = qtd->urb;
643 struct dwc2_hcd_iso_packet_desc *frame_desc; 2448 struct dwc2_hcd_iso_packet_desc *frame_desc;
@@ -657,7 +2462,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
657 else 2462 else
658 chan->xfer_buf = urb->setup_packet; 2463 chan->xfer_buf = urb->setup_packet;
659 chan->xfer_len = 8; 2464 chan->xfer_len = 8;
660 bufptr = NULL;
661 break; 2465 break;
662 2466
663 case DWC2_CONTROL_DATA: 2467 case DWC2_CONTROL_DATA:
@@ -684,7 +2488,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
684 chan->xfer_dma = hsotg->status_buf_dma; 2488 chan->xfer_dma = hsotg->status_buf_dma;
685 else 2489 else
686 chan->xfer_buf = hsotg->status_buf; 2490 chan->xfer_buf = hsotg->status_buf;
687 bufptr = NULL;
688 break; 2491 break;
689 } 2492 }
690 break; 2493 break;
@@ -717,14 +2520,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
717 2520
718 chan->xfer_len = frame_desc->length - qtd->isoc_split_offset; 2521 chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;
719 2522
720 /* For non-dword aligned buffers */
721 if (hsotg->core_params->dma_enable > 0 &&
722 (chan->xfer_dma & 0x3))
723 bufptr = (u8 *)urb->buf + frame_desc->offset +
724 qtd->isoc_split_offset;
725 else
726 bufptr = NULL;
727
728 if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) { 2523 if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
729 if (chan->xfer_len <= 188) 2524 if (chan->xfer_len <= 188)
730 chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL; 2525 chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
@@ -733,63 +2528,93 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
733 } 2528 }
734 break; 2529 break;
735 } 2530 }
2531}
2532
2533#define DWC2_USB_DMA_ALIGN 4
2534
2535struct dma_aligned_buffer {
2536 void *kmalloc_ptr;
2537 void *old_xfer_buffer;
2538 u8 data[0];
2539};
2540
2541static void dwc2_free_dma_aligned_buffer(struct urb *urb)
2542{
2543 struct dma_aligned_buffer *temp;
2544
2545 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2546 return;
736 2547
737 return bufptr; 2548 temp = container_of(urb->transfer_buffer,
2549 struct dma_aligned_buffer, data);
2550
2551 if (usb_urb_dir_in(urb))
2552 memcpy(temp->old_xfer_buffer, temp->data,
2553 urb->transfer_buffer_length);
2554 urb->transfer_buffer = temp->old_xfer_buffer;
2555 kfree(temp->kmalloc_ptr);
2556
2557 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
738} 2558}
739 2559
740static int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, 2560static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
741 struct dwc2_host_chan *chan,
742 struct dwc2_hcd_urb *urb, void *bufptr)
743{ 2561{
744 u32 buf_size; 2562 struct dma_aligned_buffer *temp, *kmalloc_ptr;
745 struct urb *usb_urb; 2563 size_t kmalloc_size;
746 struct usb_hcd *hcd;
747 2564
748 if (!qh->dw_align_buf) { 2565 if (urb->num_sgs || urb->sg ||
749 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) 2566 urb->transfer_buffer_length == 0 ||
750 buf_size = hsotg->core_params->max_transfer_size; 2567 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
751 else 2568 return 0;
752 /* 3072 = 3 max-size Isoc packets */
753 buf_size = 3072;
754 2569
755 qh->dw_align_buf = kmalloc(buf_size, GFP_ATOMIC | GFP_DMA); 2570 /* Allocate a buffer with enough padding for alignment */
756 if (!qh->dw_align_buf) 2571 kmalloc_size = urb->transfer_buffer_length +
757 return -ENOMEM; 2572 sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
758 qh->dw_align_buf_size = buf_size;
759 }
760 2573
761 if (chan->xfer_len) { 2574 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
762 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__); 2575 if (!kmalloc_ptr)
763 usb_urb = urb->priv; 2576 return -ENOMEM;
764 2577
765 if (usb_urb) { 2578 /* Position our struct dma_aligned_buffer such that data is aligned */
766 if (usb_urb->transfer_flags & 2579 temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
767 (URB_SETUP_MAP_SINGLE | URB_DMA_MAP_SG | 2580 temp->kmalloc_ptr = kmalloc_ptr;
768 URB_DMA_MAP_PAGE | URB_DMA_MAP_SINGLE)) { 2581 temp->old_xfer_buffer = urb->transfer_buffer;
769 hcd = dwc2_hsotg_to_hcd(hsotg); 2582 if (usb_urb_dir_out(urb))
770 usb_hcd_unmap_urb_for_dma(hcd, usb_urb); 2583 memcpy(temp->data, urb->transfer_buffer,
771 } 2584 urb->transfer_buffer_length);
772 if (!chan->ep_is_in) 2585 urb->transfer_buffer = temp->data;
773 memcpy(qh->dw_align_buf, bufptr,
774 chan->xfer_len);
775 } else {
776 dev_warn(hsotg->dev, "no URB in dwc2_urb\n");
777 }
778 }
779 2586
780 qh->dw_align_buf_dma = dma_map_single(hsotg->dev, 2587 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
781 qh->dw_align_buf, qh->dw_align_buf_size,
782 chan->ep_is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
783 if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
784 dev_err(hsotg->dev, "can't map align_buf\n");
785 chan->align_buf = 0;
786 return -EINVAL;
787 }
788 2588
789 chan->align_buf = qh->dw_align_buf_dma;
790 return 0; 2589 return 0;
791} 2590}
792 2591
2592static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2593 gfp_t mem_flags)
2594{
2595 int ret;
2596
2597 /* We assume setup_dma is always aligned; warn if not */
2598 WARN_ON_ONCE(urb->setup_dma &&
2599 (urb->setup_dma & (DWC2_USB_DMA_ALIGN - 1)));
2600
2601 ret = dwc2_alloc_dma_aligned_buffer(urb, mem_flags);
2602 if (ret)
2603 return ret;
2604
2605 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2606 if (ret)
2607 dwc2_free_dma_aligned_buffer(urb);
2608
2609 return ret;
2610}
2611
/*
 * hc_driver .unmap_urb_for_dma hook: inverse of dwc2_map_urb_for_dma() —
 * unmap via the generic HCD helper, then drop any bounce buffer.
 */
static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	usb_hcd_unmap_urb_for_dma(hcd, urb);
	dwc2_free_dma_aligned_buffer(urb);
}
2617
793/** 2618/**
794 * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host 2619 * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
795 * channel and initializes the host channel to perform the transactions. The 2620 * channel and initializes the host channel to perform the transactions. The
@@ -804,7 +2629,6 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
804 struct dwc2_host_chan *chan; 2629 struct dwc2_host_chan *chan;
805 struct dwc2_hcd_urb *urb; 2630 struct dwc2_hcd_urb *urb;
806 struct dwc2_qtd *qtd; 2631 struct dwc2_qtd *qtd;
807 void *bufptr = NULL;
808 2632
809 if (dbg_qh(qh)) 2633 if (dbg_qh(qh))
810 dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh); 2634 dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);
@@ -866,16 +2690,10 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
866 !dwc2_hcd_is_pipe_in(&urb->pipe_info)) 2690 !dwc2_hcd_is_pipe_in(&urb->pipe_info))
867 urb->actual_length = urb->length; 2691 urb->actual_length = urb->length;
868 2692
869 if (hsotg->core_params->dma_enable > 0) { 2693 if (hsotg->core_params->dma_enable > 0)
870 chan->xfer_dma = urb->dma + urb->actual_length; 2694 chan->xfer_dma = urb->dma + urb->actual_length;
871 2695 else
872 /* For non-dword aligned case */
873 if (hsotg->core_params->dma_desc_enable <= 0 &&
874 (chan->xfer_dma & 0x3))
875 bufptr = (u8 *)urb->buf + urb->actual_length;
876 } else {
877 chan->xfer_buf = (u8 *)urb->buf + urb->actual_length; 2696 chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
878 }
879 2697
880 chan->xfer_len = urb->length - urb->actual_length; 2698 chan->xfer_len = urb->length - urb->actual_length;
881 chan->xfer_count = 0; 2699 chan->xfer_count = 0;
@@ -887,27 +2705,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
887 chan->do_split = 0; 2705 chan->do_split = 0;
888 2706
889 /* Set the transfer attributes */ 2707 /* Set the transfer attributes */
890 bufptr = dwc2_hc_init_xfer(hsotg, chan, qtd, bufptr); 2708 dwc2_hc_init_xfer(hsotg, chan, qtd);
891
892 /* Non DWORD-aligned buffer case */
893 if (bufptr) {
894 dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
895 if (dwc2_hc_setup_align_buf(hsotg, qh, chan, urb, bufptr)) {
896 dev_err(hsotg->dev,
897 "%s: Failed to allocate memory to handle non-dword aligned buffer\n",
898 __func__);
899 /* Add channel back to free list */
900 chan->align_buf = 0;
901 chan->multi_count = 0;
902 list_add_tail(&chan->hc_list_entry,
903 &hsotg->free_hc_list);
904 qtd->in_process = 0;
905 qh->channel = NULL;
906 return -ENOMEM;
907 }
908 } else {
909 chan->align_buf = 0;
910 }
911 2709
912 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 2710 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
913 chan->ep_type == USB_ENDPOINT_XFER_ISOC) 2711 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
@@ -968,7 +2766,8 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
968 * periodic assigned schedule 2766 * periodic assigned schedule
969 */ 2767 */
970 qh_ptr = qh_ptr->next; 2768 qh_ptr = qh_ptr->next;
971 list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned); 2769 list_move_tail(&qh->qh_list_entry,
2770 &hsotg->periodic_sched_assigned);
972 ret_val = DWC2_TRANSACTION_PERIODIC; 2771 ret_val = DWC2_TRANSACTION_PERIODIC;
973 } 2772 }
974 2773
@@ -1001,8 +2800,8 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
1001 * non-periodic active schedule 2800 * non-periodic active schedule
1002 */ 2801 */
1003 qh_ptr = qh_ptr->next; 2802 qh_ptr = qh_ptr->next;
1004 list_move(&qh->qh_list_entry, 2803 list_move_tail(&qh->qh_list_entry,
1005 &hsotg->non_periodic_sched_active); 2804 &hsotg->non_periodic_sched_active);
1006 2805
1007 if (ret_val == DWC2_TRANSACTION_NONE) 2806 if (ret_val == DWC2_TRANSACTION_NONE)
1008 ret_val = DWC2_TRANSACTION_NON_PERIODIC; 2807 ret_val = DWC2_TRANSACTION_NON_PERIODIC;
@@ -1043,6 +2842,11 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
1043{ 2842{
1044 int retval = 0; 2843 int retval = 0;
1045 2844
2845 if (chan->do_split)
2846 /* Put ourselves on the list to keep order straight */
2847 list_move_tail(&chan->split_order_list_entry,
2848 &hsotg->split_order);
2849
1046 if (hsotg->core_params->dma_enable > 0) { 2850 if (hsotg->core_params->dma_enable > 0) {
1047 if (hsotg->core_params->dma_desc_enable > 0) { 2851 if (hsotg->core_params->dma_desc_enable > 0) {
1048 if (!chan->xfer_started || 2852 if (!chan->xfer_started ||
@@ -1102,10 +2906,14 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
1102 u32 fspcavail; 2906 u32 fspcavail;
1103 u32 gintmsk; 2907 u32 gintmsk;
1104 int status; 2908 int status;
1105 int no_queue_space = 0; 2909 bool no_queue_space = false;
1106 int no_fifo_space = 0; 2910 bool no_fifo_space = false;
1107 u32 qspcavail; 2911 u32 qspcavail;
1108 2912
2913 /* If empty list then just adjust interrupt enables */
2914 if (list_empty(&hsotg->periodic_sched_assigned))
2915 goto exit;
2916
1109 if (dbg_perio()) 2917 if (dbg_perio())
1110 dev_vdbg(hsotg->dev, "Queue periodic transactions\n"); 2918 dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
1111 2919
@@ -1175,50 +2983,40 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
1175 * Move the QH from the periodic assigned schedule to 2983 * Move the QH from the periodic assigned schedule to
1176 * the periodic queued schedule 2984 * the periodic queued schedule
1177 */ 2985 */
1178 list_move(&qh->qh_list_entry, 2986 list_move_tail(&qh->qh_list_entry,
1179 &hsotg->periodic_sched_queued); 2987 &hsotg->periodic_sched_queued);
1180 2988
1181 /* done queuing high bandwidth */ 2989 /* done queuing high bandwidth */
1182 hsotg->queuing_high_bandwidth = 0; 2990 hsotg->queuing_high_bandwidth = 0;
1183 } 2991 }
1184 } 2992 }
1185 2993
1186 if (hsotg->core_params->dma_enable <= 0) { 2994exit:
1187 tx_status = dwc2_readl(hsotg->regs + HPTXSTS); 2995 if (no_queue_space || no_fifo_space ||
1188 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> 2996 (hsotg->core_params->dma_enable <= 0 &&
1189 TXSTS_QSPCAVAIL_SHIFT; 2997 !list_empty(&hsotg->periodic_sched_assigned))) {
1190 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> 2998 /*
1191 TXSTS_FSPCAVAIL_SHIFT; 2999 * May need to queue more transactions as the request
1192 if (dbg_perio()) { 3000 * queue or Tx FIFO empties. Enable the periodic Tx
1193 dev_vdbg(hsotg->dev, 3001 * FIFO empty interrupt. (Always use the half-empty
1194 " P Tx Req Queue Space Avail (after queue): %d\n", 3002 * level to ensure that new requests are loaded as
1195 qspcavail); 3003 * soon as possible.)
1196 dev_vdbg(hsotg->dev, 3004 */
1197 " P Tx FIFO Space Avail (after queue): %d\n", 3005 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
1198 fspcavail); 3006 if (!(gintmsk & GINTSTS_PTXFEMP)) {
1199 }
1200
1201 if (!list_empty(&hsotg->periodic_sched_assigned) ||
1202 no_queue_space || no_fifo_space) {
1203 /*
1204 * May need to queue more transactions as the request
1205 * queue or Tx FIFO empties. Enable the periodic Tx
1206 * FIFO empty interrupt. (Always use the half-empty
1207 * level to ensure that new requests are loaded as
1208 * soon as possible.)
1209 */
1210 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
1211 gintmsk |= GINTSTS_PTXFEMP; 3007 gintmsk |= GINTSTS_PTXFEMP;
1212 dwc2_writel(gintmsk, hsotg->regs + GINTMSK); 3008 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
1213 } else { 3009 }
1214 /* 3010 } else {
1215 * Disable the Tx FIFO empty interrupt since there are 3011 /*
1216 * no more transactions that need to be queued right 3012 * Disable the Tx FIFO empty interrupt since there are
1217 * now. This function is called from interrupt 3013 * no more transactions that need to be queued right
1218 * handlers to queue more transactions as transfer 3014 * now. This function is called from interrupt
1219 * states change. 3015 * handlers to queue more transactions as transfer
1220 */ 3016 * states change.
1221 gintmsk = dwc2_readl(hsotg->regs + GINTMSK); 3017 */
3018 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
3019 if (gintmsk & GINTSTS_PTXFEMP) {
1222 gintmsk &= ~GINTSTS_PTXFEMP; 3020 gintmsk &= ~GINTSTS_PTXFEMP;
1223 dwc2_writel(gintmsk, hsotg->regs + GINTMSK); 3021 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
1224 } 3022 }
@@ -1365,9 +3163,8 @@ void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
1365 dev_vdbg(hsotg->dev, "Queue Transactions\n"); 3163 dev_vdbg(hsotg->dev, "Queue Transactions\n");
1366#endif 3164#endif
1367 /* Process host channels associated with periodic transfers */ 3165 /* Process host channels associated with periodic transfers */
1368 if ((tr_type == DWC2_TRANSACTION_PERIODIC || 3166 if (tr_type == DWC2_TRANSACTION_PERIODIC ||
1369 tr_type == DWC2_TRANSACTION_ALL) && 3167 tr_type == DWC2_TRANSACTION_ALL)
1370 !list_empty(&hsotg->periodic_sched_assigned))
1371 dwc2_process_periodic_channels(hsotg); 3168 dwc2_process_periodic_channels(hsotg);
1372 3169
1373 /* Process host channels associated with non-periodic transfers */ 3170 /* Process host channels associated with non-periodic transfers */
@@ -1947,6 +3744,35 @@ int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
1947 return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT; 3744 return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
1948} 3745}
1949 3746
3747int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us)
3748{
3749 u32 hprt = dwc2_readl(hsotg->regs + HPRT0);
3750 u32 hfir = dwc2_readl(hsotg->regs + HFIR);
3751 u32 hfnum = dwc2_readl(hsotg->regs + HFNUM);
3752 unsigned int us_per_frame;
3753 unsigned int frame_number;
3754 unsigned int remaining;
3755 unsigned int interval;
3756 unsigned int phy_clks;
3757
3758 /* High speed has 125 us per (micro) frame; others are 1 ms per */
3759 us_per_frame = (hprt & HPRT0_SPD_MASK) ? 1000 : 125;
3760
3761 /* Extract fields */
3762 frame_number = (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
3763 remaining = (hfnum & HFNUM_FRREM_MASK) >> HFNUM_FRREM_SHIFT;
3764 interval = (hfir & HFIR_FRINT_MASK) >> HFIR_FRINT_SHIFT;
3765
3766 /*
3767 * Number of phy clocks since the last tick of the frame number after
3768 * "us" has passed.
3769 */
3770 phy_clks = (interval - remaining) +
3771 DIV_ROUND_UP(interval * us, us_per_frame);
3772
3773 return dwc2_frame_num_inc(frame_number, phy_clks / interval);
3774}
3775
1950int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg) 3776int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
1951{ 3777{
1952 return hsotg->op_state == OTG_STATE_B_HOST; 3778 return hsotg->op_state == OTG_STATE_B_HOST;
@@ -2223,6 +4049,90 @@ void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
2223 *hub_port = urb->dev->ttport; 4049 *hub_port = urb->dev->ttport;
2224} 4050}
2225 4051
4052/**
4053 * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
4054 *
4055 * This will get the dwc2_tt structure (and ttport) associated with the given
4056 * context (which is really just a struct urb pointer).
4057 *
4058 * The first time this is called for a given TT we allocate memory for our
4059 * structure. When everyone is done and has called dwc2_host_put_tt_info()
4060 * then the refcount for the structure will go to 0 and we'll free it.
4061 *
4062 * @hsotg: The HCD state structure for the DWC OTG controller.
4063 * @qh: The QH structure.
4064 * @context: The priv pointer from a struct dwc2_hcd_urb.
4065 * @mem_flags: Flags for allocating memory.
4066 * @ttport: We'll return this device's port number here. That's used to
4067 * reference into the bitmap if we're on a multi_tt hub.
4068 *
4069 * Return: a pointer to a struct dwc2_tt. Don't forget to call
4070 * dwc2_host_put_tt_info()! Returns NULL upon memory alloc failure.
4071 */
4072
4073struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context,
4074 gfp_t mem_flags, int *ttport)
4075{
4076 struct urb *urb = context;
4077 struct dwc2_tt *dwc_tt = NULL;
4078
4079 if (urb->dev->tt) {
4080 *ttport = urb->dev->ttport;
4081
4082 dwc_tt = urb->dev->tt->hcpriv;
4083 if (dwc_tt == NULL) {
4084 size_t bitmap_size;
4085
4086 /*
4087 * For single_tt we need one schedule. For multi_tt
4088 * we need one per port.
4089 */
4090 bitmap_size = DWC2_ELEMENTS_PER_LS_BITMAP *
4091 sizeof(dwc_tt->periodic_bitmaps[0]);
4092 if (urb->dev->tt->multi)
4093 bitmap_size *= urb->dev->tt->hub->maxchild;
4094
4095 dwc_tt = kzalloc(sizeof(*dwc_tt) + bitmap_size,
4096 mem_flags);
4097 if (dwc_tt == NULL)
4098 return NULL;
4099
4100 dwc_tt->usb_tt = urb->dev->tt;
4101 dwc_tt->usb_tt->hcpriv = dwc_tt;
4102 }
4103
4104 dwc_tt->refcount++;
4105 }
4106
4107 return dwc_tt;
4108}
4109
4110/**
4111 * dwc2_host_put_tt_info() - Put the dwc2_tt from dwc2_host_get_tt_info()
4112 *
4113 * Frees resources allocated by dwc2_host_get_tt_info() if all current holders
4114 * of the structure are done.
4115 *
4116 * It's OK to call this with NULL.
4117 *
4118 * @hsotg: The HCD state structure for the DWC OTG controller.
4119 * @dwc_tt: The pointer returned by dwc2_host_get_tt_info.
4120 */
4121void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt)
4122{
4123 /* Model kfree and make put of NULL a no-op */
4124 if (dwc_tt == NULL)
4125 return;
4126
4127 WARN_ON(dwc_tt->refcount < 1);
4128
4129 dwc_tt->refcount--;
4130 if (!dwc_tt->refcount) {
4131 dwc_tt->usb_tt->hcpriv = NULL;
4132 kfree(dwc_tt);
4133 }
4134}
4135
2226int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context) 4136int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
2227{ 4137{
2228 struct urb *urb = context; 4138 struct urb *urb = context;
@@ -2334,9 +4244,7 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
2334 kfree(qtd->urb); 4244 kfree(qtd->urb);
2335 qtd->urb = NULL; 4245 qtd->urb = NULL;
2336 4246
2337 spin_unlock(&hsotg->lock);
2338 usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status); 4247 usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status);
2339 spin_lock(&hsotg->lock);
2340} 4248}
2341 4249
2342/* 4250/*
@@ -2789,6 +4697,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
2789fail3: 4697fail3:
2790 dwc2_urb->priv = NULL; 4698 dwc2_urb->priv = NULL;
2791 usb_hcd_unlink_urb_from_ep(hcd, urb); 4699 usb_hcd_unlink_urb_from_ep(hcd, urb);
4700 if (qh_allocated && qh->channel && qh->channel->qh == qh)
4701 qh->channel->qh = NULL;
2792fail2: 4702fail2:
2793 spin_unlock_irqrestore(&hsotg->lock, flags); 4703 spin_unlock_irqrestore(&hsotg->lock, flags);
2794 urb->hcpriv = NULL; 4704 urb->hcpriv = NULL;
@@ -2955,7 +4865,7 @@ static struct hc_driver dwc2_hc_driver = {
2955 .hcd_priv_size = sizeof(struct wrapper_priv_data), 4865 .hcd_priv_size = sizeof(struct wrapper_priv_data),
2956 4866
2957 .irq = _dwc2_hcd_irq, 4867 .irq = _dwc2_hcd_irq,
2958 .flags = HCD_MEMORY | HCD_USB2, 4868 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
2959 4869
2960 .start = _dwc2_hcd_start, 4870 .start = _dwc2_hcd_start,
2961 .stop = _dwc2_hcd_stop, 4871 .stop = _dwc2_hcd_stop,
@@ -2971,6 +4881,9 @@ static struct hc_driver dwc2_hc_driver = {
2971 4881
2972 .bus_suspend = _dwc2_hcd_suspend, 4882 .bus_suspend = _dwc2_hcd_suspend,
2973 .bus_resume = _dwc2_hcd_resume, 4883 .bus_resume = _dwc2_hcd_resume,
4884
4885 .map_urb_for_dma = dwc2_map_urb_for_dma,
4886 .unmap_urb_for_dma = dwc2_unmap_urb_for_dma,
2974}; 4887};
2975 4888
2976/* 4889/*
@@ -3081,8 +4994,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
3081 FRAME_NUM_ARRAY_SIZE, GFP_KERNEL); 4994 FRAME_NUM_ARRAY_SIZE, GFP_KERNEL);
3082 if (!hsotg->last_frame_num_array) 4995 if (!hsotg->last_frame_num_array)
3083 goto error1; 4996 goto error1;
3084 hsotg->last_frame_num = HFNUM_MAX_FRNUM;
3085#endif 4997#endif
4998 hsotg->last_frame_num = HFNUM_MAX_FRNUM;
3086 4999
3087 /* Check if the bus driver or platform code has setup a dma_mask */ 5000 /* Check if the bus driver or platform code has setup a dma_mask */
3088 if (hsotg->core_params->dma_enable > 0 && 5001 if (hsotg->core_params->dma_enable > 0 &&
@@ -3146,6 +5059,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
3146 INIT_LIST_HEAD(&hsotg->periodic_sched_assigned); 5059 INIT_LIST_HEAD(&hsotg->periodic_sched_assigned);
3147 INIT_LIST_HEAD(&hsotg->periodic_sched_queued); 5060 INIT_LIST_HEAD(&hsotg->periodic_sched_queued);
3148 5061
5062 INIT_LIST_HEAD(&hsotg->split_order);
5063
3149 /* 5064 /*
3150 * Create a host channel descriptor for each host channel implemented 5065 * Create a host channel descriptor for each host channel implemented
3151 * in the controller. Initialize the channel descriptor array. 5066 * in the controller. Initialize the channel descriptor array.
@@ -3159,12 +5074,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
3159 if (channel == NULL) 5074 if (channel == NULL)
3160 goto error3; 5075 goto error3;
3161 channel->hc_num = i; 5076 channel->hc_num = i;
5077 INIT_LIST_HEAD(&channel->split_order_list_entry);
3162 hsotg->hc_ptr_array[i] = channel; 5078 hsotg->hc_ptr_array[i] = channel;
3163 } 5079 }
3164 5080
3165 if (hsotg->core_params->uframe_sched > 0)
3166 dwc2_hcd_init_usecs(hsotg);
3167
3168 /* Initialize hsotg start work */ 5081 /* Initialize hsotg start work */
3169 INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func); 5082 INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
3170 5083
@@ -3317,3 +5230,67 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
3317 kfree(hsotg->frame_num_array); 5230 kfree(hsotg->frame_num_array);
3318#endif 5231#endif
3319} 5232}
5233
5234/**
5235 * dwc2_backup_host_registers() - Backup controller host registers.
5236 * When suspending the USB bus, registers need to be backed up
5237 * if controller power is disabled once suspended.
5238 *
5239 * @hsotg: Programming view of the DWC_otg controller
5240 */
5241int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
5242{
5243 struct dwc2_hregs_backup *hr;
5244 int i;
5245
5246 dev_dbg(hsotg->dev, "%s\n", __func__);
5247
5248 /* Backup Host regs */
5249 hr = &hsotg->hr_backup;
5250 hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
5251 hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
5252 for (i = 0; i < hsotg->core_params->host_channels; ++i)
5253 hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
5254
5255 hr->hprt0 = dwc2_read_hprt0(hsotg);
5256 hr->hfir = dwc2_readl(hsotg->regs + HFIR);
5257 hr->valid = true;
5258
5259 return 0;
5260}
5261
5262/**
5263 * dwc2_restore_host_registers() - Restore controller host registers.
5264 * When resuming the USB bus, device registers need to be restored
5265 * if controller power was disabled.
5266 *
5267 * @hsotg: Programming view of the DWC_otg controller
5268 */
5269int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
5270{
5271 struct dwc2_hregs_backup *hr;
5272 int i;
5273
5274 dev_dbg(hsotg->dev, "%s\n", __func__);
5275
5276 /* Restore host regs */
5277 hr = &hsotg->hr_backup;
5278 if (!hr->valid) {
5279 dev_err(hsotg->dev, "%s: no host registers to restore\n",
5280 __func__);
5281 return -EINVAL;
5282 }
5283 hr->valid = false;
5284
5285 dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
5286 dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
5287
5288 for (i = 0; i < hsotg->core_params->host_channels; ++i)
5289 dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
5290
5291 dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
5292 dwc2_writel(hr->hfir, hsotg->regs + HFIR);
5293 hsotg->frame_number = 0;
5294
5295 return 0;
5296}
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 8f0a29cefdf7..89fa26cb25f4 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -75,8 +75,6 @@ struct dwc2_qh;
75 * (micro)frame 75 * (micro)frame
76 * @xfer_buf: Pointer to current transfer buffer position 76 * @xfer_buf: Pointer to current transfer buffer position
77 * @xfer_dma: DMA address of xfer_buf 77 * @xfer_dma: DMA address of xfer_buf
78 * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not
79 * DWORD aligned
80 * @xfer_len: Total number of bytes to transfer 78 * @xfer_len: Total number of bytes to transfer
81 * @xfer_count: Number of bytes transferred so far 79 * @xfer_count: Number of bytes transferred so far
82 * @start_pkt_count: Packet count at start of transfer 80 * @start_pkt_count: Packet count at start of transfer
@@ -108,6 +106,7 @@ struct dwc2_qh;
108 * @hc_list_entry: For linking to list of host channels 106 * @hc_list_entry: For linking to list of host channels
109 * @desc_list_addr: Current QH's descriptor list DMA address 107 * @desc_list_addr: Current QH's descriptor list DMA address
110 * @desc_list_sz: Current QH's descriptor list size 108 * @desc_list_sz: Current QH's descriptor list size
109 * @split_order_list_entry: List entry for keeping track of the order of splits
111 * 110 *
112 * This structure represents the state of a single host channel when acting in 111 * This structure represents the state of a single host channel when acting in
113 * host mode. It contains the data items needed to transfer packets to an 112 * host mode. It contains the data items needed to transfer packets to an
@@ -133,7 +132,6 @@ struct dwc2_host_chan {
133 132
134 u8 *xfer_buf; 133 u8 *xfer_buf;
135 dma_addr_t xfer_dma; 134 dma_addr_t xfer_dma;
136 dma_addr_t align_buf;
137 u32 xfer_len; 135 u32 xfer_len;
138 u32 xfer_count; 136 u32 xfer_count;
139 u16 start_pkt_count; 137 u16 start_pkt_count;
@@ -161,6 +159,7 @@ struct dwc2_host_chan {
161 struct list_head hc_list_entry; 159 struct list_head hc_list_entry;
162 dma_addr_t desc_list_addr; 160 dma_addr_t desc_list_addr;
163 u32 desc_list_sz; 161 u32 desc_list_sz;
162 struct list_head split_order_list_entry;
164}; 163};
165 164
166struct dwc2_hcd_pipe_info { 165struct dwc2_hcd_pipe_info {
@@ -213,9 +212,47 @@ enum dwc2_transaction_type {
213 DWC2_TRANSACTION_ALL, 212 DWC2_TRANSACTION_ALL,
214}; 213};
215 214
215/* The number of elements per LS bitmap (per port on multi_tt) */
216#define DWC2_ELEMENTS_PER_LS_BITMAP DIV_ROUND_UP(DWC2_LS_SCHEDULE_SLICES, \
217 BITS_PER_LONG)
218
219/**
220 * struct dwc2_tt - dwc2 data associated with a usb_tt
221 *
222 * @refcount: Number of Queue Heads (QHs) holding a reference.
223 * @usb_tt: Pointer back to the official usb_tt.
224 * @periodic_bitmaps: Bitmap for which parts of the 1ms frame are accounted
225 * for already. Each is DWC2_ELEMENTS_PER_LS_BITMAP
226 * elements (so sizeof(long) times that in bytes).
227 *
228 * This structure is stored in the hcpriv of the official usb_tt.
229 */
230struct dwc2_tt {
231 int refcount;
232 struct usb_tt *usb_tt;
233 unsigned long periodic_bitmaps[];
234};
235
236/**
237 * struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus.
238 *
239 * @start_schedule_us:   The start time on the main bus schedule. Note that
240 * the main bus schedule is tightly packed and this
241 * time should be interpreted as tightly packed (so
242 * uFrame 0 starts at 0 us, uFrame 1 starts at 100 us
243 * instead of 125 us).
244 * @duration_us: How long this transfer goes.
245 */
246
247struct dwc2_hs_transfer_time {
248 u32 start_schedule_us;
249 u16 duration_us;
250};
251
216/** 252/**
217 * struct dwc2_qh - Software queue head structure 253 * struct dwc2_qh - Software queue head structure
218 * 254 *
255 * @hsotg: The HCD state structure for the DWC OTG controller
219 * @ep_type: Endpoint type. One of the following values: 256 * @ep_type: Endpoint type. One of the following values:
220 * - USB_ENDPOINT_XFER_CONTROL 257 * - USB_ENDPOINT_XFER_CONTROL
221 * - USB_ENDPOINT_XFER_BULK 258 * - USB_ENDPOINT_XFER_BULK
@@ -236,17 +273,35 @@ enum dwc2_transaction_type {
236 * @do_split: Full/low speed endpoint on high-speed hub requires split 273 * @do_split: Full/low speed endpoint on high-speed hub requires split
237 * @td_first: Index of first activated isochronous transfer descriptor 274 * @td_first: Index of first activated isochronous transfer descriptor
238 * @td_last: Index of last activated isochronous transfer descriptor 275 * @td_last: Index of last activated isochronous transfer descriptor
239 * @usecs: Bandwidth in microseconds per (micro)frame 276 * @host_us: Bandwidth in microseconds per transfer as seen by host
240 * @interval: Interval between transfers in (micro)frames 277 * @device_us: Bandwidth in microseconds per transfer as seen by device
241 * @sched_frame: (Micro)frame to initialize a periodic transfer. 278 * @host_interval: Interval between transfers as seen by the host. If
242 * The transfer executes in the following (micro)frame. 279 * the host is high speed and the device is low speed this
243 * @frame_usecs: Internal variable used by the microframe scheduler 280 * will be 8 times device interval.
244 * @start_split_frame: (Micro)frame at which last start split was initialized 281 * @device_interval: Interval between transfers as seen by the device.
282 * interval.
283 * @next_active_frame: (Micro)frame _before_ we next need to put something on
284 * the bus. We'll move the qh to active here. If the
285 * host is in high speed mode this will be a uframe. If
286 * the host is in low speed mode this will be a full frame.
287 * @start_active_frame: If we are partway through a split transfer, this will be
288 * what next_active_frame was when we started. Otherwise
289 * it should always be the same as next_active_frame.
290 * @num_hs_transfers: Number of transfers in hs_transfers.
291 * Normally this is 1 but can be more than one for splits.
292 * Always >= 1 unless the host is in low/full speed mode.
293 * @hs_transfers: Transfers that are scheduled as seen by the high speed
294 * bus. Not used if host is in low or full speed mode (but
295 * note that it IS USED if the device is low or full speed
296 * as long as the HOST is in high speed mode).
297 * @ls_start_schedule_slice: Start time (in slices) on the low speed bus
298 * schedule that's being used by this device. This
299 * will be on the periodic_bitmap in a
300 * "struct dwc2_tt". Not used if this device is high
301 * speed. Note that this is in "schedule slice" which
302 * is tightly packed.
303 * @ls_duration_us: Duration on the low speed bus schedule.
245 * @ntd: Actual number of transfer descriptors in a list 304 * @ntd: Actual number of transfer descriptors in a list
246 * @dw_align_buf: Used instead of original buffer if its physical address
247 * is not dword-aligned
248 * @dw_align_buf_size: Size of dw_align_buf
249 * @dw_align_buf_dma: DMA address for dw_align_buf
250 * @qtd_list: List of QTDs for this QH 305 * @qtd_list: List of QTDs for this QH
251 * @channel: Host channel currently processing transfers for this QH 306 * @channel: Host channel currently processing transfers for this QH
252 * @qh_list_entry: Entry for QH in either the periodic or non-periodic 307 * @qh_list_entry: Entry for QH in either the periodic or non-periodic
@@ -257,13 +312,20 @@ enum dwc2_transaction_type {
257 * @n_bytes: Xfer Bytes array. Each element corresponds to a transfer 312 * @n_bytes: Xfer Bytes array. Each element corresponds to a transfer
258 * descriptor and indicates original XferSize value for the 313 * descriptor and indicates original XferSize value for the
259 * descriptor 314 * descriptor
315 * @unreserve_timer: Timer for releasing periodic reservation.
316 * @dwc2_tt: Pointer to our tt info (or NULL if no tt).
317 * @ttport: Port number within our tt.
260 * @tt_buffer_dirty: True if clear_tt_buffer_complete is pending 318 * @tt_buffer_dirty: True if clear_tt_buffer_complete is pending
319 * @unreserve_pending: True if we planned to unreserve but haven't yet.
320 * @schedule_low_speed: True if we have a low/full speed component (either the
321 * host is in low/full speed mode or do_split).
261 * 322 *
262 * A Queue Head (QH) holds the static characteristics of an endpoint and 323 * A Queue Head (QH) holds the static characteristics of an endpoint and
263 * maintains a list of transfers (QTDs) for that endpoint. A QH structure may 324 * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
264 * be entered in either the non-periodic or periodic schedule. 325 * be entered in either the non-periodic or periodic schedule.
265 */ 326 */
266struct dwc2_qh { 327struct dwc2_qh {
328 struct dwc2_hsotg *hsotg;
267 u8 ep_type; 329 u8 ep_type;
268 u8 ep_is_in; 330 u8 ep_is_in;
269 u16 maxp; 331 u16 maxp;
@@ -273,15 +335,16 @@ struct dwc2_qh {
273 u8 do_split; 335 u8 do_split;
274 u8 td_first; 336 u8 td_first;
275 u8 td_last; 337 u8 td_last;
276 u16 usecs; 338 u16 host_us;
277 u16 interval; 339 u16 device_us;
278 u16 sched_frame; 340 u16 host_interval;
279 u16 frame_usecs[8]; 341 u16 device_interval;
280 u16 start_split_frame; 342 u16 next_active_frame;
343 u16 start_active_frame;
344 s16 num_hs_transfers;
345 struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
346 u32 ls_start_schedule_slice;
281 u16 ntd; 347 u16 ntd;
282 u8 *dw_align_buf;
283 int dw_align_buf_size;
284 dma_addr_t dw_align_buf_dma;
285 struct list_head qtd_list; 348 struct list_head qtd_list;
286 struct dwc2_host_chan *channel; 349 struct dwc2_host_chan *channel;
287 struct list_head qh_list_entry; 350 struct list_head qh_list_entry;
@@ -289,7 +352,12 @@ struct dwc2_qh {
289 dma_addr_t desc_list_dma; 352 dma_addr_t desc_list_dma;
290 u32 desc_list_sz; 353 u32 desc_list_sz;
291 u32 *n_bytes; 354 u32 *n_bytes;
355 struct timer_list unreserve_timer;
356 struct dwc2_tt *dwc_tt;
357 int ttport;
292 unsigned tt_buffer_dirty:1; 358 unsigned tt_buffer_dirty:1;
359 unsigned unreserve_pending:1;
360 unsigned schedule_low_speed:1;
293}; 361};
294 362
295/** 363/**
@@ -362,6 +430,8 @@ struct hc_xfer_info {
362}; 430};
363#endif 431#endif
364 432
433u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);
434
365/* Gets the struct usb_hcd that contains a struct dwc2_hsotg */ 435/* Gets the struct usb_hcd that contains a struct dwc2_hsotg */
366static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg) 436static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
367{ 437{
@@ -383,6 +453,12 @@ static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
383 dwc2_writel(mask, hsotg->regs + HCINTMSK(chnum)); 453 dwc2_writel(mask, hsotg->regs + HCINTMSK(chnum));
384} 454}
385 455
456void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan);
457void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
458 enum dwc2_halt_status halt_status);
459void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
460 struct dwc2_host_chan *chan);
461
386/* 462/*
387 * Reads HPRT0 in preparation to modify. It keeps the WC bits 0 so that if they 463 * Reads HPRT0 in preparation to modify. It keeps the WC bits 0 so that if they
388 * are read as 1, they won't clear when written back. 464 * are read as 1, they won't clear when written back.
@@ -456,7 +532,6 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
456 532
457/* Schedule Queue Functions */ 533/* Schedule Queue Functions */
458/* Implemented in hcd_queue.c */ 534/* Implemented in hcd_queue.c */
459extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg);
460extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, 535extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
461 struct dwc2_hcd_urb *urb, 536 struct dwc2_hcd_urb *urb,
462 gfp_t mem_flags); 537 gfp_t mem_flags);
@@ -571,6 +646,11 @@ static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
571 return (frame + inc) & HFNUM_MAX_FRNUM; 646 return (frame + inc) & HFNUM_MAX_FRNUM;
572} 647}
573 648
649static inline u16 dwc2_frame_num_dec(u16 frame, u16 dec)
650{
651 return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM;
652}
653
574static inline u16 dwc2_full_frame_num(u16 frame) 654static inline u16 dwc2_full_frame_num(u16 frame)
575{ 655{
576 return (frame & HFNUM_MAX_FRNUM) >> 3; 656 return (frame & HFNUM_MAX_FRNUM) >> 3;
@@ -648,7 +728,7 @@ static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
648 return 0; 728 return 0;
649 } 729 }
650 730
651 return qh->usecs; 731 return qh->host_us;
652} 732}
653 733
654extern void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg, 734extern void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
@@ -717,6 +797,12 @@ extern void dwc2_host_start(struct dwc2_hsotg *hsotg);
717extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg); 797extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg);
718extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, 798extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
719 int *hub_addr, int *hub_port); 799 int *hub_addr, int *hub_port);
800extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg,
801 void *context, gfp_t mem_flags,
802 int *ttport);
803
804extern void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg,
805 struct dwc2_tt *dwc_tt);
720extern int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context); 806extern int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context);
721extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 807extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
722 int status); 808 int status);
@@ -739,7 +825,7 @@ do { \
739 _qtd_ = list_entry((_qh_)->qtd_list.next, struct dwc2_qtd, \ 825 _qtd_ = list_entry((_qh_)->qtd_list.next, struct dwc2_qtd, \
740 qtd_list_entry); \ 826 qtd_list_entry); \
741 if (usb_pipeint(_qtd_->urb->pipe) && \ 827 if (usb_pipeint(_qtd_->urb->pipe) && \
742 (_qh_)->start_split_frame != 0 && !_qtd_->complete_split) { \ 828 (_qh_)->start_active_frame != 0 && !_qtd_->complete_split) { \
743 _hfnum_.d32 = dwc2_readl((_hcd_)->regs + HFNUM); \ 829 _hfnum_.d32 = dwc2_readl((_hcd_)->regs + HFNUM); \
744 switch (_hfnum_.b.frnum & 0x7) { \ 830 switch (_hfnum_.b.frnum & 0x7) { \
745 case 7: \ 831 case 7: \
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index a41274aa52ad..0e1d42b5dec5 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -81,7 +81,7 @@ static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
81static u16 dwc2_frame_incr_val(struct dwc2_qh *qh) 81static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
82{ 82{
83 return qh->dev_speed == USB_SPEED_HIGH ? 83 return qh->dev_speed == USB_SPEED_HIGH ?
84 (qh->interval + 8 - 1) / 8 : qh->interval; 84 (qh->host_interval + 8 - 1) / 8 : qh->host_interval;
85} 85}
86 86
87static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, 87static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
@@ -111,7 +111,7 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
111 dma_unmap_single(hsotg->dev, qh->desc_list_dma, 111 dma_unmap_single(hsotg->dev, qh->desc_list_dma,
112 qh->desc_list_sz, 112 qh->desc_list_sz,
113 DMA_FROM_DEVICE); 113 DMA_FROM_DEVICE);
114 kfree(qh->desc_list); 114 kmem_cache_free(desc_cache, qh->desc_list);
115 qh->desc_list = NULL; 115 qh->desc_list = NULL;
116 return -ENOMEM; 116 return -ENOMEM;
117 } 117 }
@@ -252,7 +252,7 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
252 chan = qh->channel; 252 chan = qh->channel;
253 inc = dwc2_frame_incr_val(qh); 253 inc = dwc2_frame_incr_val(qh);
254 if (qh->ep_type == USB_ENDPOINT_XFER_ISOC) 254 if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
255 i = dwc2_frame_list_idx(qh->sched_frame); 255 i = dwc2_frame_list_idx(qh->next_active_frame);
256 else 256 else
257 i = 0; 257 i = 0;
258 258
@@ -278,13 +278,13 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
278 return; 278 return;
279 279
280 chan->schinfo = 0; 280 chan->schinfo = 0;
281 if (chan->speed == USB_SPEED_HIGH && qh->interval) { 281 if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
282 j = 1; 282 j = 1;
283 /* TODO - check this */ 283 /* TODO - check this */
284 inc = (8 + qh->interval - 1) / qh->interval; 284 inc = (8 + qh->host_interval - 1) / qh->host_interval;
285 for (i = 0; i < inc; i++) { 285 for (i = 0; i < inc; i++) {
286 chan->schinfo |= j; 286 chan->schinfo |= j;
287 j = j << qh->interval; 287 j = j << qh->host_interval;
288 } 288 }
289 } else { 289 } else {
290 chan->schinfo = 0xff; 290 chan->schinfo = 0xff;
@@ -431,7 +431,10 @@ static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
431 431
432 hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); 432 hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
433 433
434 /* sched_frame is always frame number (not uFrame) both in FS and HS! */ 434 /*
435 * next_active_frame is always frame number (not uFrame) both in FS
436 * and HS!
437 */
435 438
436 /* 439 /*
437 * skip_frames is used to limit activated descriptors number 440 * skip_frames is used to limit activated descriptors number
@@ -514,13 +517,13 @@ static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
514 */ 517 */
515 fr_idx_tmp = dwc2_frame_list_idx(frame); 518 fr_idx_tmp = dwc2_frame_list_idx(frame);
516 fr_idx = (FRLISTEN_64_SIZE + 519 fr_idx = (FRLISTEN_64_SIZE +
517 dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp) 520 dwc2_frame_list_idx(qh->next_active_frame) -
518 % dwc2_frame_incr_val(qh); 521 fr_idx_tmp) % dwc2_frame_incr_val(qh);
519 fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE; 522 fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
520 } else { 523 } else {
521 qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh, 524 qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
522 &skip_frames); 525 &skip_frames);
523 fr_idx = dwc2_frame_list_idx(qh->sched_frame); 526 fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
524 } 527 }
525 528
526 qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx); 529 qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);
@@ -583,7 +586,7 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
583 u16 next_idx; 586 u16 next_idx;
584 587
585 idx = qh->td_last; 588 idx = qh->td_last;
586 inc = qh->interval; 589 inc = qh->host_interval;
587 hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); 590 hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
588 cur_idx = dwc2_frame_list_idx(hsotg->frame_number); 591 cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
589 next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed); 592 next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
@@ -605,11 +608,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
605 } 608 }
606 } 609 }
607 610
608 if (qh->interval) { 611 if (qh->host_interval) {
609 ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) / 612 ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
610 qh->interval; 613 qh->host_interval;
611 if (skip_frames && !qh->channel) 614 if (skip_frames && !qh->channel)
612 ntd_max -= skip_frames / qh->interval; 615 ntd_max -= skip_frames / qh->host_interval;
613 } 616 }
614 617
615 max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ? 618 max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
@@ -1029,7 +1032,7 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1029 idx); 1032 idx);
1030 if (rc < 0) 1033 if (rc < 0)
1031 return; 1034 return;
1032 idx = dwc2_desclist_idx_inc(idx, qh->interval, 1035 idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
1033 chan->speed); 1036 chan->speed);
1034 if (!rc) 1037 if (!rc)
1035 continue; 1038 continue;
@@ -1039,7 +1042,7 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1039 1042
1040 /* rc == DWC2_CMPL_STOP */ 1043 /* rc == DWC2_CMPL_STOP */
1041 1044
1042 if (qh->interval >= 32) 1045 if (qh->host_interval >= 32)
1043 goto stop_scan; 1046 goto stop_scan;
1044 1047
1045 qh->td_first = idx; 1048 qh->td_first = idx;
@@ -1242,8 +1245,10 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1242 for (i = 0; i < qtd_desc_count; i++) { 1245 for (i = 0; i < qtd_desc_count; i++) {
1243 if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd, 1246 if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1244 desc_num, halt_status, 1247 desc_num, halt_status,
1245 &xfer_done)) 1248 &xfer_done)) {
1249 qtd = NULL;
1246 goto stop_scan; 1250 goto stop_scan;
1251 }
1247 1252
1248 desc_num++; 1253 desc_num++;
1249 } 1254 }
@@ -1258,7 +1263,7 @@ stop_scan:
1258 if (halt_status == DWC2_HC_XFER_STALL) 1263 if (halt_status == DWC2_HC_XFER_STALL)
1259 qh->data_toggle = DWC2_HC_PID_DATA0; 1264 qh->data_toggle = DWC2_HC_PID_DATA0;
1260 else 1265 else
1261 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); 1266 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
1262 } 1267 }
1263 1268
1264 if (halt_status == DWC2_HC_XFER_COMPLETE) { 1269 if (halt_status == DWC2_HC_XFER_COMPLETE) {
@@ -1326,8 +1331,8 @@ void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
1326 dwc2_hcd_qh_unlink(hsotg, qh); 1331 dwc2_hcd_qh_unlink(hsotg, qh);
1327 } else { 1332 } else {
1328 /* Keep in assigned schedule to continue transfer */ 1333 /* Keep in assigned schedule to continue transfer */
1329 list_move(&qh->qh_list_entry, 1334 list_move_tail(&qh->qh_list_entry,
1330 &hsotg->periodic_sched_assigned); 1335 &hsotg->periodic_sched_assigned);
1331 /* 1336 /*
1332 * If channel has been halted during giveback of urb 1337 * If channel has been halted during giveback of urb
1333 * then prevent any new scheduling. 1338 * then prevent any new scheduling.
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index cadba8b13c48..906f223542ee 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -55,12 +55,16 @@
55/* This function is for debug only */ 55/* This function is for debug only */
56static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg) 56static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
57{ 57{
58#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
59 u16 curr_frame_number = hsotg->frame_number; 58 u16 curr_frame_number = hsotg->frame_number;
59 u16 expected = dwc2_frame_num_inc(hsotg->last_frame_num, 1);
60
61 if (expected != curr_frame_number)
62 dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n",
63 expected, curr_frame_number);
60 64
65#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
61 if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) { 66 if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
62 if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) != 67 if (expected != curr_frame_number) {
63 curr_frame_number) {
64 hsotg->frame_num_array[hsotg->frame_num_idx] = 68 hsotg->frame_num_array[hsotg->frame_num_idx] =
65 curr_frame_number; 69 curr_frame_number;
66 hsotg->last_frame_num_array[hsotg->frame_num_idx] = 70 hsotg->last_frame_num_array[hsotg->frame_num_idx] =
@@ -79,14 +83,15 @@ static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
79 } 83 }
80 hsotg->dumped_frame_num_array = 1; 84 hsotg->dumped_frame_num_array = 1;
81 } 85 }
82 hsotg->last_frame_num = curr_frame_number;
83#endif 86#endif
87 hsotg->last_frame_num = curr_frame_number;
84} 88}
85 89
86static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg, 90static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
87 struct dwc2_host_chan *chan, 91 struct dwc2_host_chan *chan,
88 struct dwc2_qtd *qtd) 92 struct dwc2_qtd *qtd)
89{ 93{
94 struct usb_device *root_hub = dwc2_hsotg_to_hcd(hsotg)->self.root_hub;
90 struct urb *usb_urb; 95 struct urb *usb_urb;
91 96
92 if (!chan->qh) 97 if (!chan->qh)
@@ -102,6 +107,15 @@ static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
102 if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt) 107 if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
103 return; 108 return;
104 109
110 /*
111 * The root hub doesn't really have a TT, but Linux thinks it
112 * does because how could you have a "high speed hub" that
113 * talks directly to low speed devices without a TT?
114 * It's all lies. Lies, I tell you.
115 */
116 if (usb_urb->dev->tt->hub == root_hub)
117 return;
118
105 if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) { 119 if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
106 chan->qh->tt_buffer_dirty = 1; 120 chan->qh->tt_buffer_dirty = 1;
107 if (usb_hub_clear_tt_buffer(usb_urb)) 121 if (usb_hub_clear_tt_buffer(usb_urb))
@@ -138,13 +152,19 @@ static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
138 while (qh_entry != &hsotg->periodic_sched_inactive) { 152 while (qh_entry != &hsotg->periodic_sched_inactive) {
139 qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry); 153 qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
140 qh_entry = qh_entry->next; 154 qh_entry = qh_entry->next;
141 if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number)) 155 if (dwc2_frame_num_le(qh->next_active_frame,
156 hsotg->frame_number)) {
157 dwc2_sch_vdbg(hsotg, "QH=%p ready fn=%04x, nxt=%04x\n",
158 qh, hsotg->frame_number,
159 qh->next_active_frame);
160
142 /* 161 /*
143 * Move QH to the ready list to be executed next 162 * Move QH to the ready list to be executed next
144 * (micro)frame 163 * (micro)frame
145 */ 164 */
146 list_move(&qh->qh_list_entry, 165 list_move_tail(&qh->qh_list_entry,
147 &hsotg->periodic_sched_ready); 166 &hsotg->periodic_sched_ready);
167 }
148 } 168 }
149 tr_type = dwc2_hcd_select_transactions(hsotg); 169 tr_type = dwc2_hcd_select_transactions(hsotg);
150 if (tr_type != DWC2_TRANSACTION_NONE) 170 if (tr_type != DWC2_TRANSACTION_NONE)
@@ -472,18 +492,6 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
472 xfer_length = urb->length - urb->actual_length; 492 xfer_length = urb->length - urb->actual_length;
473 } 493 }
474 494
475 /* Non DWORD-aligned buffer case handling */
476 if (chan->align_buf && xfer_length) {
477 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
478 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
479 chan->qh->dw_align_buf_size,
480 chan->ep_is_in ?
481 DMA_FROM_DEVICE : DMA_TO_DEVICE);
482 if (chan->ep_is_in)
483 memcpy(urb->buf + urb->actual_length,
484 chan->qh->dw_align_buf, xfer_length);
485 }
486
487 dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n", 495 dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
488 urb->actual_length, xfer_length); 496 urb->actual_length, xfer_length);
489 urb->actual_length += xfer_length; 497 urb->actual_length += xfer_length;
@@ -573,21 +581,6 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
573 frame_desc->status = 0; 581 frame_desc->status = 0;
574 frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg, 582 frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
575 chan, chnum, qtd, halt_status, NULL); 583 chan, chnum, qtd, halt_status, NULL);
576
577 /* Non DWORD-aligned buffer case handling */
578 if (chan->align_buf && frame_desc->actual_length) {
579 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
580 __func__);
581 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
582 chan->qh->dw_align_buf_size,
583 chan->ep_is_in ?
584 DMA_FROM_DEVICE : DMA_TO_DEVICE);
585 if (chan->ep_is_in)
586 memcpy(urb->buf + frame_desc->offset +
587 qtd->isoc_split_offset,
588 chan->qh->dw_align_buf,
589 frame_desc->actual_length);
590 }
591 break; 584 break;
592 case DWC2_HC_XFER_FRAME_OVERRUN: 585 case DWC2_HC_XFER_FRAME_OVERRUN:
593 urb->error_count++; 586 urb->error_count++;
@@ -608,21 +601,6 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
608 frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg, 601 frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
609 chan, chnum, qtd, halt_status, NULL); 602 chan, chnum, qtd, halt_status, NULL);
610 603
611 /* Non DWORD-aligned buffer case handling */
612 if (chan->align_buf && frame_desc->actual_length) {
613 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
614 __func__);
615 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
616 chan->qh->dw_align_buf_size,
617 chan->ep_is_in ?
618 DMA_FROM_DEVICE : DMA_TO_DEVICE);
619 if (chan->ep_is_in)
620 memcpy(urb->buf + frame_desc->offset +
621 qtd->isoc_split_offset,
622 chan->qh->dw_align_buf,
623 frame_desc->actual_length);
624 }
625
626 /* Skip whole frame */ 604 /* Skip whole frame */
627 if (chan->qh->do_split && 605 if (chan->qh->do_split &&
628 chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in && 606 chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
@@ -688,8 +666,6 @@ static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
688 } 666 }
689 667
690no_qtd: 668no_qtd:
691 if (qh->channel)
692 qh->channel->align_buf = 0;
693 qh->channel = NULL; 669 qh->channel = NULL;
694 dwc2_hcd_qh_deactivate(hsotg, qh, continue_split); 670 dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
695} 671}
@@ -846,7 +822,7 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
846 * halt to be queued when the periodic schedule is 822 * halt to be queued when the periodic schedule is
847 * processed. 823 * processed.
848 */ 824 */
849 list_move(&chan->qh->qh_list_entry, 825 list_move_tail(&chan->qh->qh_list_entry,
850 &hsotg->periodic_sched_assigned); 826 &hsotg->periodic_sched_assigned);
851 827
852 /* 828 /*
@@ -954,14 +930,6 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
954 930
955 frame_desc->actual_length += len; 931 frame_desc->actual_length += len;
956 932
957 if (chan->align_buf) {
958 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
959 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
960 chan->qh->dw_align_buf_size, DMA_FROM_DEVICE);
961 memcpy(qtd->urb->buf + frame_desc->offset +
962 qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
963 }
964
965 qtd->isoc_split_offset += len; 933 qtd->isoc_split_offset += len;
966 934
967 if (frame_desc->actual_length >= frame_desc->length) { 935 if (frame_desc->actual_length >= frame_desc->length) {
@@ -1184,19 +1152,6 @@ static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1184 xfer_length = urb->length - urb->actual_length; 1152 xfer_length = urb->length - urb->actual_length;
1185 } 1153 }
1186 1154
1187 /* Non DWORD-aligned buffer case handling */
1188 if (chan->align_buf && xfer_length && chan->ep_is_in) {
1189 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
1190 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
1191 chan->qh->dw_align_buf_size,
1192 chan->ep_is_in ?
1193 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1194 if (chan->ep_is_in)
1195 memcpy(urb->buf + urb->actual_length,
1196 chan->qh->dw_align_buf,
1197 xfer_length);
1198 }
1199
1200 urb->actual_length += xfer_length; 1155 urb->actual_length += xfer_length;
1201 1156
1202 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); 1157 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
@@ -1416,14 +1371,50 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
1416 1371
1417 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1372 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1418 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 1373 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1419 int frnum = dwc2_hcd_get_frame_number(hsotg); 1374 struct dwc2_qh *qh = chan->qh;
1375 bool past_end;
1376
1377 if (hsotg->core_params->uframe_sched <= 0) {
1378 int frnum = dwc2_hcd_get_frame_number(hsotg);
1379
1380 /* Don't have num_hs_transfers; simple logic */
1381 past_end = dwc2_full_frame_num(frnum) !=
1382 dwc2_full_frame_num(qh->next_active_frame);
1383 } else {
1384 int end_frnum;
1420 1385
1421 if (dwc2_full_frame_num(frnum) !=
1422 dwc2_full_frame_num(chan->qh->sched_frame)) {
1423 /* 1386 /*
1424 * No longer in the same full speed frame. 1387 * Figure out the end frame based on schedule.
1425 * Treat this as a transaction error. 1388 *
1426 */ 1389 * We don't want to go on trying again and again
1390 * forever. Let's stop when we've done all the
1391 * transfers that were scheduled.
1392 *
1393 * We're going to be comparing start_active_frame
1394 * and next_active_frame, both of which are 1
1395 * before the time the packet goes on the wire,
1396 * so that cancels out. Basically if had 1
1397 * transfer and we saw 1 NYET then we're done.
1398 * We're getting a NYET here so if next >=
1399 * (start + num_transfers) we're done. The
1400 * complexity is that for all but ISOC_OUT we
1401 * skip one slot.
1402 */
1403 end_frnum = dwc2_frame_num_inc(
1404 qh->start_active_frame,
1405 qh->num_hs_transfers);
1406
1407 if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
1408 qh->ep_is_in)
1409 end_frnum =
1410 dwc2_frame_num_inc(end_frnum, 1);
1411
1412 past_end = dwc2_frame_num_le(
1413 end_frnum, qh->next_active_frame);
1414 }
1415
1416 if (past_end) {
1417 /* Treat this as a transaction error. */
1427#if 0 1418#if 0
1428 /* 1419 /*
1429 * Todo: Fix system performance so this can 1420 * Todo: Fix system performance so this can
@@ -2008,6 +1999,16 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
2008 } 1999 }
2009 2000
2010 dwc2_writel(hcint, hsotg->regs + HCINT(chnum)); 2001 dwc2_writel(hcint, hsotg->regs + HCINT(chnum));
2002
2003 /*
2004 * If we got an interrupt after someone called
2005 * dwc2_hcd_endpoint_disable() we don't want to crash below
2006 */
2007 if (!chan->qh) {
2008 dev_warn(hsotg->dev, "Interrupt on disabled channel\n");
2009 return;
2010 }
2011
2011 chan->hcint = hcint; 2012 chan->hcint = hcint;
2012 hcint &= hcintmsk; 2013 hcint &= hcintmsk;
2013 2014
@@ -2130,6 +2131,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2130{ 2131{
2131 u32 haint; 2132 u32 haint;
2132 int i; 2133 int i;
2134 struct dwc2_host_chan *chan, *chan_tmp;
2133 2135
2134 haint = dwc2_readl(hsotg->regs + HAINT); 2136 haint = dwc2_readl(hsotg->regs + HAINT);
2135 if (dbg_perio()) { 2137 if (dbg_perio()) {
@@ -2138,6 +2140,22 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2138 dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint); 2140 dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2139 } 2141 }
2140 2142
2143 /*
2144 * According to USB 2.0 spec section 11.18.8, a host must
2145 * issue complete-split transactions in a microframe for a
2146 * set of full-/low-speed endpoints in the same relative
2147 * order as the start-splits were issued in a microframe for.
2148 */
2149 list_for_each_entry_safe(chan, chan_tmp, &hsotg->split_order,
2150 split_order_list_entry) {
2151 int hc_num = chan->hc_num;
2152
2153 if (haint & (1 << hc_num)) {
2154 dwc2_hc_n_intr(hsotg, hc_num);
2155 haint &= ~(1 << hc_num);
2156 }
2157 }
2158
2141 for (i = 0; i < hsotg->core_params->host_channels; i++) { 2159 for (i = 0; i < hsotg->core_params->host_channels; i++) {
2142 if (haint & (1 << i)) 2160 if (haint & (1 << i))
2143 dwc2_hc_n_intr(hsotg, i); 2161 dwc2_hc_n_intr(hsotg, i);
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 27d402f680a3..7f634fd771c7 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -38,6 +38,7 @@
38 * This file contains the functions to manage Queue Heads and Queue 38 * This file contains the functions to manage Queue Heads and Queue
39 * Transfer Descriptors for Host mode 39 * Transfer Descriptors for Host mode
40 */ 40 */
41#include <linux/gcd.h>
41#include <linux/kernel.h> 42#include <linux/kernel.h>
42#include <linux/module.h> 43#include <linux/module.h>
43#include <linux/spinlock.h> 44#include <linux/spinlock.h>
@@ -53,194 +54,8 @@
53#include "core.h" 54#include "core.h"
54#include "hcd.h" 55#include "hcd.h"
55 56
56/** 57/* Wait this long before releasing periodic reservation */
57 * dwc2_qh_init() - Initializes a QH structure 58#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
58 *
59 * @hsotg: The HCD state structure for the DWC OTG controller
60 * @qh: The QH to init
61 * @urb: Holds the information about the device/endpoint needed to initialize
62 * the QH
63 */
64#define SCHEDULE_SLOP 10
65static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
66 struct dwc2_hcd_urb *urb)
67{
68 int dev_speed, hub_addr, hub_port;
69 char *speed, *type;
70
71 dev_vdbg(hsotg->dev, "%s()\n", __func__);
72
73 /* Initialize QH */
74 qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
75 qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;
76
77 qh->data_toggle = DWC2_HC_PID_DATA0;
78 qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
79 INIT_LIST_HEAD(&qh->qtd_list);
80 INIT_LIST_HEAD(&qh->qh_list_entry);
81
82 /* FS/LS Endpoint on HS Hub, NOT virtual root hub */
83 dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
84
85 dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
86
87 if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
88 hub_addr != 0 && hub_addr != 1) {
89 dev_vdbg(hsotg->dev,
90 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
91 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
92 hub_port);
93 qh->do_split = 1;
94 }
95
96 if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
97 qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
98 /* Compute scheduling parameters once and save them */
99 u32 hprt, prtspd;
100
101 /* Todo: Account for split transfers in the bus time */
102 int bytecount =
103 dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
104
105 qh->usecs = NS_TO_US(usb_calc_bus_time(qh->do_split ?
106 USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
107 qh->ep_type == USB_ENDPOINT_XFER_ISOC,
108 bytecount));
109
110 /* Ensure frame_number corresponds to the reality */
111 hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
112 /* Start in a slightly future (micro)frame */
113 qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
114 SCHEDULE_SLOP);
115 qh->interval = urb->interval;
116#if 0
117 /* Increase interrupt polling rate for debugging */
118 if (qh->ep_type == USB_ENDPOINT_XFER_INT)
119 qh->interval = 8;
120#endif
121 hprt = dwc2_readl(hsotg->regs + HPRT0);
122 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
123 if (prtspd == HPRT0_SPD_HIGH_SPEED &&
124 (dev_speed == USB_SPEED_LOW ||
125 dev_speed == USB_SPEED_FULL)) {
126 qh->interval *= 8;
127 qh->sched_frame |= 0x7;
128 qh->start_split_frame = qh->sched_frame;
129 }
130 dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
131 }
132
133 dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
134 dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
135 dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
136 dwc2_hcd_get_dev_addr(&urb->pipe_info));
137 dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
138 dwc2_hcd_get_ep_num(&urb->pipe_info),
139 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
140
141 qh->dev_speed = dev_speed;
142
143 switch (dev_speed) {
144 case USB_SPEED_LOW:
145 speed = "low";
146 break;
147 case USB_SPEED_FULL:
148 speed = "full";
149 break;
150 case USB_SPEED_HIGH:
151 speed = "high";
152 break;
153 default:
154 speed = "?";
155 break;
156 }
157 dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);
158
159 switch (qh->ep_type) {
160 case USB_ENDPOINT_XFER_ISOC:
161 type = "isochronous";
162 break;
163 case USB_ENDPOINT_XFER_INT:
164 type = "interrupt";
165 break;
166 case USB_ENDPOINT_XFER_CONTROL:
167 type = "control";
168 break;
169 case USB_ENDPOINT_XFER_BULK:
170 type = "bulk";
171 break;
172 default:
173 type = "?";
174 break;
175 }
176
177 dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);
178
179 if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
180 dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
181 qh->usecs);
182 dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
183 qh->interval);
184 }
185}
186
187/**
188 * dwc2_hcd_qh_create() - Allocates and initializes a QH
189 *
190 * @hsotg: The HCD state structure for the DWC OTG controller
191 * @urb: Holds the information about the device/endpoint needed
192 * to initialize the QH
193 * @atomic_alloc: Flag to do atomic allocation if needed
194 *
195 * Return: Pointer to the newly allocated QH, or NULL on error
196 */
197struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
198 struct dwc2_hcd_urb *urb,
199 gfp_t mem_flags)
200{
201 struct dwc2_qh *qh;
202
203 if (!urb->priv)
204 return NULL;
205
206 /* Allocate memory */
207 qh = kzalloc(sizeof(*qh), mem_flags);
208 if (!qh)
209 return NULL;
210
211 dwc2_qh_init(hsotg, qh, urb);
212
213 if (hsotg->core_params->dma_desc_enable > 0 &&
214 dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
215 dwc2_hcd_qh_free(hsotg, qh);
216 return NULL;
217 }
218
219 return qh;
220}
221
222/**
223 * dwc2_hcd_qh_free() - Frees the QH
224 *
225 * @hsotg: HCD instance
226 * @qh: The QH to free
227 *
228 * QH should already be removed from the list. QTD list should already be empty
229 * if called from URB Dequeue.
230 *
231 * Must NOT be called with interrupt disabled or spinlock held
232 */
233void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
234{
235 if (qh->desc_list) {
236 dwc2_hcd_qh_free_ddma(hsotg, qh);
237 } else {
238 /* kfree(NULL) is safe */
239 kfree(qh->dw_align_buf);
240 qh->dw_align_buf_dma = (dma_addr_t)0;
241 }
242 kfree(qh);
243}
244 59
245/** 60/**
246 * dwc2_periodic_channel_available() - Checks that a channel is available for a 61 * dwc2_periodic_channel_available() - Checks that a channel is available for a
@@ -301,19 +116,19 @@ static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
301 * High speed mode 116 * High speed mode
302 * Max periodic usecs is 80% x 125 usec = 100 usec 117 * Max periodic usecs is 80% x 125 usec = 100 usec
303 */ 118 */
304 max_claimed_usecs = 100 - qh->usecs; 119 max_claimed_usecs = 100 - qh->host_us;
305 } else { 120 } else {
306 /* 121 /*
307 * Full speed mode 122 * Full speed mode
308 * Max periodic usecs is 90% x 1000 usec = 900 usec 123 * Max periodic usecs is 90% x 1000 usec = 900 usec
309 */ 124 */
310 max_claimed_usecs = 900 - qh->usecs; 125 max_claimed_usecs = 900 - qh->host_us;
311 } 126 }
312 127
313 if (hsotg->periodic_usecs > max_claimed_usecs) { 128 if (hsotg->periodic_usecs > max_claimed_usecs) {
314 dev_err(hsotg->dev, 129 dev_err(hsotg->dev,
315 "%s: already claimed usecs %d, required usecs %d\n", 130 "%s: already claimed usecs %d, required usecs %d\n",
316 __func__, hsotg->periodic_usecs, qh->usecs); 131 __func__, hsotg->periodic_usecs, qh->host_us);
317 status = -ENOSPC; 132 status = -ENOSPC;
318 } 133 }
319 134
@@ -321,113 +136,1177 @@ static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
321} 136}
322 137
323/** 138/**
324 * Microframe scheduler 139 * pmap_schedule() - Schedule time in a periodic bitmap (pmap).
325 * track the total use in hsotg->frame_usecs 140 *
326 * keep each qh use in qh->frame_usecs 141 * @map: The bitmap representing the schedule; will be updated
327 * when surrendering the qh then donate the time back 142 * upon success.
143 * @bits_per_period: The schedule represents several periods. This is how many
144 * bits are in each period. It's assumed that the beginning
145 * of the schedule will repeat after its end.
146 * @periods_in_map: The number of periods in the schedule.
147 * @num_bits: The number of bits we need per period we want to reserve
148 * in this function call.
149 * @interval: How often we need to be scheduled for the reservation this
150 * time. 1 means every period. 2 means every other period.
151 * ...you get the picture?
152 * @start: The bit number to start at. Normally 0. Must be within
153 * the interval or we return failure right away.
154 * @only_one_period: Normally we'll allow picking a start anywhere within the
155 * first interval, since we can still make all repetition
156 * requirements by doing that. However, if you pass true
157 * here then we'll return failure if we can't fit within
158 * the period that "start" is in.
159 *
160 * The idea here is that we want to schedule time for repeating events that all
161 * want the same resource. The resource is divided into fixed-sized periods
162 * and the events want to repeat every "interval" periods. The schedule
163 * granularity is one bit.
164 *
165 * To keep things "simple", we'll represent our schedule with a bitmap that
166 * contains a fixed number of periods. This gets rid of a lot of complexity
167 * but does mean that we need to handle things specially (and non-ideally) if
168 * the number of the periods in the schedule doesn't match well with the
169 * intervals that we're trying to schedule.
170 *
171 * Here's an explanation of the scheme we'll implement, assuming 8 periods.
172 * - If interval is 1, we need to take up space in each of the 8
173 * periods we're scheduling. Easy.
174 * - If interval is 2, we need to take up space in half of the
175 * periods. Again, easy.
176 * - If interval is 3, we actually need to fall back to interval 1.
177 * Why? Because we might need time in any period. AKA for the
178 * first 8 periods, we'll be in slot 0, 3, 6. Then we'll be
179 * in slot 1, 4, 7. Then we'll be in 2, 5. Then we'll be back to
180 * 0, 3, and 6. Since we could be in any frame we need to reserve
181 * for all of them. Sucks, but that's what you gotta do. Note that
182 * if we were instead scheduling 8 * 3 = 24 we'd do much better, but
183 * then we need more memory and time to do scheduling.
184 * - If interval is 4, easy.
185 * - If interval is 5, we again need interval 1. The schedule will be
186 * 0, 5, 2, 7, 4, 1, 6, 3, 0
187 * - If interval is 6, we need interval 2. 0, 6, 4, 2.
188 * - If interval is 7, we need interval 1.
189 * - If interval is 8, we need interval 8.
190 *
191 * If you do the math, you'll see that we need to pretend that interval is
192 * equal to the greatest_common_divisor(interval, periods_in_map).
193 *
194 * Note that at the moment this function tends to front-pack the schedule.
195 * In some cases that's really non-ideal (it's hard to schedule things that
196 * need to repeat every period). In other cases it's perfect (you can easily
197 * schedule bigger, less often repeating things).
198 *
199 * Here's the algorithm in action (8 periods, 5 bits per period):
200 * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0
201 * |*****| ***|*****| ***|*****| ***|*****| ***| OK 3 bits, intv 3 at 2
202 * |*****|* ***|*****| ***|*****|* ***|*****| ***| OK 1 bits, intv 4 at 5
203 * |** |* |** | |** |* |** | | Remv 3 bits, intv 3 at 2
204 * |*** |* |*** | |*** |* |*** | | OK 1 bits, intv 6 at 2
205 * |**** |* * |**** | * |**** |* * |**** | * | OK 1 bits, intv 1 at 3
206 * |**** |**** |**** | *** |**** |**** |**** | *** | OK 2 bits, intv 2 at 6
207 * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 1 at 4
208 * |*****|*****|*****| ****|*****|*****|*****| ****| FAIL 1 bits, intv 1
209 * | ***|*****| ***| ****| ***|*****| ***| ****| Remv 2 bits, intv 2 at 0
210 * | ***| ****| ***| ****| ***| ****| ***| ****| Remv 1 bits, intv 4 at 5
211 * | **| ****| **| ****| **| ****| **| ****| Remv 1 bits, intv 6 at 2
212 * | *| ** *| *| ** *| *| ** *| *| ** *| Remv 1 bits, intv 1 at 3
213 * | *| *| *| *| *| *| *| *| Remv 2 bits, intv 2 at 6
214 * | | | | | | | | | Remv 1 bits, intv 1 at 4
215 * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0
216 * |*** | |** | |*** | |** | | OK 1 bits, intv 4 at 2
217 * |*****| |** **| |*****| |** **| | OK 2 bits, intv 2 at 3
218 * |*****|* |** **| |*****|* |** **| | OK 1 bits, intv 4 at 5
219 * |*****|*** |** **| ** |*****|*** |** **| ** | OK 2 bits, intv 2 at 6
220 * |*****|*****|** **| ****|*****|*****|** **| ****| OK 2 bits, intv 2 at 8
221 * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 4 at 12
222 *
223 * This function is pretty generic and could be easily abstracted if anything
224 * needed similar scheduling.
225 *
226 * Returns either -ENOSPC or a >= 0 start bit which should be passed to the
227 * unschedule routine. The map bitmap will be updated on a non-error result.
328 */ 228 */
329static const unsigned short max_uframe_usecs[] = { 229static int pmap_schedule(unsigned long *map, int bits_per_period,
330 100, 100, 100, 100, 100, 100, 30, 0 230 int periods_in_map, int num_bits,
331}; 231 int interval, int start, bool only_one_period)
232{
233 int interval_bits;
234 int to_reserve;
235 int first_end;
236 int i;
237
238 if (num_bits > bits_per_period)
239 return -ENOSPC;
240
241 /* Adjust interval as per description */
242 interval = gcd(interval, periods_in_map);
243
244 interval_bits = bits_per_period * interval;
245 to_reserve = periods_in_map / interval;
246
247 /* If start has gotten us past interval then we can't schedule */
248 if (start >= interval_bits)
249 return -ENOSPC;
250
251 if (only_one_period)
252 /* Must fit within same period as start; end at begin of next */
253 first_end = (start / bits_per_period + 1) * bits_per_period;
254 else
255 /* Can fit anywhere in the first interval */
256 first_end = interval_bits;
257
258 /*
259 * We'll try to pick the first repetition, then see if that time
260 * is free for each of the subsequent repetitions. If it's not
261 * we'll adjust the start time for the next search of the first
262 * repetition.
263 */
264 while (start + num_bits <= first_end) {
265 int end;
266
267 /* Need to stay within this period */
268 end = (start / bits_per_period + 1) * bits_per_period;
269
270 /* Look for num_bits us in this microframe starting at start */
271 start = bitmap_find_next_zero_area(map, end, start, num_bits,
272 0);
273
274 /*
275 * We should get start >= end if we fail. We might be
276 * able to check the next microframe depending on the
277 * interval, so continue on (start already updated).
278 */
279 if (start >= end) {
280 start = end;
281 continue;
282 }
283
284 /* At this point we have a valid point for first one */
285 for (i = 1; i < to_reserve; i++) {
286 int ith_start = start + interval_bits * i;
287 int ith_end = end + interval_bits * i;
288 int ret;
289
290 /* Use this as a dumb "check if bits are 0" */
291 ret = bitmap_find_next_zero_area(
292 map, ith_start + num_bits, ith_start, num_bits,
293 0);
294
295 /* We got the right place, continue checking */
296 if (ret == ith_start)
297 continue;
298
299 /* Move start up for next time and exit for loop */
300 ith_start = bitmap_find_next_zero_area(
301 map, ith_end, ith_start, num_bits, 0);
302 if (ith_start >= ith_end)
303 /* Need a while new period next time */
304 start = end;
305 else
306 start = ith_start - interval_bits * i;
307 break;
308 }
309
310 /* If didn't exit the for loop with a break, we have success */
311 if (i == to_reserve)
312 break;
313 }
332 314
333void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg) 315 if (start + num_bits > first_end)
316 return -ENOSPC;
317
318 for (i = 0; i < to_reserve; i++) {
319 int ith_start = start + interval_bits * i;
320
321 bitmap_set(map, ith_start, num_bits);
322 }
323
324 return start;
325}
326
327/**
328 * pmap_unschedule() - Undo work done by pmap_schedule()
329 *
330 * @map: See pmap_schedule().
331 * @bits_per_period: See pmap_schedule().
332 * @periods_in_map: See pmap_schedule().
333 * @num_bits: The number of bits that was passed to schedule.
334 * @interval: The interval that was passed to schedule.
335 * @start: The return value from pmap_schedule().
336 */
337static void pmap_unschedule(unsigned long *map, int bits_per_period,
338 int periods_in_map, int num_bits,
339 int interval, int start)
334{ 340{
341 int interval_bits;
342 int to_release;
335 int i; 343 int i;
336 344
337 for (i = 0; i < 8; i++) 345 /* Adjust interval as per description in pmap_schedule() */
338 hsotg->frame_usecs[i] = max_uframe_usecs[i]; 346 interval = gcd(interval, periods_in_map);
347
348 interval_bits = bits_per_period * interval;
349 to_release = periods_in_map / interval;
350
351 for (i = 0; i < to_release; i++) {
352 int ith_start = start + interval_bits * i;
353
354 bitmap_clear(map, ith_start, num_bits);
355 }
339} 356}
340 357
341static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 358/*
359 * cat_printf() - A printf() + strcat() helper
360 *
361 * This is useful for concatenating a bunch of strings where each string is
362 * constructed using printf.
363 *
364 * @buf: The destination buffer; will be updated to point after the printed
365 * data.
366 * @size: The number of bytes in the buffer (includes space for '\0').
367 * @fmt: The format for printf.
368 * @...: The args for printf.
369 */
370static void cat_printf(char **buf, size_t *size, const char *fmt, ...)
342{ 371{
343 unsigned short utime = qh->usecs; 372 va_list args;
344 int i; 373 int i;
345 374
346 for (i = 0; i < 8; i++) { 375 if (*size == 0)
347 /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */ 376 return;
348 if (utime <= hsotg->frame_usecs[i]) { 377
349 hsotg->frame_usecs[i] -= utime; 378 va_start(args, fmt);
350 qh->frame_usecs[i] += utime; 379 i = vsnprintf(*buf, *size, fmt, args);
351 return i; 380 va_end(args);
352 } 381
382 if (i >= *size) {
383 (*buf)[*size - 1] = '\0';
384 *buf += *size;
385 *size = 0;
386 } else {
387 *buf += i;
388 *size -= i;
353 } 389 }
354 return -ENOSPC;
355} 390}
356 391
357/* 392/*
358 * use this for FS apps that can span multiple uframes 393 * pmap_print() - Print the given periodic map
394 *
395 * Will attempt to print out the periodic schedule.
396 *
397 * @map: See pmap_schedule().
398 * @bits_per_period: See pmap_schedule().
399 * @periods_in_map: See pmap_schedule().
400 * @period_name: The name of 1 period, like "uFrame"
401 * @units: The name of the units, like "us".
402 * @print_fn: The function to call for printing.
403 * @print_data: Opaque data to pass to the print function.
404 */
405static void pmap_print(unsigned long *map, int bits_per_period,
406 int periods_in_map, const char *period_name,
407 const char *units,
408 void (*print_fn)(const char *str, void *data),
409 void *print_data)
410{
411 int period;
412
413 for (period = 0; period < periods_in_map; period++) {
414 char tmp[64];
415 char *buf = tmp;
416 size_t buf_size = sizeof(tmp);
417 int period_start = period * bits_per_period;
418 int period_end = period_start + bits_per_period;
419 int start = 0;
420 int count = 0;
421 bool printed = false;
422 int i;
423
424 for (i = period_start; i < period_end + 1; i++) {
425 /* Handle case when ith bit is set */
426 if (i < period_end &&
427 bitmap_find_next_zero_area(map, i + 1,
428 i, 1, 0) != i) {
429 if (count == 0)
430 start = i - period_start;
431 count++;
432 continue;
433 }
434
435 /* ith bit isn't set; don't care if count == 0 */
436 if (count == 0)
437 continue;
438
439 if (!printed)
440 cat_printf(&buf, &buf_size, "%s %d: ",
441 period_name, period);
442 else
443 cat_printf(&buf, &buf_size, ", ");
444 printed = true;
445
446 cat_printf(&buf, &buf_size, "%d %s -%3d %s", start,
447 units, start + count - 1, units);
448 count = 0;
449 }
450
451 if (printed)
452 print_fn(tmp, print_data);
453 }
454}
455
456/**
457 * dwc2_get_ls_map() - Get the map used for the given qh
458 *
459 * @hsotg: The HCD state structure for the DWC OTG controller.
460 * @qh: QH for the periodic transfer.
461 *
462 * We'll always get the periodic map out of our TT. Note that even if we're
463 * running the host straight in low speed / full speed mode it appears as if
464 * a TT is allocated for us, so we'll use it. If that ever changes we can
465 * add logic here to get a map out of "hsotg" if !qh->do_split.
466 *
467 * Returns: the map or NULL if a map couldn't be found.
359 */ 468 */
360static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 469static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
470 struct dwc2_qh *qh)
361{ 471{
362 unsigned short utime = qh->usecs; 472 unsigned long *map;
363 unsigned short xtime; 473
364 int t_left; 474 /* Don't expect to be missing a TT and be doing low speed scheduling */
475 if (WARN_ON(!qh->dwc_tt))
476 return NULL;
477
478 /* Get the map and adjust if this is a multi_tt hub */
479 map = qh->dwc_tt->periodic_bitmaps;
480 if (qh->dwc_tt->usb_tt->multi)
481 map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
482
483 return map;
484}
485
486struct dwc2_qh_print_data {
487 struct dwc2_hsotg *hsotg;
488 struct dwc2_qh *qh;
489};
490
491/**
492 * dwc2_qh_print() - Helper function for dwc2_qh_schedule_print()
493 *
494 * @str: The string to print
495 * @data: A pointer to a struct dwc2_qh_print_data
496 */
497static void dwc2_qh_print(const char *str, void *data)
498{
499 struct dwc2_qh_print_data *print_data = data;
500
501 dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str);
502}
503
504/**
505 * dwc2_qh_schedule_print() - Print the periodic schedule
506 *
507 * @hsotg: The HCD state structure for the DWC OTG controller.
508 * @qh: QH to print.
509 */
510static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
511 struct dwc2_qh *qh)
512{
513 struct dwc2_qh_print_data print_data = { hsotg, qh };
365 int i; 514 int i;
366 int j;
367 int k;
368 515
369 for (i = 0; i < 8; i++) { 516 /*
370 if (hsotg->frame_usecs[i] <= 0) 517 * The printing functions are quite slow and inefficient.
518 * If we don't have tracing turned on, don't run unless the special
519 * define is turned on.
520 */
521#ifndef DWC2_PRINT_SCHEDULE
522 return;
523#endif
524
525 if (qh->schedule_low_speed) {
526 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
527
528 dwc2_sch_dbg(hsotg, "QH=%p LS/FS trans: %d=>%d us @ %d us",
529 qh, qh->device_us,
530 DWC2_ROUND_US_TO_SLICE(qh->device_us),
531 DWC2_US_PER_SLICE * qh->ls_start_schedule_slice);
532
533 if (map) {
534 dwc2_sch_dbg(hsotg,
535 "QH=%p Whole low/full speed map %p now:\n",
536 qh, map);
537 pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
538 DWC2_LS_SCHEDULE_FRAMES, "Frame ", "slices",
539 dwc2_qh_print, &print_data);
540 }
541 }
542
543 for (i = 0; i < qh->num_hs_transfers; i++) {
544 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i;
545 int uframe = trans_time->start_schedule_us /
546 DWC2_HS_PERIODIC_US_PER_UFRAME;
547 int rel_us = trans_time->start_schedule_us %
548 DWC2_HS_PERIODIC_US_PER_UFRAME;
549
550 dwc2_sch_dbg(hsotg,
551 "QH=%p HS trans #%d: %d us @ uFrame %d + %d us\n",
552 qh, i, trans_time->duration_us, uframe, rel_us);
553 }
554 if (qh->num_hs_transfers) {
555 dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh);
556 pmap_print(hsotg->hs_periodic_bitmap,
557 DWC2_HS_PERIODIC_US_PER_UFRAME,
558 DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
559 dwc2_qh_print, &print_data);
560 }
561
562}
563
564/**
565 * dwc2_ls_pmap_schedule() - Schedule a low speed QH
566 *
567 * @hsotg: The HCD state structure for the DWC OTG controller.
568 * @qh: QH for the periodic transfer.
569 * @search_slice: We'll start trying to schedule at the passed slice.
570 * Remember that slices are the units of the low speed
571 * schedule (think 25us or so).
572 *
573 * Wraps pmap_schedule() with the right parameters for low speed scheduling.
574 *
575 * Normally we schedule low speed devices on the map associated with the TT.
576 *
577 * Returns: 0 for success or an error code.
578 */
579static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
580 int search_slice)
581{
582 int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
583 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
584 int slice;
585
586 if (map == NULL)
587 return -EINVAL;
588
589 /*
590 * Schedule on the proper low speed map with our low speed scheduling
591 * parameters. Note that we use the "device_interval" here since
592 * we want the low speed interval and the only way we'd be in this
593 * function is if the device is low speed.
594 *
595 * If we happen to be doing low speed and high speed scheduling for the
596 * same transaction (AKA we have a split) we always do low speed first.
597 * That means we can always pass "false" for only_one_period (that
598 * parameters is only useful when we're trying to get one schedule to
599 * match what we already planned in the other schedule).
600 */
601 slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
602 DWC2_LS_SCHEDULE_FRAMES, slices,
603 qh->device_interval, search_slice, false);
604
605 if (slice < 0)
606 return slice;
607
608 qh->ls_start_schedule_slice = slice;
609 return 0;
610}
611
612/**
613 * dwc2_ls_pmap_unschedule() - Undo work done by dwc2_ls_pmap_schedule()
614 *
615 * @hsotg: The HCD state structure for the DWC OTG controller.
616 * @qh: QH for the periodic transfer.
617 */
618static void dwc2_ls_pmap_unschedule(struct dwc2_hsotg *hsotg,
619 struct dwc2_qh *qh)
620{
621 int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
622 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
623
624 /* Schedule should have failed, so no worries about no error code */
625 if (map == NULL)
626 return;
627
628 pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
629 DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval,
630 qh->ls_start_schedule_slice);
631}
632
633/**
634 * dwc2_hs_pmap_schedule - Schedule in the main high speed schedule
635 *
636 * This will schedule something on the main dwc2 schedule.
637 *
638 * We'll start looking in qh->hs_transfers[index].start_schedule_us. We'll
639 * update this with the result upon success. We also use the duration from
640 * the same structure.
641 *
642 * @hsotg: The HCD state structure for the DWC OTG controller.
643 * @qh: QH for the periodic transfer.
644 * @only_one_period: If true we will limit ourselves to just looking at
645 * one period (aka one 100us chunk). This is used if we have
646 * already scheduled something on the low speed schedule and
647 * need to find something that matches on the high speed one.
648 * @index: The index into qh->hs_transfers that we're working with.
649 *
650 * Returns: 0 for success or an error code. Upon success the
651 * dwc2_hs_transfer_time specified by "index" will be updated.
652 */
653static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
654 bool only_one_period, int index)
655{
656 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
657 int us;
658
659 us = pmap_schedule(hsotg->hs_periodic_bitmap,
660 DWC2_HS_PERIODIC_US_PER_UFRAME,
661 DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
662 qh->host_interval, trans_time->start_schedule_us,
663 only_one_period);
664
665 if (us < 0)
666 return us;
667
668 trans_time->start_schedule_us = us;
669 return 0;
670}
671
672/**
673 * dwc2_ls_pmap_unschedule() - Undo work done by dwc2_hs_pmap_schedule()
674 *
675 * @hsotg: The HCD state structure for the DWC OTG controller.
676 * @qh: QH for the periodic transfer.
677 */
678static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
679 struct dwc2_qh *qh, int index)
680{
681 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
682
683 pmap_unschedule(hsotg->hs_periodic_bitmap,
684 DWC2_HS_PERIODIC_US_PER_UFRAME,
685 DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
686 qh->host_interval, trans_time->start_schedule_us);
687}
688
689/**
690 * dwc2_uframe_schedule_split - Schedule a QH for a periodic split xfer.
691 *
692 * This is the most complicated thing in USB. We have to find matching time
693 * in both the global high speed schedule for the port and the low speed
694 * schedule for the TT associated with the given device.
695 *
696 * Being here means that the host must be running in high speed mode and the
697 * device is in low or full speed mode (and behind a hub).
698 *
699 * @hsotg: The HCD state structure for the DWC OTG controller.
700 * @qh: QH for the periodic transfer.
701 */
702static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
703 struct dwc2_qh *qh)
704{
705 int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
706 int ls_search_slice;
707 int err = 0;
708 int host_interval_in_sched;
709
710 /*
711 * The interval (how often to repeat) in the actual host schedule.
712 * See pmap_schedule() for gcd() explanation.
713 */
714 host_interval_in_sched = gcd(qh->host_interval,
715 DWC2_HS_SCHEDULE_UFRAMES);
716
717 /*
718 * We always try to find space in the low speed schedule first, then
719 * try to find high speed time that matches. If we don't, we'll bump
720 * up the place we start searching in the low speed schedule and try
721 * again. To start we'll look right at the beginning of the low speed
722 * schedule.
723 *
724 * Note that this will tend to front-load the high speed schedule.
725 * We may eventually want to try to avoid this by either considering
726 * both schedules together or doing some sort of round robin.
727 */
728 ls_search_slice = 0;
729
730 while (ls_search_slice < DWC2_LS_SCHEDULE_SLICES) {
731 int start_s_uframe;
732 int ssplit_s_uframe;
733 int second_s_uframe;
734 int rel_uframe;
735 int first_count;
736 int middle_count;
737 int end_count;
738 int first_data_bytes;
739 int other_data_bytes;
740 int i;
741
742 if (qh->schedule_low_speed) {
743 err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice);
744
745 /*
746 * If we got an error here there's no other magic we
747 * can do, so bail. All the looping above is only
748 * helpful to redo things if we got a low speed slot
749 * and then couldn't find a matching high speed slot.
750 */
751 if (err)
752 return err;
753 } else {
754 /* Must be missing the tt structure? Why? */
755 WARN_ON_ONCE(1);
756 }
757
758 /*
759 * This will give us a number 0 - 7 if
760 * DWC2_LS_SCHEDULE_FRAMES == 1, or 0 - 15 if == 2, or ...
761 */
762 start_s_uframe = qh->ls_start_schedule_slice /
763 DWC2_SLICES_PER_UFRAME;
764
765 /* Get a number that's always 0 - 7 */
766 rel_uframe = (start_s_uframe % 8);
767
768 /*
769 * If we were going to start in uframe 7 then we would need to
770 * issue a start split in uframe 6, which spec says is not OK.
771 * Move on to the next full frame (assuming there is one).
772 *
773 * See 11.18.4 Host Split Transaction Scheduling Requirements
774 * bullet 1.
775 */
776 if (rel_uframe == 7) {
777 if (qh->schedule_low_speed)
778 dwc2_ls_pmap_unschedule(hsotg, qh);
779 ls_search_slice =
780 (qh->ls_start_schedule_slice /
781 DWC2_LS_PERIODIC_SLICES_PER_FRAME + 1) *
782 DWC2_LS_PERIODIC_SLICES_PER_FRAME;
371 continue; 783 continue;
784 }
372 785
373 /* 786 /*
374 * we need n consecutive slots so use j as a start slot 787 * For ISOC in:
375 * j plus j+1 must be enough time (for now) 788 * - start split (frame -1)
789 * - complete split w/ data (frame +1)
790 * - complete split w/ data (frame +2)
791 * - ...
792 * - complete split w/ data (frame +num_data_packets)
793 * - complete split w/ data (frame +num_data_packets+1)
794 * - complete split w/ data (frame +num_data_packets+2, max 8)
795 * ...though if frame was "0" then max is 7...
796 *
797 * For ISOC out we might need to do:
798 * - start split w/ data (frame -1)
799 * - start split w/ data (frame +0)
800 * - ...
801 * - start split w/ data (frame +num_data_packets-2)
802 *
803 * For INTERRUPT in we might need to do:
804 * - start split (frame -1)
805 * - complete split w/ data (frame +1)
806 * - complete split w/ data (frame +2)
807 * - complete split w/ data (frame +3, max 8)
808 *
809 * For INTERRUPT out we might need to do:
810 * - start split w/ data (frame -1)
811 * - complete split (frame +1)
812 * - complete split (frame +2)
813 * - complete split (frame +3, max 8)
814 *
815 * Start adjusting!
376 */ 816 */
377 xtime = hsotg->frame_usecs[i]; 817 ssplit_s_uframe = (start_s_uframe +
378 for (j = i + 1; j < 8; j++) { 818 host_interval_in_sched - 1) %
379 /* 819 host_interval_in_sched;
380 * if we add this frame remaining time to xtime we may 820 if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
381 * be OK, if not we need to test j for a complete frame 821 second_s_uframe = start_s_uframe;
382 */ 822 else
383 if (xtime + hsotg->frame_usecs[j] < utime) { 823 second_s_uframe = start_s_uframe + 1;
384 if (hsotg->frame_usecs[j] < 824
385 max_uframe_usecs[j]) 825 /* First data transfer might not be all 188 bytes. */
386 continue; 826 first_data_bytes = 188 -
827 DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice %
828 DWC2_SLICES_PER_UFRAME),
829 DWC2_SLICES_PER_UFRAME);
830 if (first_data_bytes > bytecount)
831 first_data_bytes = bytecount;
832 other_data_bytes = bytecount - first_data_bytes;
833
834 /*
835 * For now, skip OUT xfers where first xfer is partial
836 *
837 * Main dwc2 code assumes:
838 * - INT transfers never get split in two.
839 * - ISOC transfers can always transfer 188 bytes the first
840 * time.
841 *
842 * Until that code is fixed, try again if the first transfer
843 * couldn't transfer everything.
844 *
845 * This code can be removed if/when the rest of dwc2 handles
846 * the above cases. Until it's fixed we just won't be able
847 * to schedule quite as tightly.
848 */
849 if (!qh->ep_is_in &&
850 (first_data_bytes != min_t(int, 188, bytecount))) {
851 dwc2_sch_dbg(hsotg,
852 "QH=%p avoiding broken 1st xfer (%d, %d)\n",
853 qh, first_data_bytes, bytecount);
854 if (qh->schedule_low_speed)
855 dwc2_ls_pmap_unschedule(hsotg, qh);
856 ls_search_slice = (start_s_uframe + 1) *
857 DWC2_SLICES_PER_UFRAME;
858 continue;
859 }
860
861 /* Start by assuming transfers for the bytes */
862 qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);
863
864 /*
865 * Everything except ISOC OUT has extra transfers. Rules are
866 * complicated. See 11.18.4 Host Split Transaction Scheduling
867 * Requirements bullet 3.
868 */
869 if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
870 if (rel_uframe == 6)
871 qh->num_hs_transfers += 2;
872 else
873 qh->num_hs_transfers += 3;
874
875 if (qh->ep_is_in) {
876 /*
877 * First is start split, middle/end is data.
878 * Allocate full data bytes for all data.
879 */
880 first_count = 4;
881 middle_count = bytecount;
882 end_count = bytecount;
883 } else {
884 /*
885 * First is data, middle/end is complete.
886 * First transfer and second can have data.
887 * Rest should just have complete split.
888 */
889 first_count = first_data_bytes;
890 middle_count = max_t(int, 4, other_data_bytes);
891 end_count = 4;
387 } 892 }
388 if (xtime >= utime) { 893 } else {
389 t_left = utime; 894 if (qh->ep_is_in) {
390 for (k = i; k < 8; k++) { 895 int last;
391 t_left -= hsotg->frame_usecs[k]; 896
392 if (t_left <= 0) { 897 /* Account for the start split */
393 qh->frame_usecs[k] += 898 qh->num_hs_transfers++;
394 hsotg->frame_usecs[k] 899
395 + t_left; 900 /* Calculate "L" value from spec */
396 hsotg->frame_usecs[k] = -t_left; 901 last = rel_uframe + qh->num_hs_transfers + 1;
397 return i; 902
398 } else { 903 /* Start with basic case */
399 qh->frame_usecs[k] += 904 if (last <= 6)
400 hsotg->frame_usecs[k]; 905 qh->num_hs_transfers += 2;
401 hsotg->frame_usecs[k] = 0; 906 else
402 } 907 qh->num_hs_transfers += 1;
403 } 908
909 /* Adjust downwards */
910 if (last >= 6 && rel_uframe == 0)
911 qh->num_hs_transfers--;
912
913 /* 1st = start; rest can contain data */
914 first_count = 4;
915 middle_count = min_t(int, 188, bytecount);
916 end_count = middle_count;
917 } else {
918 /* All contain data, last might be smaller */
919 first_count = first_data_bytes;
920 middle_count = min_t(int, 188,
921 other_data_bytes);
922 end_count = other_data_bytes % 188;
404 } 923 }
405 /* add the frame time to x time */
406 xtime += hsotg->frame_usecs[j];
407 /* we must have a fully available next frame or break */
408 if (xtime < utime &&
409 hsotg->frame_usecs[j] == max_uframe_usecs[j])
410 continue;
411 } 924 }
925
926 /* Assign durations per uFrame */
927 qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count);
928 for (i = 1; i < qh->num_hs_transfers - 1; i++)
929 qh->hs_transfers[i].duration_us =
930 HS_USECS_ISO(middle_count);
931 if (qh->num_hs_transfers > 1)
932 qh->hs_transfers[qh->num_hs_transfers - 1].duration_us =
933 HS_USECS_ISO(end_count);
934
935 /*
936 * Assign start us. The call below to dwc2_hs_pmap_schedule()
937 * will start with these numbers but may adjust within the same
938 * microframe.
939 */
940 qh->hs_transfers[0].start_schedule_us =
941 ssplit_s_uframe * DWC2_HS_PERIODIC_US_PER_UFRAME;
942 for (i = 1; i < qh->num_hs_transfers; i++)
943 qh->hs_transfers[i].start_schedule_us =
944 ((second_s_uframe + i - 1) %
945 DWC2_HS_SCHEDULE_UFRAMES) *
946 DWC2_HS_PERIODIC_US_PER_UFRAME;
947
948 /* Try to schedule with filled in hs_transfers above */
949 for (i = 0; i < qh->num_hs_transfers; i++) {
950 err = dwc2_hs_pmap_schedule(hsotg, qh, true, i);
951 if (err)
952 break;
953 }
954
955 /* If we scheduled all w/out breaking out then we're all good */
956 if (i == qh->num_hs_transfers)
957 break;
958
959 for (; i >= 0; i--)
960 dwc2_hs_pmap_unschedule(hsotg, qh, i);
961
962 if (qh->schedule_low_speed)
963 dwc2_ls_pmap_unschedule(hsotg, qh);
964
965 /* Try again starting in the next microframe */
966 ls_search_slice = (start_s_uframe + 1) * DWC2_SLICES_PER_UFRAME;
412 } 967 }
413 return -ENOSPC; 968
969 if (ls_search_slice >= DWC2_LS_SCHEDULE_SLICES)
970 return -ENOSPC;
971
972 return 0;
973}
974
975/**
976 * dwc2_uframe_schedule_hs - Schedule a QH for a periodic high speed xfer.
977 *
978 * Basically this just wraps dwc2_hs_pmap_schedule() to provide a clean
979 * interface.
980 *
981 * @hsotg: The HCD state structure for the DWC OTG controller.
982 * @qh: QH for the periodic transfer.
983 */
984static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
985{
986 /* In non-split host and device time are the same */
987 WARN_ON(qh->host_us != qh->device_us);
988 WARN_ON(qh->host_interval != qh->device_interval);
989 WARN_ON(qh->num_hs_transfers != 1);
990
991 /* We'll have one transfer; init start to 0 before calling scheduler */
992 qh->hs_transfers[0].start_schedule_us = 0;
993 qh->hs_transfers[0].duration_us = qh->host_us;
994
995 return dwc2_hs_pmap_schedule(hsotg, qh, false, 0);
996}
997
998/**
999 * dwc2_uframe_schedule_ls - Schedule a QH for a periodic low/full speed xfer.
1000 *
1001 * Basically this just wraps dwc2_ls_pmap_schedule() to provide a clean
1002 * interface.
1003 *
1004 * @hsotg: The HCD state structure for the DWC OTG controller.
1005 * @qh: QH for the periodic transfer.
1006 */
1007static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1008{
1009 /* In non-split host and device time are the same */
1010 WARN_ON(qh->host_us != qh->device_us);
1011 WARN_ON(qh->host_interval != qh->device_interval);
1012 WARN_ON(!qh->schedule_low_speed);
1013
1014 /* Run on the main low speed schedule (no split = no hub = no TT) */
1015 return dwc2_ls_pmap_schedule(hsotg, qh, 0);
414} 1016}
415 1017
416static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 1018/**
1019 * dwc2_uframe_schedule - Schedule a QH for a periodic xfer.
1020 *
1021 * Calls one of the 3 sub-function depending on what type of transfer this QH
1022 * is for. Also adds some printing.
1023 *
1024 * @hsotg: The HCD state structure for the DWC OTG controller.
1025 * @qh: QH for the periodic transfer.
1026 */
1027static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
417{ 1028{
418 int ret; 1029 int ret;
419 1030
420 if (qh->dev_speed == USB_SPEED_HIGH) { 1031 if (qh->dev_speed == USB_SPEED_HIGH)
421 /* if this is a hs transaction we need a full frame */ 1032 ret = dwc2_uframe_schedule_hs(hsotg, qh);
422 ret = dwc2_find_single_uframe(hsotg, qh); 1033 else if (!qh->do_split)
1034 ret = dwc2_uframe_schedule_ls(hsotg, qh);
1035 else
1036 ret = dwc2_uframe_schedule_split(hsotg, qh);
1037
1038 if (ret)
1039 dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret);
1040 else
1041 dwc2_qh_schedule_print(hsotg, qh);
1042
1043 return ret;
1044}
1045
1046/**
1047 * dwc2_uframe_unschedule - Undoes dwc2_uframe_schedule().
1048 *
1049 * @hsotg: The HCD state structure for the DWC OTG controller.
1050 * @qh: QH for the periodic transfer.
1051 */
1052static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1053{
1054 int i;
1055
1056 for (i = 0; i < qh->num_hs_transfers; i++)
1057 dwc2_hs_pmap_unschedule(hsotg, qh, i);
1058
1059 if (qh->schedule_low_speed)
1060 dwc2_ls_pmap_unschedule(hsotg, qh);
1061
1062 dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh);
1063}
1064
1065/**
1066 * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
1067 *
1068 * Takes a qh that has already been scheduled (which means we know we have the
1069 * bandwdith reserved for us) and set the next_active_frame and the
1070 * start_active_frame.
1071 *
1072 * This is expected to be called on qh's that weren't previously actively
1073 * running. It just picks the next frame that we can fit into without any
1074 * thought about the past.
1075 *
1076 * @hsotg: The HCD state structure for the DWC OTG controller
1077 * @qh: QH for a periodic endpoint
1078 *
1079 */
1080static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1081{
1082 u16 frame_number;
1083 u16 earliest_frame;
1084 u16 next_active_frame;
1085 u16 relative_frame;
1086 u16 interval;
1087
1088 /*
1089 * Use the real frame number rather than the cached value as of the
1090 * last SOF to give us a little extra slop.
1091 */
1092 frame_number = dwc2_hcd_get_frame_number(hsotg);
1093
1094 /*
1095 * We wouldn't want to start any earlier than the next frame just in
1096 * case the frame number ticks as we're doing this calculation.
1097 *
1098 * NOTE: if we could quantify how long till we actually get scheduled
1099 * we might be able to avoid the "+ 1" by looking at the upper part of
1100 * HFNUM (the FRREM field). For now we'll just use the + 1 though.
1101 */
1102 earliest_frame = dwc2_frame_num_inc(frame_number, 1);
1103 next_active_frame = earliest_frame;
1104
1105 /* Get the "no microframe schduler" out of the way... */
1106 if (hsotg->core_params->uframe_sched <= 0) {
1107 if (qh->do_split)
1108 /* Splits are active at microframe 0 minus 1 */
1109 next_active_frame |= 0x7;
1110 goto exit;
1111 }
1112
1113 if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
1114 /*
1115 * We're either at high speed or we're doing a split (which
1116 * means we're talking high speed to a hub). In any case
1117 * the first frame should be based on when the first scheduled
1118 * event is.
1119 */
1120 WARN_ON(qh->num_hs_transfers < 1);
1121
1122 relative_frame = qh->hs_transfers[0].start_schedule_us /
1123 DWC2_HS_PERIODIC_US_PER_UFRAME;
1124
1125 /* Adjust interval as per high speed schedule */
1126 interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES);
1127
423 } else { 1128 } else {
424 /* 1129 /*
425 * if this is a fs transaction we may need a sequence 1130 * Low or full speed directly on dwc2. Just about the same
426 * of frames 1131 * as high speed but on a different schedule and with slightly
1132 * different adjustments. Note that this works because when
1133 * the host and device are both low speed then frames in the
1134 * controller tick at low speed.
427 */ 1135 */
428 ret = dwc2_find_multi_uframe(hsotg, qh); 1136 relative_frame = qh->ls_start_schedule_slice /
1137 DWC2_LS_PERIODIC_SLICES_PER_FRAME;
1138 interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES);
429 } 1139 }
430 return ret; 1140
1141 /* Scheduler messed up if frame is past interval */
1142 WARN_ON(relative_frame >= interval);
1143
1144 /*
1145 * We know interval must divide (HFNUM_MAX_FRNUM + 1) now that we've
1146 * done the gcd(), so it's safe to move to the beginning of the current
1147 * interval like this.
1148 *
1149 * After this we might be before earliest_frame, but don't worry,
1150 * we'll fix it...
1151 */
1152 next_active_frame = (next_active_frame / interval) * interval;
1153
1154 /*
1155 * Actually choose to start at the frame number we've been
1156 * scheduled for.
1157 */
1158 next_active_frame = dwc2_frame_num_inc(next_active_frame,
1159 relative_frame);
1160
1161 /*
1162 * We actually need 1 frame before since the next_active_frame is
1163 * the frame number we'll be put on the ready list and we won't be on
1164 * the bus until 1 frame later.
1165 */
1166 next_active_frame = dwc2_frame_num_dec(next_active_frame, 1);
1167
1168 /*
1169 * By now we might actually be before the earliest_frame. Let's move
1170 * up intervals until we're not.
1171 */
1172 while (dwc2_frame_num_gt(earliest_frame, next_active_frame))
1173 next_active_frame = dwc2_frame_num_inc(next_active_frame,
1174 interval);
1175
1176exit:
1177 qh->next_active_frame = next_active_frame;
1178 qh->start_active_frame = next_active_frame;
1179
1180 dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n",
1181 qh, frame_number, qh->next_active_frame);
1182}
1183
1184/**
1185 * dwc2_do_reserve() - Make a periodic reservation
1186 *
1187 * Try to allocate space in the periodic schedule. Depending on parameters
1188 * this might use the microframe scheduler or the dumb scheduler.
1189 *
1190 * @hsotg: The HCD state structure for the DWC OTG controller
1191 * @qh: QH for the periodic transfer.
1192 *
1193 * Returns: 0 upon success; error upon failure.
1194 */
1195static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1196{
1197 int status;
1198
1199 if (hsotg->core_params->uframe_sched > 0) {
1200 status = dwc2_uframe_schedule(hsotg, qh);
1201 } else {
1202 status = dwc2_periodic_channel_available(hsotg);
1203 if (status) {
1204 dev_info(hsotg->dev,
1205 "%s: No host channel available for periodic transfer\n",
1206 __func__);
1207 return status;
1208 }
1209
1210 status = dwc2_check_periodic_bandwidth(hsotg, qh);
1211 }
1212
1213 if (status) {
1214 dev_dbg(hsotg->dev,
1215 "%s: Insufficient periodic bandwidth for periodic transfer\n",
1216 __func__);
1217 return status;
1218 }
1219
1220 if (hsotg->core_params->uframe_sched <= 0)
1221 /* Reserve periodic channel */
1222 hsotg->periodic_channels++;
1223
1224 /* Update claimed usecs per (micro)frame */
1225 hsotg->periodic_usecs += qh->host_us;
1226
1227 dwc2_pick_first_frame(hsotg, qh);
1228
1229 return 0;
1230}
1231
1232/**
1233 * dwc2_do_unreserve() - Actually release the periodic reservation
1234 *
1235 * This function actually releases the periodic bandwidth that was reserved
1236 * by the given qh.
1237 *
1238 * @hsotg: The HCD state structure for the DWC OTG controller
1239 * @qh: QH for the periodic transfer.
1240 */
1241static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1242{
1243 assert_spin_locked(&hsotg->lock);
1244
1245 WARN_ON(!qh->unreserve_pending);
1246
1247 /* No more unreserve pending--we're doing it */
1248 qh->unreserve_pending = false;
1249
1250 if (WARN_ON(!list_empty(&qh->qh_list_entry)))
1251 list_del_init(&qh->qh_list_entry);
1252
1253 /* Update claimed usecs per (micro)frame */
1254 hsotg->periodic_usecs -= qh->host_us;
1255
1256 if (hsotg->core_params->uframe_sched > 0) {
1257 dwc2_uframe_unschedule(hsotg, qh);
1258 } else {
1259 /* Release periodic channel reservation */
1260 hsotg->periodic_channels--;
1261 }
1262}
1263
1264/**
1265 * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
1266 *
1267 * According to the kernel doc for usb_submit_urb() (specifically the part about
1268 * "Reserved Bandwidth Transfers"), we need to keep a reservation active as
1269 * long as a device driver keeps submitting. Since we're using HCD_BH to give
1270 * back the URB we need to give the driver a little bit of time before we
1271 * release the reservation. This worker is called after the appropriate
1272 * delay.
1273 *
1274 * @work: Pointer to a qh unreserve_work.
1275 */
1276static void dwc2_unreserve_timer_fn(unsigned long data)
1277{
1278 struct dwc2_qh *qh = (struct dwc2_qh *)data;
1279 struct dwc2_hsotg *hsotg = qh->hsotg;
1280 unsigned long flags;
1281
1282 /*
1283 * Wait for the lock, or for us to be scheduled again. We
1284 * could be scheduled again if:
1285 * - We started executing but didn't get the lock yet.
1286 * - A new reservation came in, but cancel didn't take effect
1287 * because we already started executing.
1288 * - The timer has been kicked again.
1289 * In that case cancel and wait for the next call.
1290 */
1291 while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
1292 if (timer_pending(&qh->unreserve_timer))
1293 return;
1294 }
1295
1296 /*
1297 * Might be no more unreserve pending if:
1298 * - We started executing but didn't get the lock yet.
1299 * - A new reservation came in, but cancel didn't take effect
1300 * because we already started executing.
1301 *
1302 * We can't put this in the loop above because unreserve_pending needs
1303 * to be accessed under lock, so we can only check it once we got the
1304 * lock.
1305 */
1306 if (qh->unreserve_pending)
1307 dwc2_do_unreserve(hsotg, qh);
1308
1309 spin_unlock_irqrestore(&hsotg->lock, flags);
431} 1310}
432 1311
433/** 1312/**
@@ -474,42 +1353,6 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
474{ 1353{
475 int status; 1354 int status;
476 1355
477 if (hsotg->core_params->uframe_sched > 0) {
478 int frame = -1;
479
480 status = dwc2_find_uframe(hsotg, qh);
481 if (status == 0)
482 frame = 7;
483 else if (status > 0)
484 frame = status - 1;
485
486 /* Set the new frame up */
487 if (frame >= 0) {
488 qh->sched_frame &= ~0x7;
489 qh->sched_frame |= (frame & 7);
490 }
491
492 if (status > 0)
493 status = 0;
494 } else {
495 status = dwc2_periodic_channel_available(hsotg);
496 if (status) {
497 dev_info(hsotg->dev,
498 "%s: No host channel available for periodic transfer\n",
499 __func__);
500 return status;
501 }
502
503 status = dwc2_check_periodic_bandwidth(hsotg, qh);
504 }
505
506 if (status) {
507 dev_dbg(hsotg->dev,
508 "%s: Insufficient periodic bandwidth for periodic transfer\n",
509 __func__);
510 return status;
511 }
512
513 status = dwc2_check_max_xfer_size(hsotg, qh); 1356 status = dwc2_check_max_xfer_size(hsotg, qh);
514 if (status) { 1357 if (status) {
515 dev_dbg(hsotg->dev, 1358 dev_dbg(hsotg->dev,
@@ -518,6 +1361,35 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
518 return status; 1361 return status;
519 } 1362 }
520 1363
1364 /* Cancel pending unreserve; if canceled OK, unreserve was pending */
1365 if (del_timer(&qh->unreserve_timer))
1366 WARN_ON(!qh->unreserve_pending);
1367
1368 /*
1369 * Only need to reserve if there's not an unreserve pending, since if an
1370 * unreserve is pending then by definition our old reservation is still
1371 * valid. Unreserve might still be pending even if we didn't cancel if
1372 * dwc2_unreserve_timer_fn() already started. Code in the timer handles
1373 * that case.
1374 */
1375 if (!qh->unreserve_pending) {
1376 status = dwc2_do_reserve(hsotg, qh);
1377 if (status)
1378 return status;
1379 } else {
1380 /*
1381 * It might have been a while, so make sure that frame_number
1382 * is still good. Note: we could also try to use the similar
1383 * dwc2_next_periodic_start() but that schedules much more
1384 * tightly and we might need to hurry and queue things up.
1385 */
1386 if (dwc2_frame_num_le(qh->next_active_frame,
1387 hsotg->frame_number))
1388 dwc2_pick_first_frame(hsotg, qh);
1389 }
1390
1391 qh->unreserve_pending = 0;
1392
521 if (hsotg->core_params->dma_desc_enable > 0) 1393 if (hsotg->core_params->dma_desc_enable > 0)
522 /* Don't rely on SOF and start in ready schedule */ 1394 /* Don't rely on SOF and start in ready schedule */
523 list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready); 1395 list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
@@ -526,14 +1398,7 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
526 list_add_tail(&qh->qh_list_entry, 1398 list_add_tail(&qh->qh_list_entry,
527 &hsotg->periodic_sched_inactive); 1399 &hsotg->periodic_sched_inactive);
528 1400
529 if (hsotg->core_params->uframe_sched <= 0) 1401 return 0;
530 /* Reserve periodic channel */
531 hsotg->periodic_channels++;
532
533 /* Update claimed usecs per (micro)frame */
534 hsotg->periodic_usecs += qh->usecs;
535
536 return status;
537} 1402}
538 1403
539/** 1404/**
@@ -546,25 +1411,231 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
546static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg, 1411static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
547 struct dwc2_qh *qh) 1412 struct dwc2_qh *qh)
548{ 1413{
549 int i; 1414 bool did_modify;
1415
1416 assert_spin_locked(&hsotg->lock);
1417
1418 /*
1419 * Schedule the unreserve to happen in a little bit. Cases here:
1420 * - Unreserve worker might be sitting there waiting to grab the lock.
1421 * In this case it will notice it's been schedule again and will
1422 * quit.
1423 * - Unreserve worker might not be scheduled.
1424 *
1425 * We should never already be scheduled since dwc2_schedule_periodic()
1426 * should have canceled the scheduled unreserve timer (hence the
1427 * warning on did_modify).
1428 *
1429 * We add + 1 to the timer to guarantee that at least 1 jiffy has
1430 * passed (otherwise if the jiffy counter might tick right after we
1431 * read it and we'll get no delay).
1432 */
1433 did_modify = mod_timer(&qh->unreserve_timer,
1434 jiffies + DWC2_UNRESERVE_DELAY + 1);
1435 WARN_ON(did_modify);
1436 qh->unreserve_pending = 1;
550 1437
551 list_del_init(&qh->qh_list_entry); 1438 list_del_init(&qh->qh_list_entry);
1439}
552 1440
553 /* Update claimed usecs per (micro)frame */ 1441/**
554 hsotg->periodic_usecs -= qh->usecs; 1442 * dwc2_qh_init() - Initializes a QH structure
1443 *
1444 * @hsotg: The HCD state structure for the DWC OTG controller
1445 * @qh: The QH to init
1446 * @urb: Holds the information about the device/endpoint needed to initialize
1447 * the QH
1448 * @mem_flags: Flags for allocating memory.
1449 */
1450static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
1451 struct dwc2_hcd_urb *urb, gfp_t mem_flags)
1452{
1453 int dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
1454 u8 ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1455 bool ep_is_in = !!dwc2_hcd_is_pipe_in(&urb->pipe_info);
1456 bool ep_is_isoc = (ep_type == USB_ENDPOINT_XFER_ISOC);
1457 bool ep_is_int = (ep_type == USB_ENDPOINT_XFER_INT);
1458 u32 hprt = dwc2_readl(hsotg->regs + HPRT0);
1459 u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
1460 bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
1461 dev_speed != USB_SPEED_HIGH);
1462 int maxp = dwc2_hcd_get_mps(&urb->pipe_info);
1463 int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp);
1464 char *speed, *type;
555 1465
556 if (hsotg->core_params->uframe_sched > 0) { 1466 /* Initialize QH */
557 for (i = 0; i < 8; i++) { 1467 qh->hsotg = hsotg;
558 hsotg->frame_usecs[i] += qh->frame_usecs[i]; 1468 setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
559 qh->frame_usecs[i] = 0; 1469 (unsigned long)qh);
1470 qh->ep_type = ep_type;
1471 qh->ep_is_in = ep_is_in;
1472
1473 qh->data_toggle = DWC2_HC_PID_DATA0;
1474 qh->maxp = maxp;
1475 INIT_LIST_HEAD(&qh->qtd_list);
1476 INIT_LIST_HEAD(&qh->qh_list_entry);
1477
1478 qh->do_split = do_split;
1479 qh->dev_speed = dev_speed;
1480
1481 if (ep_is_int || ep_is_isoc) {
1482 /* Compute scheduling parameters once and save them */
1483 int host_speed = do_split ? USB_SPEED_HIGH : dev_speed;
1484 struct dwc2_tt *dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv,
1485 mem_flags,
1486 &qh->ttport);
1487 int device_ns;
1488
1489 qh->dwc_tt = dwc_tt;
1490
1491 qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in,
1492 ep_is_isoc, bytecount));
1493 device_ns = usb_calc_bus_time(dev_speed, ep_is_in,
1494 ep_is_isoc, bytecount);
1495
1496 if (do_split && dwc_tt)
1497 device_ns += dwc_tt->usb_tt->think_time;
1498 qh->device_us = NS_TO_US(device_ns);
1499
1500
1501 qh->device_interval = urb->interval;
1502 qh->host_interval = urb->interval * (do_split ? 8 : 1);
1503
1504 /*
1505 * Schedule low speed if we're running the host in low or
1506 * full speed OR if we've got a "TT" to deal with to access this
1507 * device.
1508 */
1509 qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED ||
1510 dwc_tt;
1511
1512 if (do_split) {
1513 /* We won't know num transfers until we schedule */
1514 qh->num_hs_transfers = -1;
1515 } else if (dev_speed == USB_SPEED_HIGH) {
1516 qh->num_hs_transfers = 1;
1517 } else {
1518 qh->num_hs_transfers = 0;
560 } 1519 }
561 } else { 1520
562 /* Release periodic channel reservation */ 1521 /* We'll schedule later when we have something to do */
563 hsotg->periodic_channels--; 1522 }
1523
1524 switch (dev_speed) {
1525 case USB_SPEED_LOW:
1526 speed = "low";
1527 break;
1528 case USB_SPEED_FULL:
1529 speed = "full";
1530 break;
1531 case USB_SPEED_HIGH:
1532 speed = "high";
1533 break;
1534 default:
1535 speed = "?";
1536 break;
1537 }
1538
1539 switch (qh->ep_type) {
1540 case USB_ENDPOINT_XFER_ISOC:
1541 type = "isochronous";
1542 break;
1543 case USB_ENDPOINT_XFER_INT:
1544 type = "interrupt";
1545 break;
1546 case USB_ENDPOINT_XFER_CONTROL:
1547 type = "control";
1548 break;
1549 case USB_ENDPOINT_XFER_BULK:
1550 type = "bulk";
1551 break;
1552 default:
1553 type = "?";
1554 break;
1555 }
1556
1557 dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type,
1558 speed, bytecount);
1559 dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh,
1560 dwc2_hcd_get_dev_addr(&urb->pipe_info),
1561 dwc2_hcd_get_ep_num(&urb->pipe_info),
1562 ep_is_in ? "IN" : "OUT");
1563 if (ep_is_int || ep_is_isoc) {
1564 dwc2_sch_dbg(hsotg,
1565 "QH=%p ...duration: host=%d us, device=%d us\n",
1566 qh, qh->host_us, qh->device_us);
1567 dwc2_sch_dbg(hsotg, "QH=%p ...interval: host=%d, device=%d\n",
1568 qh, qh->host_interval, qh->device_interval);
1569 if (qh->schedule_low_speed)
1570 dwc2_sch_dbg(hsotg, "QH=%p ...low speed schedule=%p\n",
1571 qh, dwc2_get_ls_map(hsotg, qh));
564 } 1572 }
565} 1573}
566 1574
567/** 1575/**
1576 * dwc2_hcd_qh_create() - Allocates and initializes a QH
1577 *
1578 * @hsotg: The HCD state structure for the DWC OTG controller
1579 * @urb: Holds the information about the device/endpoint needed
1580 * to initialize the QH
1581 * @atomic_alloc: Flag to do atomic allocation if needed
1582 *
1583 * Return: Pointer to the newly allocated QH, or NULL on error
1584 */
1585struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
1586 struct dwc2_hcd_urb *urb,
1587 gfp_t mem_flags)
1588{
1589 struct dwc2_qh *qh;
1590
1591 if (!urb->priv)
1592 return NULL;
1593
1594 /* Allocate memory */
1595 qh = kzalloc(sizeof(*qh), mem_flags);
1596 if (!qh)
1597 return NULL;
1598
1599 dwc2_qh_init(hsotg, qh, urb, mem_flags);
1600
1601 if (hsotg->core_params->dma_desc_enable > 0 &&
1602 dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
1603 dwc2_hcd_qh_free(hsotg, qh);
1604 return NULL;
1605 }
1606
1607 return qh;
1608}
1609
1610/**
1611 * dwc2_hcd_qh_free() - Frees the QH
1612 *
1613 * @hsotg: HCD instance
1614 * @qh: The QH to free
1615 *
1616 * QH should already be removed from the list. QTD list should already be empty
1617 * if called from URB Dequeue.
1618 *
1619 * Must NOT be called with interrupt disabled or spinlock held
1620 */
1621void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1622{
1623 /* Make sure any unreserve work is finished. */
1624 if (del_timer_sync(&qh->unreserve_timer)) {
1625 unsigned long flags;
1626
1627 spin_lock_irqsave(&hsotg->lock, flags);
1628 dwc2_do_unreserve(hsotg, qh);
1629 spin_unlock_irqrestore(&hsotg->lock, flags);
1630 }
1631 dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
1632
1633 if (qh->desc_list)
1634 dwc2_hcd_qh_free_ddma(hsotg, qh);
1635 kfree(qh);
1636}
1637
1638/**
568 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic 1639 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
569 * schedule if it is not already in the schedule. If the QH is already in 1640 * schedule if it is not already in the schedule. If the QH is already in
570 * the schedule, no action is taken. 1641 * the schedule, no action is taken.
@@ -586,16 +1657,12 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
586 /* QH already in a schedule */ 1657 /* QH already in a schedule */
587 return 0; 1658 return 0;
588 1659
589 if (!dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number) &&
590 !hsotg->frame_number) {
591 dev_dbg(hsotg->dev,
592 "reset frame number counter\n");
593 qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
594 SCHEDULE_SLOP);
595 }
596
597 /* Add the new QH to the appropriate schedule */ 1660 /* Add the new QH to the appropriate schedule */
598 if (dwc2_qh_is_non_per(qh)) { 1661 if (dwc2_qh_is_non_per(qh)) {
1662 /* Schedule right away */
1663 qh->start_active_frame = hsotg->frame_number;
1664 qh->next_active_frame = qh->start_active_frame;
1665
599 /* Always start in inactive schedule */ 1666 /* Always start in inactive schedule */
600 list_add_tail(&qh->qh_list_entry, 1667 list_add_tail(&qh->qh_list_entry,
601 &hsotg->non_periodic_sched_inactive); 1668 &hsotg->non_periodic_sched_inactive);
@@ -649,39 +1716,164 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
649 } 1716 }
650} 1717}
651 1718
652/* 1719/**
653 * Schedule the next continuing periodic split transfer 1720 * dwc2_next_for_periodic_split() - Set next_active_frame midway thru a split.
1721 *
1722 * This is called for setting next_active_frame for periodic splits for all but
1723 * the first packet of the split. Confusing? I thought so...
1724 *
1725 * Periodic splits are single low/full speed transfers that we end up splitting
1726 * up into several high speed transfers. They always fit into one full (1 ms)
1727 * frame but might be split over several microframes (125 us each). We to put
1728 * each of the parts on a very specific high speed frame.
1729 *
1730 * This function figures out where the next active uFrame needs to be.
1731 *
1732 * @hsotg: The HCD state structure
1733 * @qh: QH for the periodic transfer.
1734 * @frame_number: The current frame number.
1735 *
1736 * Return: number missed by (or 0 if we didn't miss).
654 */ 1737 */
655static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg, 1738static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg,
656 struct dwc2_qh *qh, u16 frame_number, 1739 struct dwc2_qh *qh, u16 frame_number)
657 int sched_next_periodic_split)
658{ 1740{
1741 u16 old_frame = qh->next_active_frame;
1742 u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
1743 int missed = 0;
659 u16 incr; 1744 u16 incr;
660 1745
661 if (sched_next_periodic_split) { 1746 /*
662 qh->sched_frame = frame_number; 1747 * See dwc2_uframe_schedule_split() for split scheduling.
663 incr = dwc2_frame_num_inc(qh->start_split_frame, 1); 1748 *
664 if (dwc2_frame_num_le(frame_number, incr)) { 1749 * Basically: increment 1 normally, but 2 right after the start split
665 /* 1750 * (except for ISOC out).
666 * Allow one frame to elapse after start split 1751 */
667 * microframe before scheduling complete split, but 1752 if (old_frame == qh->start_active_frame &&
668 * DON'T if we are doing the next start split in the 1753 !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
669 * same frame for an ISOC out 1754 incr = 2;
670 */ 1755 else
671 if (qh->ep_type != USB_ENDPOINT_XFER_ISOC || 1756 incr = 1;
672 qh->ep_is_in != 0) { 1757
673 qh->sched_frame = 1758 qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);
674 dwc2_frame_num_inc(qh->sched_frame, 1); 1759
675 } 1760 /*
676 } 1761 * Note that it's OK for frame_number to be 1 frame past
677 } else { 1762 * next_active_frame. Remember that next_active_frame is supposed to
678 qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame, 1763 * be 1 frame _before_ when we want to be scheduled. If we're 1 frame
679 qh->interval); 1764 * past it just means schedule ASAP.
680 if (dwc2_frame_num_le(qh->sched_frame, frame_number)) 1765 *
681 qh->sched_frame = frame_number; 1766 * It's _not_ OK, however, if we're more than one frame past.
682 qh->sched_frame |= 0x7; 1767 */
683 qh->start_split_frame = qh->sched_frame; 1768 if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
1769 /*
1770 * OOPS, we missed. That's actually pretty bad since
1771 * the hub will be unhappy; try ASAP I guess.
1772 */
1773 missed = dwc2_frame_num_dec(prev_frame_number,
1774 qh->next_active_frame);
1775 qh->next_active_frame = frame_number;
684 } 1776 }
1777
1778 return missed;
1779}
1780
1781/**
1782 * dwc2_next_periodic_start() - Set next_active_frame for next transfer start
1783 *
1784 * This is called for setting next_active_frame for a periodic transfer for
1785 * all cases other than midway through a periodic split. This will also update
1786 * start_active_frame.
1787 *
1788 * Since we _always_ keep start_active_frame as the start of the previous
1789 * transfer this is normally pretty easy: we just add our interval to
1790 * start_active_frame and we've got our answer.
1791 *
1792 * The tricks come into play if we miss. In that case we'll look for the next
1793 * slot we can fit into.
1794 *
1795 * @hsotg: The HCD state structure
1796 * @qh: QH for the periodic transfer.
1797 * @frame_number: The current frame number.
1798 *
1799 * Return: number missed by (or 0 if we didn't miss).
1800 */
1801static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg,
1802 struct dwc2_qh *qh, u16 frame_number)
1803{
1804 int missed = 0;
1805 u16 interval = qh->host_interval;
1806 u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
1807
1808 qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
1809 interval);
1810
1811 /*
1812 * The dwc2_frame_num_gt() function used below won't work terribly well
1813 * with if we just incremented by a really large intervals since the
1814 * frame counter only goes to 0x3fff. It's terribly unlikely that we
1815 * will have missed in this case anyway. Just go to exit. If we want
1816 * to try to do better we'll need to keep track of a bigger counter
1817 * somewhere in the driver and handle overflows.
1818 */
1819 if (interval >= 0x1000)
1820 goto exit;
1821
1822 /*
1823 * Test for misses, which is when it's too late to schedule.
1824 *
1825 * A few things to note:
1826 * - We compare against prev_frame_number since start_active_frame
1827 * and next_active_frame are always 1 frame before we want things
1828 * to be active and we assume we can still get scheduled in the
1829 * current frame number.
1830 * - It's possible for start_active_frame (now incremented) to be
1831 * next_active_frame if we got an EO MISS (even_odd miss) which
1832 * basically means that we detected there wasn't enough time for
1833 * the last packet and dwc2_hc_set_even_odd_frame() rescheduled us
1834 * at the last second. We want to make sure we don't schedule
1835 * another transfer for the same frame. My test webcam doesn't seem
1836 * terribly upset by missing a transfer but really doesn't like when
1837 * we do two transfers in the same frame.
1838 * - Some misses are expected. Specifically, in order to work
1839 * perfectly dwc2 really needs quite spectacular interrupt latency
1840 * requirements. It needs to be able to handle its interrupts
1841 * completely within 125 us of them being asserted. That not only
1842 * means that the dwc2 interrupt handler needs to be fast but it
1843 * means that nothing else in the system has to block dwc2 for a long
1844 * time. We can help with the dwc2 parts of this, but it's hard to
1845 * guarantee that a system will have interrupt latency < 125 us, so
1846 * we have to be robust to some misses.
1847 */
1848 if (qh->start_active_frame == qh->next_active_frame ||
1849 dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
1850 u16 ideal_start = qh->start_active_frame;
1851 int periods_in_map;
1852
1853 /*
1854 * Adjust interval as per gcd with map size.
1855 * See pmap_schedule() for more details here.
1856 */
1857 if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH)
1858 periods_in_map = DWC2_HS_SCHEDULE_UFRAMES;
1859 else
1860 periods_in_map = DWC2_LS_SCHEDULE_FRAMES;
1861 interval = gcd(interval, periods_in_map);
1862
1863 do {
1864 qh->start_active_frame = dwc2_frame_num_inc(
1865 qh->start_active_frame, interval);
1866 } while (dwc2_frame_num_gt(prev_frame_number,
1867 qh->start_active_frame));
1868
1869 missed = dwc2_frame_num_dec(qh->start_active_frame,
1870 ideal_start);
1871 }
1872
1873exit:
1874 qh->next_active_frame = qh->start_active_frame;
1875
1876 return missed;
685} 1877}
686 1878
687/* 1879/*
@@ -700,7 +1892,9 @@ static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
700void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, 1892void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
701 int sched_next_periodic_split) 1893 int sched_next_periodic_split)
702{ 1894{
1895 u16 old_frame = qh->next_active_frame;
703 u16 frame_number; 1896 u16 frame_number;
1897 int missed;
704 1898
705 if (dbg_qh(qh)) 1899 if (dbg_qh(qh))
706 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1900 dev_vdbg(hsotg->dev, "%s()\n", __func__);
@@ -713,33 +1907,44 @@ void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
713 return; 1907 return;
714 } 1908 }
715 1909
1910 /*
1911 * Use the real frame number rather than the cached value as of the
1912 * last SOF just to get us a little closer to reality. Note that
1913 * means we don't actually know if we've already handled the SOF
1914 * interrupt for this frame.
1915 */
716 frame_number = dwc2_hcd_get_frame_number(hsotg); 1916 frame_number = dwc2_hcd_get_frame_number(hsotg);
717 1917
718 if (qh->do_split) { 1918 if (sched_next_periodic_split)
719 dwc2_sched_periodic_split(hsotg, qh, frame_number, 1919 missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
720 sched_next_periodic_split); 1920 else
721 } else { 1921 missed = dwc2_next_periodic_start(hsotg, qh, frame_number);
722 qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame, 1922
723 qh->interval); 1923 dwc2_sch_vdbg(hsotg,
724 if (dwc2_frame_num_le(qh->sched_frame, frame_number)) 1924 "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n",
725 qh->sched_frame = frame_number; 1925 qh, sched_next_periodic_split, frame_number, old_frame,
726 } 1926 qh->next_active_frame,
1927 dwc2_frame_num_dec(qh->next_active_frame, old_frame),
1928 missed, missed ? "MISS" : "");
727 1929
728 if (list_empty(&qh->qtd_list)) { 1930 if (list_empty(&qh->qtd_list)) {
729 dwc2_hcd_qh_unlink(hsotg, qh); 1931 dwc2_hcd_qh_unlink(hsotg, qh);
730 return; 1932 return;
731 } 1933 }
1934
732 /* 1935 /*
733 * Remove from periodic_sched_queued and move to 1936 * Remove from periodic_sched_queued and move to
734 * appropriate queue 1937 * appropriate queue
1938 *
1939 * Note: we purposely use the frame_number from the "hsotg" structure
1940 * since we know SOF interrupt will handle future frames.
735 */ 1941 */
736 if ((hsotg->core_params->uframe_sched > 0 && 1942 if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
737 dwc2_frame_num_le(qh->sched_frame, frame_number)) || 1943 list_move_tail(&qh->qh_list_entry,
738 (hsotg->core_params->uframe_sched <= 0 && 1944 &hsotg->periodic_sched_ready);
739 qh->sched_frame == frame_number))
740 list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
741 else 1945 else
742 list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive); 1946 list_move_tail(&qh->qh_list_entry,
1947 &hsotg->periodic_sched_inactive);
743} 1948}
744 1949
745/** 1950/**
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 690b9fd98b55..88629bed6614 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -126,10 +126,10 @@ static const struct dwc2_core_params params_rk3066 = {
126 .speed = -1, 126 .speed = -1,
127 .enable_dynamic_fifo = 1, 127 .enable_dynamic_fifo = 1,
128 .en_multiple_tx_fifo = -1, 128 .en_multiple_tx_fifo = -1,
129 .host_rx_fifo_size = 520, /* 520 DWORDs */ 129 .host_rx_fifo_size = 525, /* 525 DWORDs */
130 .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ 130 .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
131 .host_perio_tx_fifo_size = 256, /* 256 DWORDs */ 131 .host_perio_tx_fifo_size = 256, /* 256 DWORDs */
132 .max_transfer_size = 65535, 132 .max_transfer_size = -1,
133 .max_packet_count = -1, 133 .max_packet_count = -1,
134 .host_channels = -1, 134 .host_channels = -1,
135 .phy_type = -1, 135 .phy_type = -1,
@@ -149,6 +149,38 @@ static const struct dwc2_core_params params_rk3066 = {
149 .hibernation = -1, 149 .hibernation = -1,
150}; 150};
151 151
152static const struct dwc2_core_params params_ltq = {
153 .otg_cap = 2, /* non-HNP/non-SRP */
154 .otg_ver = -1,
155 .dma_enable = -1,
156 .dma_desc_enable = -1,
157 .dma_desc_fs_enable = -1,
158 .speed = -1,
159 .enable_dynamic_fifo = -1,
160 .en_multiple_tx_fifo = -1,
161 .host_rx_fifo_size = 288, /* 288 DWORDs */
162 .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
163 .host_perio_tx_fifo_size = 96, /* 96 DWORDs */
164 .max_transfer_size = 65535,
165 .max_packet_count = 511,
166 .host_channels = -1,
167 .phy_type = -1,
168 .phy_utmi_width = -1,
169 .phy_ulpi_ddr = -1,
170 .phy_ulpi_ext_vbus = -1,
171 .i2c_enable = -1,
172 .ulpi_fs_ls = -1,
173 .host_support_fs_ls_low_power = -1,
174 .host_ls_low_power_phy_clk = -1,
175 .ts_dline = -1,
176 .reload_ctl = -1,
177 .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
178 GAHBCFG_HBSTLEN_SHIFT,
179 .uframe_sched = -1,
180 .external_id_pin_ctl = -1,
181 .hibernation = -1,
182};
183
152/* 184/*
153 * Check the dr_mode against the module configuration and hardware 185 * Check the dr_mode against the module configuration and hardware
154 * capabilities. 186 * capabilities.
@@ -428,6 +460,8 @@ static const struct of_device_id dwc2_of_match_table[] = {
428 { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 }, 460 { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
429 { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 }, 461 { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
430 { .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 }, 462 { .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
463 { .compatible = "lantiq,arx100-usb", .data = &params_ltq },
464 { .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
431 { .compatible = "snps,dwc2", .data = NULL }, 465 { .compatible = "snps,dwc2", .data = NULL },
432 { .compatible = "samsung,s3c6400-hsotg", .data = NULL}, 466 { .compatible = "samsung,s3c6400-hsotg", .data = NULL},
433 {}, 467 {},
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index de5e01f41bc2..17fd81447c9f 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -962,10 +962,6 @@ static int dwc3_probe(struct platform_device *pdev)
962 fladj = pdata->fladj_value; 962 fladj = pdata->fladj_value;
963 } 963 }
964 964
965 /* default to superspeed if no maximum_speed passed */
966 if (dwc->maximum_speed == USB_SPEED_UNKNOWN)
967 dwc->maximum_speed = USB_SPEED_SUPER;
968
969 dwc->lpm_nyet_threshold = lpm_nyet_threshold; 965 dwc->lpm_nyet_threshold = lpm_nyet_threshold;
970 dwc->tx_de_emphasis = tx_de_emphasis; 966 dwc->tx_de_emphasis = tx_de_emphasis;
971 967
@@ -1016,6 +1012,33 @@ static int dwc3_probe(struct platform_device *pdev)
1016 goto err1; 1012 goto err1;
1017 } 1013 }
1018 1014
1015 /* Check the maximum_speed parameter */
1016 switch (dwc->maximum_speed) {
1017 case USB_SPEED_LOW:
1018 case USB_SPEED_FULL:
1019 case USB_SPEED_HIGH:
1020 case USB_SPEED_SUPER:
1021 case USB_SPEED_SUPER_PLUS:
1022 break;
1023 default:
1024 dev_err(dev, "invalid maximum_speed parameter %d\n",
1025 dwc->maximum_speed);
1026 /* fall through */
1027 case USB_SPEED_UNKNOWN:
1028 /* default to superspeed */
1029 dwc->maximum_speed = USB_SPEED_SUPER;
1030
1031 /*
1032 * default to superspeed plus if we are capable.
1033 */
1034 if (dwc3_is_usb31(dwc) &&
1035 (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
1036 DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
1037 dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1038
1039 break;
1040 }
1041
1019 /* Adjust Frame Length */ 1042 /* Adjust Frame Length */
1020 dwc3_frame_length_adjustment(dwc, fladj); 1043 dwc3_frame_length_adjustment(dwc, fladj);
1021 1044
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e4f8b90d9627..6254b2ff9080 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -223,7 +223,8 @@
223/* Global HWPARAMS3 Register */ 223/* Global HWPARAMS3 Register */
224#define DWC3_GHWPARAMS3_SSPHY_IFC(n) ((n) & 3) 224#define DWC3_GHWPARAMS3_SSPHY_IFC(n) ((n) & 3)
225#define DWC3_GHWPARAMS3_SSPHY_IFC_DIS 0 225#define DWC3_GHWPARAMS3_SSPHY_IFC_DIS 0
226#define DWC3_GHWPARAMS3_SSPHY_IFC_ENA 1 226#define DWC3_GHWPARAMS3_SSPHY_IFC_GEN1 1
227#define DWC3_GHWPARAMS3_SSPHY_IFC_GEN2 2 /* DWC_usb31 only */
227#define DWC3_GHWPARAMS3_HSPHY_IFC(n) (((n) & (3 << 2)) >> 2) 228#define DWC3_GHWPARAMS3_HSPHY_IFC(n) (((n) & (3 << 2)) >> 2)
228#define DWC3_GHWPARAMS3_HSPHY_IFC_DIS 0 229#define DWC3_GHWPARAMS3_HSPHY_IFC_DIS 0
229#define DWC3_GHWPARAMS3_HSPHY_IFC_UTMI 1 230#define DWC3_GHWPARAMS3_HSPHY_IFC_UTMI 1
@@ -249,6 +250,7 @@
249#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f) 250#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
250 251
251#define DWC3_DCFG_SPEED_MASK (7 << 0) 252#define DWC3_DCFG_SPEED_MASK (7 << 0)
253#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
252#define DWC3_DCFG_SUPERSPEED (4 << 0) 254#define DWC3_DCFG_SUPERSPEED (4 << 0)
253#define DWC3_DCFG_HIGHSPEED (0 << 0) 255#define DWC3_DCFG_HIGHSPEED (0 << 0)
254#define DWC3_DCFG_FULLSPEED2 (1 << 0) 256#define DWC3_DCFG_FULLSPEED2 (1 << 0)
@@ -339,6 +341,7 @@
339 341
340#define DWC3_DSTS_CONNECTSPD (7 << 0) 342#define DWC3_DSTS_CONNECTSPD (7 << 0)
341 343
344#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
342#define DWC3_DSTS_SUPERSPEED (4 << 0) 345#define DWC3_DSTS_SUPERSPEED (4 << 0)
343#define DWC3_DSTS_HIGHSPEED (0 << 0) 346#define DWC3_DSTS_HIGHSPEED (0 << 0)
344#define DWC3_DSTS_FULLSPEED2 (1 << 0) 347#define DWC3_DSTS_FULLSPEED2 (1 << 0)
@@ -1024,6 +1027,12 @@ struct dwc3_gadget_ep_cmd_params {
1024void dwc3_set_mode(struct dwc3 *dwc, u32 mode); 1027void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
1025int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc); 1028int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc);
1026 1029
1030/* check whether we are on the DWC_usb31 core */
1031static inline bool dwc3_is_usb31(struct dwc3 *dwc)
1032{
1033 return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
1034}
1035
1027#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) 1036#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
1028int dwc3_host_init(struct dwc3 *dwc); 1037int dwc3_host_init(struct dwc3 *dwc);
1029void dwc3_host_exit(struct dwc3 *dwc); 1038void dwc3_host_exit(struct dwc3 *dwc);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 8d6b75c2f53b..eca2e6d8e041 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -356,7 +356,8 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
356 */ 356 */
357 usb_status |= dwc->gadget.is_selfpowered; 357 usb_status |= dwc->gadget.is_selfpowered;
358 358
359 if (dwc->speed == DWC3_DSTS_SUPERSPEED) { 359 if ((dwc->speed == DWC3_DSTS_SUPERSPEED) ||
360 (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
360 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 361 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
361 if (reg & DWC3_DCTL_INITU1ENA) 362 if (reg & DWC3_DCTL_INITU1ENA)
362 usb_status |= 1 << USB_DEV_STAT_U1_ENABLED; 363 usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
@@ -426,7 +427,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
426 case USB_DEVICE_U1_ENABLE: 427 case USB_DEVICE_U1_ENABLE:
427 if (state != USB_STATE_CONFIGURED) 428 if (state != USB_STATE_CONFIGURED)
428 return -EINVAL; 429 return -EINVAL;
429 if (dwc->speed != DWC3_DSTS_SUPERSPEED) 430 if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
431 (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
430 return -EINVAL; 432 return -EINVAL;
431 433
432 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 434 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -440,7 +442,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
440 case USB_DEVICE_U2_ENABLE: 442 case USB_DEVICE_U2_ENABLE:
441 if (state != USB_STATE_CONFIGURED) 443 if (state != USB_STATE_CONFIGURED)
442 return -EINVAL; 444 return -EINVAL;
443 if (dwc->speed != DWC3_DSTS_SUPERSPEED) 445 if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
446 (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
444 return -EINVAL; 447 return -EINVAL;
445 448
446 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 449 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2363bad45af8..3ac170f9d94d 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -463,7 +463,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
463 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 463 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
464 464
465 /* Burst size is only needed in SuperSpeed mode */ 465 /* Burst size is only needed in SuperSpeed mode */
466 if (dwc->gadget.speed == USB_SPEED_SUPER) { 466 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
467 u32 burst = dep->endpoint.maxburst - 1; 467 u32 burst = dep->endpoint.maxburst - 1;
468 468
469 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst); 469 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
@@ -1441,7 +1441,8 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
1441 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1441 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1442 1442
1443 speed = reg & DWC3_DSTS_CONNECTSPD; 1443 speed = reg & DWC3_DSTS_CONNECTSPD;
1444 if (speed == DWC3_DSTS_SUPERSPEED) { 1444 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1445 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
1445 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n"); 1446 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n");
1446 ret = -EINVAL; 1447 ret = -EINVAL;
1447 goto out; 1448 goto out;
@@ -1666,10 +1667,16 @@ static int dwc3_gadget_start(struct usb_gadget *g,
1666 case USB_SPEED_HIGH: 1667 case USB_SPEED_HIGH:
1667 reg |= DWC3_DSTS_HIGHSPEED; 1668 reg |= DWC3_DSTS_HIGHSPEED;
1668 break; 1669 break;
1669 case USB_SPEED_SUPER: /* FALLTHROUGH */ 1670 case USB_SPEED_SUPER_PLUS:
1670 case USB_SPEED_UNKNOWN: /* FALTHROUGH */ 1671 reg |= DWC3_DSTS_SUPERSPEED_PLUS;
1672 break;
1671 default: 1673 default:
1672 reg |= DWC3_DSTS_SUPERSPEED; 1674 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1675 dwc->maximum_speed);
1676 /* fall through */
1677 case USB_SPEED_SUPER:
1678 reg |= DWC3_DCFG_SUPERSPEED;
1679 break;
1673 } 1680 }
1674 } 1681 }
1675 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1682 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
@@ -2340,7 +2347,8 @@ static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2340 * this. Maybe it becomes part of the power saving plan. 2347 * this. Maybe it becomes part of the power saving plan.
2341 */ 2348 */
2342 2349
2343 if (speed != DWC3_DSTS_SUPERSPEED) 2350 if ((speed != DWC3_DSTS_SUPERSPEED) &&
2351 (speed != DWC3_DSTS_SUPERSPEED_PLUS))
2344 return; 2352 return;
2345 2353
2346 /* 2354 /*
@@ -2369,6 +2377,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2369 dwc3_update_ram_clk_sel(dwc, speed); 2377 dwc3_update_ram_clk_sel(dwc, speed);
2370 2378
2371 switch (speed) { 2379 switch (speed) {
2380 case DWC3_DCFG_SUPERSPEED_PLUS:
2381 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2382 dwc->gadget.ep0->maxpacket = 512;
2383 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2384 break;
2372 case DWC3_DCFG_SUPERSPEED: 2385 case DWC3_DCFG_SUPERSPEED:
2373 /* 2386 /*
2374 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2387 * WORKAROUND: DWC3 revisions <1.90a have an issue which
@@ -2410,8 +2423,9 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2410 2423
2411 /* Enable USB2 LPM Capability */ 2424 /* Enable USB2 LPM Capability */
2412 2425
2413 if ((dwc->revision > DWC3_REVISION_194A) 2426 if ((dwc->revision > DWC3_REVISION_194A) &&
2414 && (speed != DWC3_DCFG_SUPERSPEED)) { 2427 (speed != DWC3_DCFG_SUPERSPEED) &&
2428 (speed != DWC3_DCFG_SUPERSPEED_PLUS)) {
2415 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2429 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2416 reg |= DWC3_DCFG_LPM_CAP; 2430 reg |= DWC3_DCFG_LPM_CAP;
2417 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2431 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 8b14c2a13ac5..a5c62093c26c 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -54,6 +54,36 @@ static struct usb_gadget_strings **get_containers_gs(
54} 54}
55 55
56/** 56/**
57 * function_descriptors() - get function descriptors for speed
58 * @f: the function
59 * @speed: the speed
60 *
61 * Returns the descriptors or NULL if not set.
62 */
63static struct usb_descriptor_header **
64function_descriptors(struct usb_function *f,
65 enum usb_device_speed speed)
66{
67 struct usb_descriptor_header **descriptors;
68
69 switch (speed) {
70 case USB_SPEED_SUPER_PLUS:
71 descriptors = f->ssp_descriptors;
72 break;
73 case USB_SPEED_SUPER:
74 descriptors = f->ss_descriptors;
75 break;
76 case USB_SPEED_HIGH:
77 descriptors = f->hs_descriptors;
78 break;
79 default:
80 descriptors = f->fs_descriptors;
81 }
82
83 return descriptors;
84}
85
86/**
57 * next_ep_desc() - advance to the next EP descriptor 87 * next_ep_desc() - advance to the next EP descriptor
58 * @t: currect pointer within descriptor array 88 * @t: currect pointer within descriptor array
59 * 89 *
@@ -118,6 +148,13 @@ int config_ep_by_speed(struct usb_gadget *g,
118 148
119 /* select desired speed */ 149 /* select desired speed */
120 switch (g->speed) { 150 switch (g->speed) {
151 case USB_SPEED_SUPER_PLUS:
152 if (gadget_is_superspeed_plus(g)) {
153 speed_desc = f->ssp_descriptors;
154 want_comp_desc = 1;
155 break;
156 }
157 /* else: Fall trough */
121 case USB_SPEED_SUPER: 158 case USB_SPEED_SUPER:
122 if (gadget_is_superspeed(g)) { 159 if (gadget_is_superspeed(g)) {
123 speed_desc = f->ss_descriptors; 160 speed_desc = f->ss_descriptors;
@@ -161,7 +198,7 @@ ep_found:
161 (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP)) 198 (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP))
162 return -EIO; 199 return -EIO;
163 _ep->comp_desc = comp_desc; 200 _ep->comp_desc = comp_desc;
164 if (g->speed == USB_SPEED_SUPER) { 201 if (g->speed >= USB_SPEED_SUPER) {
165 switch (usb_endpoint_type(_ep->desc)) { 202 switch (usb_endpoint_type(_ep->desc)) {
166 case USB_ENDPOINT_XFER_ISOC: 203 case USB_ENDPOINT_XFER_ISOC:
167 /* mult: bits 1:0 of bmAttributes */ 204 /* mult: bits 1:0 of bmAttributes */
@@ -237,6 +274,8 @@ int usb_add_function(struct usb_configuration *config,
237 config->highspeed = true; 274 config->highspeed = true;
238 if (!config->superspeed && function->ss_descriptors) 275 if (!config->superspeed && function->ss_descriptors)
239 config->superspeed = true; 276 config->superspeed = true;
277 if (!config->superspeed_plus && function->ssp_descriptors)
278 config->superspeed_plus = true;
240 279
241done: 280done:
242 if (value) 281 if (value)
@@ -417,17 +456,7 @@ static int config_buf(struct usb_configuration *config,
417 list_for_each_entry(f, &config->functions, list) { 456 list_for_each_entry(f, &config->functions, list) {
418 struct usb_descriptor_header **descriptors; 457 struct usb_descriptor_header **descriptors;
419 458
420 switch (speed) { 459 descriptors = function_descriptors(f, speed);
421 case USB_SPEED_SUPER:
422 descriptors = f->ss_descriptors;
423 break;
424 case USB_SPEED_HIGH:
425 descriptors = f->hs_descriptors;
426 break;
427 default:
428 descriptors = f->fs_descriptors;
429 }
430
431 if (!descriptors) 460 if (!descriptors)
432 continue; 461 continue;
433 status = usb_descriptor_fillbuf(next, len, 462 status = usb_descriptor_fillbuf(next, len,
@@ -451,7 +480,7 @@ static int config_desc(struct usb_composite_dev *cdev, unsigned w_value)
451 u8 type = w_value >> 8; 480 u8 type = w_value >> 8;
452 enum usb_device_speed speed = USB_SPEED_UNKNOWN; 481 enum usb_device_speed speed = USB_SPEED_UNKNOWN;
453 482
454 if (gadget->speed == USB_SPEED_SUPER) 483 if (gadget->speed >= USB_SPEED_SUPER)
455 speed = gadget->speed; 484 speed = gadget->speed;
456 else if (gadget_is_dualspeed(gadget)) { 485 else if (gadget_is_dualspeed(gadget)) {
457 int hs = 0; 486 int hs = 0;
@@ -482,6 +511,10 @@ static int config_desc(struct usb_composite_dev *cdev, unsigned w_value)
482check_config: 511check_config:
483 /* ignore configs that won't work at this speed */ 512 /* ignore configs that won't work at this speed */
484 switch (speed) { 513 switch (speed) {
514 case USB_SPEED_SUPER_PLUS:
515 if (!c->superspeed_plus)
516 continue;
517 break;
485 case USB_SPEED_SUPER: 518 case USB_SPEED_SUPER:
486 if (!c->superspeed) 519 if (!c->superspeed)
487 continue; 520 continue;
@@ -509,18 +542,24 @@ static int count_configs(struct usb_composite_dev *cdev, unsigned type)
509 unsigned count = 0; 542 unsigned count = 0;
510 int hs = 0; 543 int hs = 0;
511 int ss = 0; 544 int ss = 0;
545 int ssp = 0;
512 546
513 if (gadget_is_dualspeed(gadget)) { 547 if (gadget_is_dualspeed(gadget)) {
514 if (gadget->speed == USB_SPEED_HIGH) 548 if (gadget->speed == USB_SPEED_HIGH)
515 hs = 1; 549 hs = 1;
516 if (gadget->speed == USB_SPEED_SUPER) 550 if (gadget->speed == USB_SPEED_SUPER)
517 ss = 1; 551 ss = 1;
552 if (gadget->speed == USB_SPEED_SUPER_PLUS)
553 ssp = 1;
518 if (type == USB_DT_DEVICE_QUALIFIER) 554 if (type == USB_DT_DEVICE_QUALIFIER)
519 hs = !hs; 555 hs = !hs;
520 } 556 }
521 list_for_each_entry(c, &cdev->configs, list) { 557 list_for_each_entry(c, &cdev->configs, list) {
522 /* ignore configs that won't work at this speed */ 558 /* ignore configs that won't work at this speed */
523 if (ss) { 559 if (ssp) {
560 if (!c->superspeed_plus)
561 continue;
562 } else if (ss) {
524 if (!c->superspeed) 563 if (!c->superspeed)
525 continue; 564 continue;
526 } else if (hs) { 565 } else if (hs) {
@@ -597,6 +636,48 @@ static int bos_desc(struct usb_composite_dev *cdev)
597 ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat; 636 ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
598 ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat; 637 ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
599 638
639 /* The SuperSpeedPlus USB Device Capability descriptor */
640 if (gadget_is_superspeed_plus(cdev->gadget)) {
641 struct usb_ssp_cap_descriptor *ssp_cap;
642
643 ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
644 bos->bNumDeviceCaps++;
645
646 /*
647 * Report typical values.
648 */
649
650 le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(1));
651 ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
652 ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
653 ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
654
655 /* SSAC = 1 (2 attributes) */
656 ssp_cap->bmAttributes = cpu_to_le32(1);
657
658 /* Min RX/TX Lane Count = 1 */
659 ssp_cap->wFunctionalitySupport = (1 << 8) | (1 << 12);
660
661 /*
662 * bmSublinkSpeedAttr[0]:
663 * ST = Symmetric, RX
664 * LSE = 3 (Gbps)
665 * LP = 1 (SuperSpeedPlus)
666 * LSM = 10 (10 Gbps)
667 */
668 ssp_cap->bmSublinkSpeedAttr[0] =
669 (3 << 4) | (1 << 14) | (0xa << 16);
670 /*
671 * bmSublinkSpeedAttr[1] =
672 * ST = Symmetric, TX
673 * LSE = 3 (Gbps)
674 * LP = 1 (SuperSpeedPlus)
675 * LSM = 10 (10 Gbps)
676 */
677 ssp_cap->bmSublinkSpeedAttr[1] =
678 (3 << 4) | (1 << 14) | (0xa << 16) | (1 << 7);
679 }
680
600 return le16_to_cpu(bos->wTotalLength); 681 return le16_to_cpu(bos->wTotalLength);
601} 682}
602 683
@@ -690,16 +771,7 @@ static int set_config(struct usb_composite_dev *cdev,
690 * function's setup callback instead of the current 771 * function's setup callback instead of the current
691 * configuration's setup callback. 772 * configuration's setup callback.
692 */ 773 */
693 switch (gadget->speed) { 774 descriptors = function_descriptors(f, gadget->speed);
694 case USB_SPEED_SUPER:
695 descriptors = f->ss_descriptors;
696 break;
697 case USB_SPEED_HIGH:
698 descriptors = f->hs_descriptors;
699 break;
700 default:
701 descriptors = f->fs_descriptors;
702 }
703 775
704 for (; *descriptors; ++descriptors) { 776 for (; *descriptors; ++descriptors) {
705 struct usb_endpoint_descriptor *ep; 777 struct usb_endpoint_descriptor *ep;
@@ -819,8 +891,9 @@ int usb_add_config(struct usb_composite_dev *cdev,
819 } else { 891 } else {
820 unsigned i; 892 unsigned i;
821 893
822 DBG(cdev, "cfg %d/%p speeds:%s%s%s\n", 894 DBG(cdev, "cfg %d/%p speeds:%s%s%s%s\n",
823 config->bConfigurationValue, config, 895 config->bConfigurationValue, config,
896 config->superspeed_plus ? " superplus" : "",
824 config->superspeed ? " super" : "", 897 config->superspeed ? " super" : "",
825 config->highspeed ? " high" : "", 898 config->highspeed ? " high" : "",
826 config->fullspeed 899 config->fullspeed
@@ -1499,7 +1572,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1499 cdev->gadget->ep0->maxpacket; 1572 cdev->gadget->ep0->maxpacket;
1500 if (gadget_is_superspeed(gadget)) { 1573 if (gadget_is_superspeed(gadget)) {
1501 if (gadget->speed >= USB_SPEED_SUPER) { 1574 if (gadget->speed >= USB_SPEED_SUPER) {
1502 cdev->desc.bcdUSB = cpu_to_le16(0x0300); 1575 cdev->desc.bcdUSB = cpu_to_le16(0x0310);
1503 cdev->desc.bMaxPacketSize0 = 9; 1576 cdev->desc.bMaxPacketSize0 = 9;
1504 } else { 1577 } else {
1505 cdev->desc.bcdUSB = cpu_to_le16(0x0210); 1578 cdev->desc.bcdUSB = cpu_to_le16(0x0210);
@@ -1634,15 +1707,24 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1634 *((u8 *)req->buf) = value; 1707 *((u8 *)req->buf) = value;
1635 value = min(w_length, (u16) 1); 1708 value = min(w_length, (u16) 1);
1636 break; 1709 break;
1637
1638 /*
1639 * USB 3.0 additions:
1640 * Function driver should handle get_status request. If such cb
1641 * wasn't supplied we respond with default value = 0
1642 * Note: function driver should supply such cb only for the first
1643 * interface of the function
1644 */
1645 case USB_REQ_GET_STATUS: 1710 case USB_REQ_GET_STATUS:
1711 if (gadget_is_otg(gadget) && gadget->hnp_polling_support &&
1712 (w_index == OTG_STS_SELECTOR)) {
1713 if (ctrl->bRequestType != (USB_DIR_IN |
1714 USB_RECIP_DEVICE))
1715 goto unknown;
1716 *((u8 *)req->buf) = gadget->host_request_flag;
1717 value = 1;
1718 break;
1719 }
1720
1721 /*
1722 * USB 3.0 additions:
1723 * Function driver should handle get_status request. If such cb
1724 * wasn't supplied we respond with default value = 0
1725 * Note: function driver should supply such cb only for the
1726 * first interface of the function
1727 */
1646 if (!gadget_is_superspeed(gadget)) 1728 if (!gadget_is_superspeed(gadget))
1647 goto unknown; 1729 goto unknown;
1648 if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE)) 1730 if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 0fafa7a1b6f6..e6c0542a063b 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -163,7 +163,8 @@ EXPORT_SYMBOL_GPL(usb_copy_descriptors);
163int usb_assign_descriptors(struct usb_function *f, 163int usb_assign_descriptors(struct usb_function *f,
164 struct usb_descriptor_header **fs, 164 struct usb_descriptor_header **fs,
165 struct usb_descriptor_header **hs, 165 struct usb_descriptor_header **hs,
166 struct usb_descriptor_header **ss) 166 struct usb_descriptor_header **ss,
167 struct usb_descriptor_header **ssp)
167{ 168{
168 struct usb_gadget *g = f->config->cdev->gadget; 169 struct usb_gadget *g = f->config->cdev->gadget;
169 170
@@ -182,6 +183,11 @@ int usb_assign_descriptors(struct usb_function *f,
182 if (!f->ss_descriptors) 183 if (!f->ss_descriptors)
183 goto err; 184 goto err;
184 } 185 }
186 if (ssp && gadget_is_superspeed_plus(g)) {
187 f->ssp_descriptors = usb_copy_descriptors(ssp);
188 if (!f->ssp_descriptors)
189 goto err;
190 }
185 return 0; 191 return 0;
186err: 192err:
187 usb_free_all_descriptors(f); 193 usb_free_all_descriptors(f);
@@ -194,6 +200,7 @@ void usb_free_all_descriptors(struct usb_function *f)
194 usb_free_descriptors(f->fs_descriptors); 200 usb_free_descriptors(f->fs_descriptors);
195 usb_free_descriptors(f->hs_descriptors); 201 usb_free_descriptors(f->hs_descriptors);
196 usb_free_descriptors(f->ss_descriptors); 202 usb_free_descriptors(f->ss_descriptors);
203 usb_free_descriptors(f->ssp_descriptors);
197} 204}
198EXPORT_SYMBOL_GPL(usb_free_all_descriptors); 205EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
199 206
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 590c44989e5e..c6cc15ebeed6 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1229,6 +1229,7 @@ static void purge_configs_funcs(struct gadget_info *gi)
1229 } 1229 }
1230 c->next_interface_id = 0; 1230 c->next_interface_id = 0;
1231 memset(c->interface, 0, sizeof(c->interface)); 1231 memset(c->interface, 0, sizeof(c->interface));
1232 c->superspeed_plus = 0;
1232 c->superspeed = 0; 1233 c->superspeed = 0;
1233 c->highspeed = 0; 1234 c->highspeed = 0;
1234 c->fullspeed = 0; 1235 c->fullspeed = 0;
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 2fa1e80a3ce7..a30766ca4226 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -685,7 +685,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
685 acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress; 685 acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress;
686 686
687 status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function, 687 status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
688 acm_ss_function); 688 acm_ss_function, NULL);
689 if (status) 689 if (status)
690 goto fail; 690 goto fail;
691 691
@@ -777,10 +777,10 @@ static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
777 return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num); 777 return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
778} 778}
779 779
780CONFIGFS_ATTR_RO(f_acm_port_, num); 780CONFIGFS_ATTR_RO(f_acm_, port_num);
781 781
782static struct configfs_attribute *acm_attrs[] = { 782static struct configfs_attribute *acm_attrs[] = {
783 &f_acm_port_attr_num, 783 &f_acm_attr_port_num,
784 NULL, 784 NULL,
785}; 785};
786 786
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 7ad60ee41914..4c488d15b6f6 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -786,7 +786,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
786 fs_ecm_notify_desc.bEndpointAddress; 786 fs_ecm_notify_desc.bEndpointAddress;
787 787
788 status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function, 788 status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
789 ecm_ss_function); 789 ecm_ss_function, NULL);
790 if (status) 790 if (status)
791 goto fail; 791 goto fail;
792 792
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index cad35a502d3f..d58bfc32be9e 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -309,7 +309,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
309 eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; 309 eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;
310 310
311 status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function, 311 status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function,
312 eem_ss_function); 312 eem_ss_function, NULL);
313 if (status) 313 if (status)
314 goto fail; 314 goto fail;
315 315
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index cf43e9e18368..8cfce105c7ee 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -684,44 +684,38 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
684static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) 684static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
685{ 685{
686 struct ffs_epfile *epfile = file->private_data; 686 struct ffs_epfile *epfile = file->private_data;
687 struct usb_request *req;
687 struct ffs_ep *ep; 688 struct ffs_ep *ep;
688 char *data = NULL; 689 char *data = NULL;
689 ssize_t ret, data_len = -EINVAL; 690 ssize_t ret, data_len = -EINVAL;
690 int halt; 691 int halt;
691 692
692 /* Are we still active? */ 693 /* Are we still active? */
693 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) { 694 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
694 ret = -ENODEV; 695 return -ENODEV;
695 goto error;
696 }
697 696
698 /* Wait for endpoint to be enabled */ 697 /* Wait for endpoint to be enabled */
699 ep = epfile->ep; 698 ep = epfile->ep;
700 if (!ep) { 699 if (!ep) {
701 if (file->f_flags & O_NONBLOCK) { 700 if (file->f_flags & O_NONBLOCK)
702 ret = -EAGAIN; 701 return -EAGAIN;
703 goto error;
704 }
705 702
706 ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep)); 703 ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
707 if (ret) { 704 if (ret)
708 ret = -EINTR; 705 return -EINTR;
709 goto error;
710 }
711 } 706 }
712 707
713 /* Do we halt? */ 708 /* Do we halt? */
714 halt = (!io_data->read == !epfile->in); 709 halt = (!io_data->read == !epfile->in);
715 if (halt && epfile->isoc) { 710 if (halt && epfile->isoc)
716 ret = -EINVAL; 711 return -EINVAL;
717 goto error;
718 }
719 712
720 /* Allocate & copy */ 713 /* Allocate & copy */
721 if (!halt) { 714 if (!halt) {
722 /* 715 /*
723 * if we _do_ wait above, the epfile->ffs->gadget might be NULL 716 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
724 * before the waiting completes, so do not assign to 'gadget' earlier 717 * before the waiting completes, so do not assign to 'gadget'
718 * earlier
725 */ 719 */
726 struct usb_gadget *gadget = epfile->ffs->gadget; 720 struct usb_gadget *gadget = epfile->ffs->gadget;
727 size_t copied; 721 size_t copied;
@@ -763,17 +757,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
763 if (epfile->ep != ep) { 757 if (epfile->ep != ep) {
764 /* In the meantime, endpoint got disabled or changed. */ 758 /* In the meantime, endpoint got disabled or changed. */
765 ret = -ESHUTDOWN; 759 ret = -ESHUTDOWN;
766 spin_unlock_irq(&epfile->ffs->eps_lock);
767 } else if (halt) { 760 } else if (halt) {
768 /* Halt */ 761 /* Halt */
769 if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep)) 762 if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
770 usb_ep_set_halt(ep->ep); 763 usb_ep_set_halt(ep->ep);
771 spin_unlock_irq(&epfile->ffs->eps_lock);
772 ret = -EBADMSG; 764 ret = -EBADMSG;
773 } else { 765 } else if (unlikely(data_len == -EINVAL)) {
774 /* Fire the request */
775 struct usb_request *req;
776
777 /* 766 /*
778 * Sanity Check: even though data_len can't be used 767 * Sanity Check: even though data_len can't be used
779 * uninitialized at the time I write this comment, some 768 * uninitialized at the time I write this comment, some
@@ -785,80 +774,80 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
785 * For such reason, we're adding this redundant sanity check 774 * For such reason, we're adding this redundant sanity check
786 * here. 775 * here.
787 */ 776 */
788 if (unlikely(data_len == -EINVAL)) { 777 WARN(1, "%s: data_len == -EINVAL\n", __func__);
789 WARN(1, "%s: data_len == -EINVAL\n", __func__); 778 ret = -EINVAL;
790 ret = -EINVAL; 779 } else if (!io_data->aio) {
791 goto error_lock; 780 DECLARE_COMPLETION_ONSTACK(done);
792 } 781 bool interrupted = false;
793
794 if (io_data->aio) {
795 req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
796 if (unlikely(!req))
797 goto error_lock;
798
799 req->buf = data;
800 req->length = data_len;
801 782
802 io_data->buf = data; 783 req = ep->req;
803 io_data->ep = ep->ep; 784 req->buf = data;
804 io_data->req = req; 785 req->length = data_len;
805 io_data->ffs = epfile->ffs;
806 786
807 req->context = io_data; 787 req->context = &done;
808 req->complete = ffs_epfile_async_io_complete; 788 req->complete = ffs_epfile_io_complete;
809 789
810 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); 790 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
811 if (unlikely(ret)) { 791 if (unlikely(ret < 0))
812 usb_ep_free_request(ep->ep, req); 792 goto error_lock;
813 goto error_lock;
814 }
815 ret = -EIOCBQUEUED;
816 793
817 spin_unlock_irq(&epfile->ffs->eps_lock); 794 spin_unlock_irq(&epfile->ffs->eps_lock);
818 } else {
819 DECLARE_COMPLETION_ONSTACK(done);
820 795
821 req = ep->req; 796 if (unlikely(wait_for_completion_interruptible(&done))) {
822 req->buf = data; 797 /*
823 req->length = data_len; 798 * To avoid race condition with ffs_epfile_io_complete,
799 * dequeue the request first then check
800 * status. usb_ep_dequeue API should guarantee no race
801 * condition with req->complete callback.
802 */
803 usb_ep_dequeue(ep->ep, req);
804 interrupted = ep->status < 0;
805 }
824 806
825 req->context = &done; 807 /*
826 req->complete = ffs_epfile_io_complete; 808 * XXX We may end up silently droping data here. Since data_len
809 * (i.e. req->length) may be bigger than len (after being
810 * rounded up to maxpacketsize), we may end up with more data
811 * then user space has space for.
812 */
813 ret = interrupted ? -EINTR : ep->status;
814 if (io_data->read && ret > 0) {
815 ret = copy_to_iter(data, ret, &io_data->data);
816 if (!ret)
817 ret = -EFAULT;
818 }
819 goto error_mutex;
820 } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
821 ret = -ENOMEM;
822 } else {
823 req->buf = data;
824 req->length = data_len;
827 825
828 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); 826 io_data->buf = data;
827 io_data->ep = ep->ep;
828 io_data->req = req;
829 io_data->ffs = epfile->ffs;
829 830
830 spin_unlock_irq(&epfile->ffs->eps_lock); 831 req->context = io_data;
832 req->complete = ffs_epfile_async_io_complete;
831 833
832 if (unlikely(ret < 0)) { 834 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
833 /* nop */ 835 if (unlikely(ret)) {
834 } else if (unlikely( 836 usb_ep_free_request(ep->ep, req);
835 wait_for_completion_interruptible(&done))) { 837 goto error_lock;
836 ret = -EINTR;
837 usb_ep_dequeue(ep->ep, req);
838 } else {
839 /*
840 * XXX We may end up silently droping data
841 * here. Since data_len (i.e. req->length) may
842 * be bigger than len (after being rounded up
843 * to maxpacketsize), we may end up with more
844 * data then user space has space for.
845 */
846 ret = ep->status;
847 if (io_data->read && ret > 0) {
848 ret = copy_to_iter(data, ret, &io_data->data);
849 if (!ret)
850 ret = -EFAULT;
851 }
852 }
853 kfree(data);
854 } 838 }
855 }
856 839
857 mutex_unlock(&epfile->mutex); 840 ret = -EIOCBQUEUED;
858 return ret; 841 /*
842 * Do not kfree the buffer in this function. It will be freed
843 * by ffs_user_copy_worker.
844 */
845 data = NULL;
846 }
859 847
860error_lock: 848error_lock:
861 spin_unlock_irq(&epfile->ffs->eps_lock); 849 spin_unlock_irq(&epfile->ffs->eps_lock);
850error_mutex:
862 mutex_unlock(&epfile->mutex); 851 mutex_unlock(&epfile->mutex);
863error: 852error:
864 kfree(data); 853 kfree(data);
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 99285b416308..51980c50546d 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -646,7 +646,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
646 hidg_fs_out_ep_desc.bEndpointAddress; 646 hidg_fs_out_ep_desc.bEndpointAddress;
647 647
648 status = usb_assign_descriptors(f, hidg_fs_descriptors, 648 status = usb_assign_descriptors(f, hidg_fs_descriptors,
649 hidg_hs_descriptors, NULL); 649 hidg_hs_descriptors, NULL, NULL);
650 if (status) 650 if (status)
651 goto fail; 651 goto fail;
652 652
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index ddc3aad886b7..3a9f8f9c77bd 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -211,7 +211,7 @@ autoconf_fail:
211 ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress; 211 ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
212 212
213 ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs, 213 ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs,
214 ss_loopback_descs); 214 ss_loopback_descs, NULL);
215 if (ret) 215 if (ret)
216 return ret; 216 return ret;
217 217
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 223ccf89d226..ee9390b07c17 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -3093,7 +3093,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
3093 fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst; 3093 fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
3094 3094
3095 ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function, 3095 ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
3096 fsg_ss_function); 3096 fsg_ss_function, fsg_ss_function);
3097 if (ret) 3097 if (ret)
3098 goto autoconf_fail; 3098 goto autoconf_fail;
3099 3099
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index fb1fe96d3215..84c0ee5ebd1e 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -56,7 +56,7 @@ static const char f_midi_longname[] = "MIDI Gadget";
56 * USB <- IN endpoint <- rawmidi 56 * USB <- IN endpoint <- rawmidi
57 */ 57 */
58struct gmidi_in_port { 58struct gmidi_in_port {
59 struct f_midi *midi; 59 struct snd_rawmidi_substream *substream;
60 int active; 60 int active;
61 uint8_t cable; 61 uint8_t cable;
62 uint8_t state; 62 uint8_t state;
@@ -78,9 +78,7 @@ struct f_midi {
78 struct snd_rawmidi *rmidi; 78 struct snd_rawmidi *rmidi;
79 u8 ms_id; 79 u8 ms_id;
80 80
81 struct snd_rawmidi_substream *in_substream[MAX_PORTS];
82 struct snd_rawmidi_substream *out_substream[MAX_PORTS]; 81 struct snd_rawmidi_substream *out_substream[MAX_PORTS];
83 struct gmidi_in_port *in_port[MAX_PORTS];
84 82
85 unsigned long out_triggered; 83 unsigned long out_triggered;
86 struct tasklet_struct tasklet; 84 struct tasklet_struct tasklet;
@@ -92,6 +90,8 @@ struct f_midi {
92 /* This fifo is used as a buffer ring for pre-allocated IN usb_requests */ 90 /* This fifo is used as a buffer ring for pre-allocated IN usb_requests */
93 DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *); 91 DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *);
94 unsigned int in_last_port; 92 unsigned int in_last_port;
93
94 struct gmidi_in_port in_ports_array[/* in_ports */];
95}; 95};
96 96
97static inline struct f_midi *func_to_midi(struct usb_function *f) 97static inline struct f_midi *func_to_midi(struct usb_function *f)
@@ -518,98 +518,95 @@ static void f_midi_drop_out_substreams(struct f_midi *midi)
518{ 518{
519 unsigned int i; 519 unsigned int i;
520 520
521 for (i = 0; i < MAX_PORTS; i++) { 521 for (i = 0; i < midi->in_ports; i++) {
522 struct gmidi_in_port *port = midi->in_port[i]; 522 struct gmidi_in_port *port = midi->in_ports_array + i;
523 struct snd_rawmidi_substream *substream = midi->in_substream[i]; 523 struct snd_rawmidi_substream *substream = port->substream;
524 524 if (port->active && substream)
525 if (!port) 525 snd_rawmidi_drop_output(substream);
526 break;
527
528 if (!port->active || !substream)
529 continue;
530
531 snd_rawmidi_drop_output(substream);
532 } 526 }
533} 527}
534 528
535static void f_midi_transmit(struct f_midi *midi) 529static int f_midi_do_transmit(struct f_midi *midi, struct usb_ep *ep)
536{ 530{
537 struct usb_ep *ep = midi->in_ep; 531 struct usb_request *req = NULL;
538 bool active; 532 unsigned int len, i;
539 533 bool active = false;
540 /* We only care about USB requests if IN endpoint is enabled */ 534 int err;
541 if (!ep || !ep->enabled)
542 goto drop_out;
543 535
544 do { 536 /*
545 struct usb_request *req = NULL; 537 * We peek the request in order to reuse it if it fails to enqueue on
546 unsigned int len, i; 538 * its endpoint
539 */
540 len = kfifo_peek(&midi->in_req_fifo, &req);
541 if (len != 1) {
542 ERROR(midi, "%s: Couldn't get usb request\n", __func__);
543 return -1;
544 }
547 545
548 active = false; 546 /*
547 * If buffer overrun, then we ignore this transmission.
548 * IMPORTANT: This will cause the user-space rawmidi device to block
549 * until a) usb requests have been completed or b) snd_rawmidi_write()
550 * times out.
551 */
552 if (req->length > 0)
553 return 0;
549 554
550 /* We peek the request in order to reuse it if it fails 555 for (i = midi->in_last_port; i < midi->in_ports; ++i) {
551 * to enqueue on its endpoint */ 556 struct gmidi_in_port *port = midi->in_ports_array + i;
552 len = kfifo_peek(&midi->in_req_fifo, &req); 557 struct snd_rawmidi_substream *substream = port->substream;
553 if (len != 1) {
554 ERROR(midi, "%s: Couldn't get usb request\n", __func__);
555 goto drop_out;
556 }
557 558
558 /* If buffer overrun, then we ignore this transmission. 559 if (!port->active || !substream)
559 * IMPORTANT: This will cause the user-space rawmidi device to block until a) usb 560 continue;
560 * requests have been completed or b) snd_rawmidi_write() times out. */
561 if (req->length > 0)
562 return;
563 561
564 for (i = midi->in_last_port; i < MAX_PORTS; i++) { 562 while (req->length + 3 < midi->buflen) {
565 struct gmidi_in_port *port = midi->in_port[i]; 563 uint8_t b;
566 struct snd_rawmidi_substream *substream = midi->in_substream[i];
567 564
568 if (!port) { 565 if (snd_rawmidi_transmit(substream, &b, 1) != 1) {
569 /* Reset counter when we reach the last available port */ 566 port->active = 0;
570 midi->in_last_port = 0;
571 break; 567 break;
572 } 568 }
569 f_midi_transmit_byte(req, port, b);
570 }
573 571
574 if (!port->active || !substream) 572 active = !!port->active;
575 continue; 573 if (active)
574 break;
575 }
576 midi->in_last_port = active ? i : 0;
576 577
577 while (req->length + 3 < midi->buflen) { 578 if (req->length <= 0)
578 uint8_t b; 579 goto done;
579 580
580 if (snd_rawmidi_transmit(substream, &b, 1) != 1) { 581 err = usb_ep_queue(ep, req, GFP_ATOMIC);
581 port->active = 0; 582 if (err < 0) {
582 break; 583 ERROR(midi, "%s failed to queue req: %d\n",
583 } 584 midi->in_ep->name, err);
584 f_midi_transmit_byte(req, port, b); 585 req->length = 0; /* Re-use request next time. */
585 } 586 } else {
587 /* Upon success, put request at the back of the queue. */
588 kfifo_skip(&midi->in_req_fifo);
589 kfifo_put(&midi->in_req_fifo, req);
590 }
586 591
587 active = !!port->active; 592done:
588 /* Check if last port is still active, which means that 593 return active;
589 * there is still data on that substream but this current 594}
590 * request run out of space. */
591 if (active) {
592 midi->in_last_port = i;
593 /* There is no need to re-iterate though midi ports. */
594 break;
595 }
596 }
597 595
598 if (req->length > 0) { 596static void f_midi_transmit(struct f_midi *midi)
599 int err; 597{
598 struct usb_ep *ep = midi->in_ep;
599 int ret;
600 600
601 err = usb_ep_queue(ep, req, GFP_ATOMIC); 601 /* We only care about USB requests if IN endpoint is enabled */
602 if (err < 0) { 602 if (!ep || !ep->enabled)
603 ERROR(midi, "%s failed to queue req: %d\n", 603 goto drop_out;
604 midi->in_ep->name, err); 604
605 req->length = 0; /* Re-use request next time. */ 605 do {
606 } else { 606 ret = f_midi_do_transmit(midi, ep);
607 /* Upon success, put request at the back of the queue. */ 607 if (ret < 0)
608 kfifo_skip(&midi->in_req_fifo); 608 goto drop_out;
609 kfifo_put(&midi->in_req_fifo, req); 609 } while (ret);
610 }
611 }
612 } while (active);
613 610
614 return; 611 return;
615 612
@@ -626,13 +623,15 @@ static void f_midi_in_tasklet(unsigned long data)
626static int f_midi_in_open(struct snd_rawmidi_substream *substream) 623static int f_midi_in_open(struct snd_rawmidi_substream *substream)
627{ 624{
628 struct f_midi *midi = substream->rmidi->private_data; 625 struct f_midi *midi = substream->rmidi->private_data;
626 struct gmidi_in_port *port;
629 627
630 if (!midi->in_port[substream->number]) 628 if (substream->number >= midi->in_ports)
631 return -EINVAL; 629 return -EINVAL;
632 630
633 VDBG(midi, "%s()\n", __func__); 631 VDBG(midi, "%s()\n", __func__);
634 midi->in_substream[substream->number] = substream; 632 port = midi->in_ports_array + substream->number;
635 midi->in_port[substream->number]->state = STATE_UNKNOWN; 633 port->substream = substream;
634 port->state = STATE_UNKNOWN;
636 return 0; 635 return 0;
637} 636}
638 637
@@ -648,11 +647,11 @@ static void f_midi_in_trigger(struct snd_rawmidi_substream *substream, int up)
648{ 647{
649 struct f_midi *midi = substream->rmidi->private_data; 648 struct f_midi *midi = substream->rmidi->private_data;
650 649
651 if (!midi->in_port[substream->number]) 650 if (substream->number >= midi->in_ports)
652 return; 651 return;
653 652
654 VDBG(midi, "%s() %d\n", __func__, up); 653 VDBG(midi, "%s() %d\n", __func__, up);
655 midi->in_port[substream->number]->active = up; 654 midi->in_ports_array[substream->number].active = up;
656 if (up) 655 if (up)
657 tasklet_hi_schedule(&midi->tasklet); 656 tasklet_hi_schedule(&midi->tasklet);
658} 657}
@@ -1128,14 +1127,11 @@ static void f_midi_free(struct usb_function *f)
1128{ 1127{
1129 struct f_midi *midi; 1128 struct f_midi *midi;
1130 struct f_midi_opts *opts; 1129 struct f_midi_opts *opts;
1131 int i;
1132 1130
1133 midi = func_to_midi(f); 1131 midi = func_to_midi(f);
1134 opts = container_of(f->fi, struct f_midi_opts, func_inst); 1132 opts = container_of(f->fi, struct f_midi_opts, func_inst);
1135 kfree(midi->id); 1133 kfree(midi->id);
1136 mutex_lock(&opts->lock); 1134 mutex_lock(&opts->lock);
1137 for (i = opts->in_ports - 1; i >= 0; --i)
1138 kfree(midi->in_port[i]);
1139 kfifo_free(&midi->in_req_fifo); 1135 kfifo_free(&midi->in_req_fifo);
1140 kfree(midi); 1136 kfree(midi);
1141 --opts->refcnt; 1137 --opts->refcnt;
@@ -1163,7 +1159,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
1163 1159
1164static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) 1160static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
1165{ 1161{
1166 struct f_midi *midi; 1162 struct f_midi *midi = NULL;
1167 struct f_midi_opts *opts; 1163 struct f_midi_opts *opts;
1168 int status, i; 1164 int status, i;
1169 1165
@@ -1172,37 +1168,26 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
1172 mutex_lock(&opts->lock); 1168 mutex_lock(&opts->lock);
1173 /* sanity check */ 1169 /* sanity check */
1174 if (opts->in_ports > MAX_PORTS || opts->out_ports > MAX_PORTS) { 1170 if (opts->in_ports > MAX_PORTS || opts->out_ports > MAX_PORTS) {
1175 mutex_unlock(&opts->lock); 1171 status = -EINVAL;
1176 return ERR_PTR(-EINVAL); 1172 goto setup_fail;
1177 } 1173 }
1178 1174
1179 /* allocate and initialize one new instance */ 1175 /* allocate and initialize one new instance */
1180 midi = kzalloc(sizeof(*midi), GFP_KERNEL); 1176 midi = kzalloc(
1177 sizeof(*midi) + opts->in_ports * sizeof(*midi->in_ports_array),
1178 GFP_KERNEL);
1181 if (!midi) { 1179 if (!midi) {
1182 mutex_unlock(&opts->lock); 1180 status = -ENOMEM;
1183 return ERR_PTR(-ENOMEM); 1181 goto setup_fail;
1184 } 1182 }
1185 1183
1186 for (i = 0; i < opts->in_ports; i++) { 1184 for (i = 0; i < opts->in_ports; i++)
1187 struct gmidi_in_port *port = kzalloc(sizeof(*port), GFP_KERNEL); 1185 midi->in_ports_array[i].cable = i;
1188
1189 if (!port) {
1190 status = -ENOMEM;
1191 mutex_unlock(&opts->lock);
1192 goto setup_fail;
1193 }
1194
1195 port->midi = midi;
1196 port->active = 0;
1197 port->cable = i;
1198 midi->in_port[i] = port;
1199 }
1200 1186
1201 /* set up ALSA midi devices */ 1187 /* set up ALSA midi devices */
1202 midi->id = kstrdup(opts->id, GFP_KERNEL); 1188 midi->id = kstrdup(opts->id, GFP_KERNEL);
1203 if (opts->id && !midi->id) { 1189 if (opts->id && !midi->id) {
1204 status = -ENOMEM; 1190 status = -ENOMEM;
1205 mutex_unlock(&opts->lock);
1206 goto setup_fail; 1191 goto setup_fail;
1207 } 1192 }
1208 midi->in_ports = opts->in_ports; 1193 midi->in_ports = opts->in_ports;
@@ -1229,8 +1214,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
1229 return &midi->func; 1214 return &midi->func;
1230 1215
1231setup_fail: 1216setup_fail:
1232 for (--i; i >= 0; i--) 1217 mutex_unlock(&opts->lock);
1233 kfree(midi->in_port[i]);
1234 kfree(midi); 1218 kfree(midi);
1235 return ERR_PTR(status); 1219 return ERR_PTR(status);
1236} 1220}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 7ad798ace1e5..97f0a9bc84df 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1432,7 +1432,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
1432 fs_ncm_notify_desc.bEndpointAddress; 1432 fs_ncm_notify_desc.bEndpointAddress;
1433 1433
1434 status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function, 1434 status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
1435 NULL); 1435 NULL, NULL);
1436 if (status) 1436 if (status)
1437 goto fail; 1437 goto fail;
1438 1438
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index d6396e0909ee..d43e86cea74f 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -364,7 +364,8 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f)
364 obex_hs_ep_out_desc.bEndpointAddress = 364 obex_hs_ep_out_desc.bEndpointAddress =
365 obex_fs_ep_out_desc.bEndpointAddress; 365 obex_fs_ep_out_desc.bEndpointAddress;
366 366
367 status = usb_assign_descriptors(f, fs_function, hs_function, NULL); 367 status = usb_assign_descriptors(f, fs_function, hs_function, NULL,
368 NULL);
368 if (status) 369 if (status)
369 goto fail; 370 goto fail;
370 371
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 157441dbfeba..0473d619d5bf 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -541,7 +541,7 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f)
541 541
542 /* Do not try to bind Phonet twice... */ 542 /* Do not try to bind Phonet twice... */
543 status = usb_assign_descriptors(f, fs_pn_function, hs_pn_function, 543 status = usb_assign_descriptors(f, fs_pn_function, hs_pn_function,
544 NULL); 544 NULL, NULL);
545 if (status) 545 if (status)
546 goto err; 546 goto err;
547 547
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 26ccad5d8680..c45104e3a64b 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1051,7 +1051,7 @@ autoconf_fail:
1051 ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress; 1051 ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
1052 1052
1053 ret = usb_assign_descriptors(f, fs_printer_function, 1053 ret = usb_assign_descriptors(f, fs_printer_function,
1054 hs_printer_function, ss_printer_function); 1054 hs_printer_function, ss_printer_function, NULL);
1055 if (ret) 1055 if (ret)
1056 return ret; 1056 return ret;
1057 1057
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index e587767e374c..d99dd9542048 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -783,7 +783,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
783 ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress; 783 ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
784 784
785 status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function, 785 status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
786 eth_ss_function); 786 eth_ss_function, NULL);
787 if (status) 787 if (status)
788 goto fail; 788 goto fail;
789 789
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index 6bb44d613bab..cb00ada21d9c 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -236,7 +236,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
236 gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress; 236 gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
237 237
238 status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function, 238 status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
239 gser_ss_function); 239 gser_ss_function, NULL);
240 if (status) 240 if (status)
241 goto fail; 241 goto fail;
242 dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n", 242 dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 242ba5caffe5..df0189ddfdd5 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -437,7 +437,7 @@ no_iso:
437 ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; 437 ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
438 438
439 ret = usb_assign_descriptors(f, fs_source_sink_descs, 439 ret = usb_assign_descriptors(f, fs_source_sink_descs,
440 hs_source_sink_descs, ss_source_sink_descs); 440 hs_source_sink_descs, ss_source_sink_descs, NULL);
441 if (ret) 441 if (ret)
442 return ret; 442 return ret;
443 443
diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c
index 829c78de9eba..434b983f3b4c 100644
--- a/drivers/usb/gadget/function/f_subset.c
+++ b/drivers/usb/gadget/function/f_subset.c
@@ -362,7 +362,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
362 fs_subset_out_desc.bEndpointAddress; 362 fs_subset_out_desc.bEndpointAddress;
363 363
364 status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function, 364 status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function,
365 ss_eth_function); 365 ss_eth_function, NULL);
366 if (status) 366 if (status)
367 goto fail; 367 goto fail;
368 368
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index bad007b5a190..dfb733047a4c 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -2098,7 +2098,7 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
2098 uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress; 2098 uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
2099 2099
2100 ret = usb_assign_descriptors(f, uasp_fs_function_desc, 2100 ret = usb_assign_descriptors(f, uasp_fs_function_desc,
2101 uasp_hs_function_desc, uasp_ss_function_desc); 2101 uasp_hs_function_desc, uasp_ss_function_desc, NULL);
2102 if (ret) 2102 if (ret)
2103 goto ep_fail; 2103 goto ep_fail;
2104 2104
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 6a2346b99f55..f2ac0cbc29a4 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -721,7 +721,8 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
721 status = -ENOMEM; 721 status = -ENOMEM;
722 722
723 /* copy descriptors, and track endpoint copies */ 723 /* copy descriptors, and track endpoint copies */
724 status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL); 724 status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL,
725 NULL);
725 if (status) 726 if (status)
726 goto fail; 727 goto fail;
727 return 0; 728 return 0;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 044ca79d3cb5..186d4b162524 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1100,7 +1100,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
1100 hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress; 1100 hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
1101 hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress; 1101 hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
1102 1102
1103 ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL); 1103 ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL,
1104 NULL);
1104 if (ret) 1105 if (ret)
1105 goto err; 1106 goto err;
1106 1107
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 87fb0fd6aaab..5cdaf0150a4e 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1699,28 +1699,6 @@ static struct usb_gadget_driver gadgetfs_driver = {
1699}; 1699};
1700 1700
1701/*----------------------------------------------------------------------*/ 1701/*----------------------------------------------------------------------*/
1702
1703static void gadgetfs_nop(struct usb_gadget *arg) { }
1704
1705static int gadgetfs_probe(struct usb_gadget *gadget,
1706 struct usb_gadget_driver *driver)
1707{
1708 CHIP = gadget->name;
1709 return -EISNAM;
1710}
1711
1712static struct usb_gadget_driver probe_driver = {
1713 .max_speed = USB_SPEED_HIGH,
1714 .bind = gadgetfs_probe,
1715 .unbind = gadgetfs_nop,
1716 .setup = (void *)gadgetfs_nop,
1717 .disconnect = gadgetfs_nop,
1718 .driver = {
1719 .name = "nop",
1720 },
1721};
1722
1723
1724/* DEVICE INITIALIZATION 1702/* DEVICE INITIALIZATION
1725 * 1703 *
1726 * fd = open ("/dev/gadget/$CHIP", O_RDWR) 1704 * fd = open ("/dev/gadget/$CHIP", O_RDWR)
@@ -1971,9 +1949,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
1971 if (the_device) 1949 if (the_device)
1972 return -ESRCH; 1950 return -ESRCH;
1973 1951
1974 /* fake probe to determine $CHIP */ 1952 CHIP = usb_get_gadget_udc_name();
1975 CHIP = NULL;
1976 usb_gadget_probe_driver(&probe_driver);
1977 if (!CHIP) 1953 if (!CHIP)
1978 return -ENODEV; 1954 return -ENODEV;
1979 1955
@@ -2034,6 +2010,8 @@ gadgetfs_kill_sb (struct super_block *sb)
2034 put_dev (the_device); 2010 put_dev (the_device);
2035 the_device = NULL; 2011 the_device = NULL;
2036 } 2012 }
2013 kfree(CHIP);
2014 CHIP = NULL;
2037} 2015}
2038 2016
2039/*----------------------------------------------------------------------*/ 2017/*----------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index d6ad7e6c978c..7c289416f87d 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -74,7 +74,6 @@ config USB_BCM63XX_UDC
74config USB_FSL_USB2 74config USB_FSL_USB2
75 tristate "Freescale Highspeed USB DR Peripheral Controller" 75 tristate "Freescale Highspeed USB DR Peripheral Controller"
76 depends on FSL_SOC || ARCH_MXC 76 depends on FSL_SOC || ARCH_MXC
77 select USB_FSL_MPH_DR_OF if OF
78 help 77 help
79 Some of Freescale PowerPC and i.MX processors have a High Speed 78 Some of Freescale PowerPC and i.MX processors have a High Speed
80 Dual-Role(DR) USB controller, which supports device mode. 79 Dual-Role(DR) USB controller, which supports device mode.
@@ -177,7 +176,7 @@ config USB_RENESAS_USBHS_UDC
177 176
178config USB_RENESAS_USB3 177config USB_RENESAS_USB3
179 tristate 'Renesas USB3.0 Peripheral controller' 178 tristate 'Renesas USB3.0 Peripheral controller'
180 depends on ARCH_SHMOBILE || COMPILE_TEST 179 depends on ARCH_RENESAS || COMPILE_TEST
181 help 180 help
182 Renesas USB3.0 Peripheral controller is a USB peripheral controller 181 Renesas USB3.0 Peripheral controller is a USB peripheral controller
183 that supports super, high, and full speed USB 3.0 data transfers. 182 that supports super, high, and full speed USB 3.0 data transfers.
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 7f77db5d1278..aae7458d8986 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -581,8 +581,13 @@ err0:
581 581
582void bdc_udc_exit(struct bdc *bdc) 582void bdc_udc_exit(struct bdc *bdc)
583{ 583{
584 unsigned long flags;
585
584 dev_dbg(bdc->dev, "%s()\n", __func__); 586 dev_dbg(bdc->dev, "%s()\n", __func__);
587 spin_lock_irqsave(&bdc->lock, flags);
585 bdc_ep_disable(bdc->bdc_ep_array[1]); 588 bdc_ep_disable(bdc->bdc_ep_array[1]);
589 spin_unlock_irqrestore(&bdc->lock, flags);
590
586 usb_del_gadget_udc(&bdc->gadget); 591 usb_del_gadget_udc(&bdc->gadget);
587 bdc_free_ep(bdc); 592 bdc_free_ep(bdc);
588} 593}
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 79fe6b77ee44..8f32b5ee7734 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -49,7 +49,6 @@
49#endif 49#endif
50 50
51#include <mach/hardware.h> 51#include <mach/hardware.h>
52#include <mach/platform.h>
53 52
54/* 53/*
55 * USB device configuration structure 54 * USB device configuration structure
@@ -147,9 +146,7 @@ struct lpc32xx_udc {
147 u32 io_p_size; 146 u32 io_p_size;
148 void __iomem *udp_baseaddr; 147 void __iomem *udp_baseaddr;
149 int udp_irq[4]; 148 int udp_irq[4];
150 struct clk *usb_pll_clk;
151 struct clk *usb_slv_clk; 149 struct clk *usb_slv_clk;
152 struct clk *usb_otg_clk;
153 150
154 /* DMA support */ 151 /* DMA support */
155 u32 *udca_v_base; 152 u32 *udca_v_base;
@@ -210,16 +207,6 @@ static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g)
210 207
211#define UDCA_BUFF_SIZE (128) 208#define UDCA_BUFF_SIZE (128)
212 209
213/* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will
214 * be replaced with an inremap()ed pointer
215 * */
216#define USB_CTRL IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64)
217
218/* USB_CTRL bit defines */
219#define USB_SLAVE_HCLK_EN (1 << 24)
220#define USB_HOST_NEED_CLK_EN (1 << 21)
221#define USB_DEV_NEED_CLK_EN (1 << 22)
222
223/********************************************************************** 210/**********************************************************************
224 * USB device controller register offsets 211 * USB device controller register offsets
225 **********************************************************************/ 212 **********************************************************************/
@@ -639,9 +626,6 @@ static void isp1301_udc_configure(struct lpc32xx_udc *udc)
639 i2c_smbus_write_byte_data(udc->isp1301_i2c_client, 626 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
640 ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD); 627 ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD);
641 628
642 /* Enable usb_need_clk clock after transceiver is initialized */
643 writel((readl(USB_CTRL) | USB_DEV_NEED_CLK_EN), USB_CTRL);
644
645 dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n", 629 dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n",
646 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00)); 630 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00));
647 dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n", 631 dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n",
@@ -980,31 +964,13 @@ static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
980 return; 964 return;
981 965
982 udc->clocked = 1; 966 udc->clocked = 1;
983 967 clk_prepare_enable(udc->usb_slv_clk);
984 /* 48MHz PLL up */
985 clk_enable(udc->usb_pll_clk);
986
987 /* Enable the USB device clock */
988 writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN,
989 USB_CTRL);
990
991 clk_enable(udc->usb_otg_clk);
992 } else { 968 } else {
993 if (!udc->clocked) 969 if (!udc->clocked)
994 return; 970 return;
995 971
996 udc->clocked = 0; 972 udc->clocked = 0;
997 973 clk_disable_unprepare(udc->usb_slv_clk);
998 /* Never disable the USB_HCLK during normal operation */
999
1000 /* 48MHz PLL dpwn */
1001 clk_disable(udc->usb_pll_clk);
1002
1003 /* Disable the USB device clock */
1004 writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN,
1005 USB_CTRL);
1006
1007 clk_disable(udc->usb_otg_clk);
1008 } 974 }
1009} 975}
1010 976
@@ -3125,58 +3091,21 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
3125 goto io_map_fail; 3091 goto io_map_fail;
3126 } 3092 }
3127 3093
3128 /* Enable AHB slave USB clock, needed for further USB clock control */ 3094 /* Get USB device clock */
3129 writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL); 3095 udc->usb_slv_clk = clk_get(&pdev->dev, NULL);
3130
3131 /* Get required clocks */
3132 udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
3133 if (IS_ERR(udc->usb_pll_clk)) {
3134 dev_err(udc->dev, "failed to acquire USB PLL\n");
3135 retval = PTR_ERR(udc->usb_pll_clk);
3136 goto pll_get_fail;
3137 }
3138 udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd");
3139 if (IS_ERR(udc->usb_slv_clk)) { 3096 if (IS_ERR(udc->usb_slv_clk)) {
3140 dev_err(udc->dev, "failed to acquire USB device clock\n"); 3097 dev_err(udc->dev, "failed to acquire USB device clock\n");
3141 retval = PTR_ERR(udc->usb_slv_clk); 3098 retval = PTR_ERR(udc->usb_slv_clk);
3142 goto usb_clk_get_fail; 3099 goto usb_clk_get_fail;
3143 } 3100 }
3144 udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
3145 if (IS_ERR(udc->usb_otg_clk)) {
3146 dev_err(udc->dev, "failed to acquire USB otg clock\n");
3147 retval = PTR_ERR(udc->usb_otg_clk);
3148 goto usb_otg_clk_get_fail;
3149 }
3150
3151 /* Setup PLL clock to 48MHz */
3152 retval = clk_enable(udc->usb_pll_clk);
3153 if (retval < 0) {
3154 dev_err(udc->dev, "failed to start USB PLL\n");
3155 goto pll_enable_fail;
3156 }
3157
3158 retval = clk_set_rate(udc->usb_pll_clk, 48000);
3159 if (retval < 0) {
3160 dev_err(udc->dev, "failed to set USB clock rate\n");
3161 goto pll_set_fail;
3162 }
3163
3164 writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL);
3165 3101
3166 /* Enable USB device clock */ 3102 /* Enable USB device clock */
3167 retval = clk_enable(udc->usb_slv_clk); 3103 retval = clk_prepare_enable(udc->usb_slv_clk);
3168 if (retval < 0) { 3104 if (retval < 0) {
3169 dev_err(udc->dev, "failed to start USB device clock\n"); 3105 dev_err(udc->dev, "failed to start USB device clock\n");
3170 goto usb_clk_enable_fail; 3106 goto usb_clk_enable_fail;
3171 } 3107 }
3172 3108
3173 /* Enable USB OTG clock */
3174 retval = clk_enable(udc->usb_otg_clk);
3175 if (retval < 0) {
3176 dev_err(udc->dev, "failed to start USB otg clock\n");
3177 goto usb_otg_clk_enable_fail;
3178 }
3179
3180 /* Setup deferred workqueue data */ 3109 /* Setup deferred workqueue data */
3181 udc->poweron = udc->pullup = 0; 3110 udc->poweron = udc->pullup = 0;
3182 INIT_WORK(&udc->pullup_job, pullup_work); 3111 INIT_WORK(&udc->pullup_job, pullup_work);
@@ -3287,19 +3216,10 @@ dma_alloc_fail:
3287 dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE, 3216 dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
3288 udc->udca_v_base, udc->udca_p_base); 3217 udc->udca_v_base, udc->udca_p_base);
3289i2c_fail: 3218i2c_fail:
3290 clk_disable(udc->usb_otg_clk); 3219 clk_disable_unprepare(udc->usb_slv_clk);
3291usb_otg_clk_enable_fail:
3292 clk_disable(udc->usb_slv_clk);
3293usb_clk_enable_fail: 3220usb_clk_enable_fail:
3294pll_set_fail:
3295 clk_disable(udc->usb_pll_clk);
3296pll_enable_fail:
3297 clk_put(udc->usb_otg_clk);
3298usb_otg_clk_get_fail:
3299 clk_put(udc->usb_slv_clk); 3221 clk_put(udc->usb_slv_clk);
3300usb_clk_get_fail: 3222usb_clk_get_fail:
3301 clk_put(udc->usb_pll_clk);
3302pll_get_fail:
3303 iounmap(udc->udp_baseaddr); 3223 iounmap(udc->udp_baseaddr);
3304io_map_fail: 3224io_map_fail:
3305 release_mem_region(udc->io_p_start, udc->io_p_size); 3225 release_mem_region(udc->io_p_start, udc->io_p_size);
@@ -3336,12 +3256,9 @@ static int lpc32xx_udc_remove(struct platform_device *pdev)
3336 free_irq(udc->udp_irq[IRQ_USB_HP], udc); 3256 free_irq(udc->udp_irq[IRQ_USB_HP], udc);
3337 free_irq(udc->udp_irq[IRQ_USB_LP], udc); 3257 free_irq(udc->udp_irq[IRQ_USB_LP], udc);
3338 3258
3339 clk_disable(udc->usb_otg_clk); 3259 clk_disable_unprepare(udc->usb_slv_clk);
3340 clk_put(udc->usb_otg_clk);
3341 clk_disable(udc->usb_slv_clk);
3342 clk_put(udc->usb_slv_clk); 3260 clk_put(udc->usb_slv_clk);
3343 clk_disable(udc->usb_pll_clk); 3261
3344 clk_put(udc->usb_pll_clk);
3345 iounmap(udc->udp_baseaddr); 3262 iounmap(udc->udp_baseaddr);
3346 release_mem_region(udc->io_p_start, udc->io_p_size); 3263 release_mem_region(udc->io_p_start, udc->io_p_size);
3347 kfree(udc); 3264 kfree(udc);
@@ -3367,7 +3284,7 @@ static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg)
3367 udc->clocked = 1; 3284 udc->clocked = 1;
3368 3285
3369 /* Kill global USB clock */ 3286 /* Kill global USB clock */
3370 clk_disable(udc->usb_slv_clk); 3287 clk_disable_unprepare(udc->usb_slv_clk);
3371 } 3288 }
3372 3289
3373 return 0; 3290 return 0;
@@ -3379,7 +3296,7 @@ static int lpc32xx_udc_resume(struct platform_device *pdev)
3379 3296
3380 if (udc->clocked) { 3297 if (udc->clocked) {
3381 /* Enable global USB clock */ 3298 /* Enable global USB clock */
3382 clk_enable(udc->usb_slv_clk); 3299 clk_prepare_enable(udc->usb_slv_clk);
3383 3300
3384 /* Enable clocking */ 3301 /* Enable clocking */
3385 udc_clk_set(udc, 1); 3302 udc_clk_set(udc, 1);
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index b82cb14850b6..a238da906115 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -48,18 +48,157 @@
48#include <linux/usb/gadget.h> 48#include <linux/usb/gadget.h>
49#include <linux/usb/otg.h> 49#include <linux/usb/otg.h>
50 50
51/*
52 * This driver is PXA25x only. Grab the right register definitions.
53 */
54#ifdef CONFIG_ARCH_PXA
55#include <mach/pxa25x-udc.h>
56#include <mach/hardware.h>
57#endif
58
59#ifdef CONFIG_ARCH_LUBBOCK 51#ifdef CONFIG_ARCH_LUBBOCK
60#include <mach/lubbock.h> 52#include <mach/lubbock.h>
61#endif 53#endif
62 54
55#define UDCCR 0x0000 /* UDC Control Register */
56#define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */
57#define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */
58#define UDC_RES3 0x000C /* UDC Undocumented - Reserved3 */
59#define UDCCS0 0x0010 /* UDC Endpoint 0 Control/Status Register */
60#define UDCCS1 0x0014 /* UDC Endpoint 1 (IN) Control/Status Register */
61#define UDCCS2 0x0018 /* UDC Endpoint 2 (OUT) Control/Status Register */
62#define UDCCS3 0x001C /* UDC Endpoint 3 (IN) Control/Status Register */
63#define UDCCS4 0x0020 /* UDC Endpoint 4 (OUT) Control/Status Register */
64#define UDCCS5 0x0024 /* UDC Endpoint 5 (Interrupt) Control/Status Register */
65#define UDCCS6 0x0028 /* UDC Endpoint 6 (IN) Control/Status Register */
66#define UDCCS7 0x002C /* UDC Endpoint 7 (OUT) Control/Status Register */
67#define UDCCS8 0x0030 /* UDC Endpoint 8 (IN) Control/Status Register */
68#define UDCCS9 0x0034 /* UDC Endpoint 9 (OUT) Control/Status Register */
69#define UDCCS10 0x0038 /* UDC Endpoint 10 (Interrupt) Control/Status Register */
70#define UDCCS11 0x003C /* UDC Endpoint 11 (IN) Control/Status Register */
71#define UDCCS12 0x0040 /* UDC Endpoint 12 (OUT) Control/Status Register */
72#define UDCCS13 0x0044 /* UDC Endpoint 13 (IN) Control/Status Register */
73#define UDCCS14 0x0048 /* UDC Endpoint 14 (OUT) Control/Status Register */
74#define UDCCS15 0x004C /* UDC Endpoint 15 (Interrupt) Control/Status Register */
75#define UFNRH 0x0060 /* UDC Frame Number Register High */
76#define UFNRL 0x0064 /* UDC Frame Number Register Low */
77#define UBCR2 0x0068 /* UDC Byte Count Reg 2 */
78#define UBCR4 0x006c /* UDC Byte Count Reg 4 */
79#define UBCR7 0x0070 /* UDC Byte Count Reg 7 */
80#define UBCR9 0x0074 /* UDC Byte Count Reg 9 */
81#define UBCR12 0x0078 /* UDC Byte Count Reg 12 */
82#define UBCR14 0x007c /* UDC Byte Count Reg 14 */
83#define UDDR0 0x0080 /* UDC Endpoint 0 Data Register */
84#define UDDR1 0x0100 /* UDC Endpoint 1 Data Register */
85#define UDDR2 0x0180 /* UDC Endpoint 2 Data Register */
86#define UDDR3 0x0200 /* UDC Endpoint 3 Data Register */
87#define UDDR4 0x0400 /* UDC Endpoint 4 Data Register */
88#define UDDR5 0x00A0 /* UDC Endpoint 5 Data Register */
89#define UDDR6 0x0600 /* UDC Endpoint 6 Data Register */
90#define UDDR7 0x0680 /* UDC Endpoint 7 Data Register */
91#define UDDR8 0x0700 /* UDC Endpoint 8 Data Register */
92#define UDDR9 0x0900 /* UDC Endpoint 9 Data Register */
93#define UDDR10 0x00C0 /* UDC Endpoint 10 Data Register */
94#define UDDR11 0x0B00 /* UDC Endpoint 11 Data Register */
95#define UDDR12 0x0B80 /* UDC Endpoint 12 Data Register */
96#define UDDR13 0x0C00 /* UDC Endpoint 13 Data Register */
97#define UDDR14 0x0E00 /* UDC Endpoint 14 Data Register */
98#define UDDR15 0x00E0 /* UDC Endpoint 15 Data Register */
99
100#define UICR0 0x0050 /* UDC Interrupt Control Register 0 */
101#define UICR1 0x0054 /* UDC Interrupt Control Register 1 */
102
103#define USIR0 0x0058 /* UDC Status Interrupt Register 0 */
104#define USIR1 0x005C /* UDC Status Interrupt Register 1 */
105
106#define UDCCR_UDE (1 << 0) /* UDC enable */
107#define UDCCR_UDA (1 << 1) /* UDC active */
108#define UDCCR_RSM (1 << 2) /* Device resume */
109#define UDCCR_RESIR (1 << 3) /* Resume interrupt request */
110#define UDCCR_SUSIR (1 << 4) /* Suspend interrupt request */
111#define UDCCR_SRM (1 << 5) /* Suspend/resume interrupt mask */
112#define UDCCR_RSTIR (1 << 6) /* Reset interrupt request */
113#define UDCCR_REM (1 << 7) /* Reset interrupt mask */
114
115#define UDCCS0_OPR (1 << 0) /* OUT packet ready */
116#define UDCCS0_IPR (1 << 1) /* IN packet ready */
117#define UDCCS0_FTF (1 << 2) /* Flush Tx FIFO */
118#define UDCCS0_DRWF (1 << 3) /* Device remote wakeup feature */
119#define UDCCS0_SST (1 << 4) /* Sent stall */
120#define UDCCS0_FST (1 << 5) /* Force stall */
121#define UDCCS0_RNE (1 << 6) /* Receive FIFO no empty */
122#define UDCCS0_SA (1 << 7) /* Setup active */
123
124#define UDCCS_BI_TFS (1 << 0) /* Transmit FIFO service */
125#define UDCCS_BI_TPC (1 << 1) /* Transmit packet complete */
126#define UDCCS_BI_FTF (1 << 2) /* Flush Tx FIFO */
127#define UDCCS_BI_TUR (1 << 3) /* Transmit FIFO underrun */
128#define UDCCS_BI_SST (1 << 4) /* Sent stall */
129#define UDCCS_BI_FST (1 << 5) /* Force stall */
130#define UDCCS_BI_TSP (1 << 7) /* Transmit short packet */
131
132#define UDCCS_BO_RFS (1 << 0) /* Receive FIFO service */
133#define UDCCS_BO_RPC (1 << 1) /* Receive packet complete */
134#define UDCCS_BO_DME (1 << 3) /* DMA enable */
135#define UDCCS_BO_SST (1 << 4) /* Sent stall */
136#define UDCCS_BO_FST (1 << 5) /* Force stall */
137#define UDCCS_BO_RNE (1 << 6) /* Receive FIFO not empty */
138#define UDCCS_BO_RSP (1 << 7) /* Receive short packet */
139
140#define UDCCS_II_TFS (1 << 0) /* Transmit FIFO service */
141#define UDCCS_II_TPC (1 << 1) /* Transmit packet complete */
142#define UDCCS_II_FTF (1 << 2) /* Flush Tx FIFO */
143#define UDCCS_II_TUR (1 << 3) /* Transmit FIFO underrun */
144#define UDCCS_II_TSP (1 << 7) /* Transmit short packet */
145
146#define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */
147#define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */
148#ifdef CONFIG_ARCH_IXP4XX /* FIXME: is this right?, datasheed says '2' */
149#define UDCCS_IO_ROF (1 << 3) /* Receive overflow */
150#endif
151#ifdef CONFIG_ARCH_PXA
152#define UDCCS_IO_ROF (1 << 2) /* Receive overflow */
153#endif
154#define UDCCS_IO_DME (1 << 3) /* DMA enable */
155#define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */
156#define UDCCS_IO_RSP (1 << 7) /* Receive short packet */
157
158#define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */
159#define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */
160#define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */
161#define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */
162#define UDCCS_INT_SST (1 << 4) /* Sent stall */
163#define UDCCS_INT_FST (1 << 5) /* Force stall */
164#define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */
165
166#define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */
167#define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */
168#define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */
169#define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */
170#define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */
171#define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */
172#define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */
173#define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */
174
175#define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */
176#define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */
177#define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */
178#define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */
179#define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */
180#define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */
181#define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */
182#define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */
183
184#define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */
185#define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */
186#define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */
187#define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */
188#define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */
189#define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */
190#define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */
191#define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */
192
193#define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */
194#define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */
195#define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */
196#define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */
197#define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */
198#define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */
199#define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */
200#define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */
201
63/* 202/*
64 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x 203 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
65 * series processors. The UDC for the IXP 4xx series is very similar. 204 * series processors. The UDC for the IXP 4xx series is very similar.
@@ -150,25 +289,61 @@ static void pullup_on(void)
150 mach->udc_command(PXA2XX_UDC_CMD_CONNECT); 289 mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
151} 290}
152 291
153static void pio_irq_enable(int bEndpointAddress) 292#if defined(CONFIG_CPU_BIG_ENDIAN)
293/*
294 * IXP4xx has its buses wired up in a way that relies on never doing any
295 * byte swaps, independent of whether it runs in big-endian or little-endian
296 * mode, as explained by Krzysztof Hałasa.
297 *
298 * We only support pxa25x in little-endian mode, but it is very likely
299 * that it works the same way.
300 */
301static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
302{
303 iowrite32be(val, dev->regs + reg);
304}
305
306static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
154{ 307{
155 bEndpointAddress &= 0xf; 308 return ioread32be(dev->regs + reg);
309}
310#else
311static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
312{
313 writel(val, dev->regs + reg);
314}
315
316static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
317{
318 return readl(dev->regs + reg);
319}
320#endif
321
322static void pio_irq_enable(struct pxa25x_ep *ep)
323{
324 u32 bEndpointAddress = ep->bEndpointAddress & 0xf;
325
156 if (bEndpointAddress < 8) 326 if (bEndpointAddress < 8)
157 UICR0 &= ~(1 << bEndpointAddress); 327 udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) &
328 ~(1 << bEndpointAddress));
158 else { 329 else {
159 bEndpointAddress -= 8; 330 bEndpointAddress -= 8;
160 UICR1 &= ~(1 << bEndpointAddress); 331 udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) &
332 ~(1 << bEndpointAddress));
161 } 333 }
162} 334}
163 335
164static void pio_irq_disable(int bEndpointAddress) 336static void pio_irq_disable(struct pxa25x_ep *ep)
165{ 337{
166 bEndpointAddress &= 0xf; 338 u32 bEndpointAddress = ep->bEndpointAddress & 0xf;
339
167 if (bEndpointAddress < 8) 340 if (bEndpointAddress < 8)
168 UICR0 |= 1 << bEndpointAddress; 341 udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) |
342 (1 << bEndpointAddress));
169 else { 343 else {
170 bEndpointAddress -= 8; 344 bEndpointAddress -= 8;
171 UICR1 |= 1 << bEndpointAddress; 345 udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) |
346 (1 << bEndpointAddress));
172 } 347 }
173} 348}
174 349
@@ -177,22 +352,61 @@ static void pio_irq_disable(int bEndpointAddress)
177 */ 352 */
178#define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE) 353#define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
179 354
180static inline void udc_set_mask_UDCCR(int mask) 355static inline void udc_set_mask_UDCCR(struct pxa25x_udc *dev, int mask)
181{ 356{
182 UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS); 357 u32 udccr = udc_get_reg(dev, UDCCR);
358
359 udc_set_reg(dev, (udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS), UDCCR);
183} 360}
184 361
185static inline void udc_clear_mask_UDCCR(int mask) 362static inline void udc_clear_mask_UDCCR(struct pxa25x_udc *dev, int mask)
186{ 363{
187 UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS); 364 u32 udccr = udc_get_reg(dev, UDCCR);
365
366 udc_set_reg(dev, (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS), UDCCR);
188} 367}
189 368
190static inline void udc_ack_int_UDCCR(int mask) 369static inline void udc_ack_int_UDCCR(struct pxa25x_udc *dev, int mask)
191{ 370{
192 /* udccr contains the bits we dont want to change */ 371 /* udccr contains the bits we dont want to change */
193 __u32 udccr = UDCCR & UDCCR_MASK_BITS; 372 u32 udccr = udc_get_reg(dev, UDCCR) & UDCCR_MASK_BITS;
194 373
195 UDCCR = udccr | (mask & ~UDCCR_MASK_BITS); 374 udc_set_reg(dev, udccr | (mask & ~UDCCR_MASK_BITS), UDCCR);
375}
376
377static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *ep)
378{
379 return udc_get_reg(ep->dev, ep->regoff_udccs);
380}
381
382static inline void udc_ep_set_UDCCS(struct pxa25x_ep *ep, u32 data)
383{
384 udc_set_reg(ep->dev, data, ep->regoff_udccs);
385}
386
387static inline u32 udc_ep0_get_UDCCS(struct pxa25x_udc *dev)
388{
389 return udc_get_reg(dev, UDCCS0);
390}
391
392static inline void udc_ep0_set_UDCCS(struct pxa25x_udc *dev, u32 data)
393{
394 udc_set_reg(dev, data, UDCCS0);
395}
396
397static inline u32 udc_ep_get_UDDR(struct pxa25x_ep *ep)
398{
399 return udc_get_reg(ep->dev, ep->regoff_uddr);
400}
401
402static inline void udc_ep_set_UDDR(struct pxa25x_ep *ep, u32 data)
403{
404 udc_set_reg(ep->dev, data, ep->regoff_uddr);
405}
406
407static inline u32 udc_ep_get_UBCR(struct pxa25x_ep *ep)
408{
409 return udc_get_reg(ep->dev, ep->regoff_ubcr);
196} 410}
197 411
198/* 412/*
@@ -358,7 +572,7 @@ static inline void ep0_idle (struct pxa25x_udc *dev)
358} 572}
359 573
360static int 574static int
361write_packet(volatile u32 *uddr, struct pxa25x_request *req, unsigned max) 575write_packet(struct pxa25x_ep *ep, struct pxa25x_request *req, unsigned max)
362{ 576{
363 u8 *buf; 577 u8 *buf;
364 unsigned length, count; 578 unsigned length, count;
@@ -372,7 +586,7 @@ write_packet(volatile u32 *uddr, struct pxa25x_request *req, unsigned max)
372 586
373 count = length; 587 count = length;
374 while (likely(count--)) 588 while (likely(count--))
375 *uddr = *buf++; 589 udc_ep_set_UDDR(ep, *buf++);
376 590
377 return length; 591 return length;
378} 592}
@@ -392,7 +606,7 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
392 unsigned count; 606 unsigned count;
393 int is_last, is_short; 607 int is_last, is_short;
394 608
395 count = write_packet(ep->reg_uddr, req, max); 609 count = write_packet(ep, req, max);
396 610
397 /* last packet is usually short (or a zlp) */ 611 /* last packet is usually short (or a zlp) */
398 if (unlikely (count != max)) 612 if (unlikely (count != max))
@@ -416,15 +630,15 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
416 * double buffering might work. TSP, TPC, and TFS 630 * double buffering might work. TSP, TPC, and TFS
417 * bit values are the same for all normal IN endpoints. 631 * bit values are the same for all normal IN endpoints.
418 */ 632 */
419 *ep->reg_udccs = UDCCS_BI_TPC; 633 udc_ep_set_UDCCS(ep, UDCCS_BI_TPC);
420 if (is_short) 634 if (is_short)
421 *ep->reg_udccs = UDCCS_BI_TSP; 635 udc_ep_set_UDCCS(ep, UDCCS_BI_TSP);
422 636
423 /* requests complete when all IN data is in the FIFO */ 637 /* requests complete when all IN data is in the FIFO */
424 if (is_last) { 638 if (is_last) {
425 done (ep, req, 0); 639 done (ep, req, 0);
426 if (list_empty(&ep->queue)) 640 if (list_empty(&ep->queue))
427 pio_irq_disable (ep->bEndpointAddress); 641 pio_irq_disable(ep);
428 return 1; 642 return 1;
429 } 643 }
430 644
@@ -432,7 +646,7 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
432 // double buffering is off in the default fifo mode, which 646 // double buffering is off in the default fifo mode, which
433 // prevents TFS from being set here. 647 // prevents TFS from being set here.
434 648
435 } while (*ep->reg_udccs & UDCCS_BI_TFS); 649 } while (udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS);
436 return 0; 650 return 0;
437} 651}
438 652
@@ -442,20 +656,21 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
442static inline 656static inline
443void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag) 657void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag)
444{ 658{
445 UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR; 659 udc_ep0_set_UDCCS(dev, flags|UDCCS0_SA|UDCCS0_OPR);
446 USIR0 = USIR0_IR0; 660 udc_set_reg(dev, USIR0, USIR0_IR0);
447 dev->req_pending = 0; 661 dev->req_pending = 0;
448 DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n", 662 DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
449 __func__, tag, UDCCS0, flags); 663 __func__, tag, udc_ep0_get_UDCCS(dev), flags);
450} 664}
451 665
452static int 666static int
453write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) 667write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
454{ 668{
669 struct pxa25x_udc *dev = ep->dev;
455 unsigned count; 670 unsigned count;
456 int is_short; 671 int is_short;
457 672
458 count = write_packet(&UDDR0, req, EP0_FIFO_SIZE); 673 count = write_packet(&dev->ep[0], req, EP0_FIFO_SIZE);
459 ep->dev->stats.write.bytes += count; 674 ep->dev->stats.write.bytes += count;
460 675
461 /* last packet "must be" short (or a zlp) */ 676 /* last packet "must be" short (or a zlp) */
@@ -468,7 +683,7 @@ write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
468 if (ep->dev->req_pending) 683 if (ep->dev->req_pending)
469 ep0start(ep->dev, UDCCS0_IPR, "short IN"); 684 ep0start(ep->dev, UDCCS0_IPR, "short IN");
470 else 685 else
471 UDCCS0 = UDCCS0_IPR; 686 udc_ep0_set_UDCCS(dev, UDCCS0_IPR);
472 687
473 count = req->req.length; 688 count = req->req.length;
474 done (ep, req, 0); 689 done (ep, req, 0);
@@ -484,9 +699,9 @@ write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
484 if (count >= EP0_FIFO_SIZE) { 699 if (count >= EP0_FIFO_SIZE) {
485 count = 100; 700 count = 100;
486 do { 701 do {
487 if ((UDCCS0 & UDCCS0_OPR) != 0) { 702 if ((udc_ep0_get_UDCCS(dev) & UDCCS0_OPR) != 0) {
488 /* clear OPR, generate ack */ 703 /* clear OPR, generate ack */
489 UDCCS0 = UDCCS0_OPR; 704 udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
490 break; 705 break;
491 } 706 }
492 count--; 707 count--;
@@ -521,7 +736,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
521 * UDCCS_{BO,IO}_RPC are all the same bit value. 736 * UDCCS_{BO,IO}_RPC are all the same bit value.
522 * UDCCS_{BO,IO}_RNE are all the same bit value. 737 * UDCCS_{BO,IO}_RNE are all the same bit value.
523 */ 738 */
524 udccs = *ep->reg_udccs; 739 udccs = udc_ep_get_UDCCS(ep);
525 if (unlikely ((udccs & UDCCS_BO_RPC) == 0)) 740 if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
526 break; 741 break;
527 buf = req->req.buf + req->req.actual; 742 buf = req->req.buf + req->req.actual;
@@ -530,7 +745,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
530 745
531 /* read all bytes from this packet */ 746 /* read all bytes from this packet */
532 if (likely (udccs & UDCCS_BO_RNE)) { 747 if (likely (udccs & UDCCS_BO_RNE)) {
533 count = 1 + (0x0ff & *ep->reg_ubcr); 748 count = 1 + (0x0ff & udc_ep_get_UBCR(ep));
534 req->req.actual += min (count, bufferspace); 749 req->req.actual += min (count, bufferspace);
535 } else /* zlp */ 750 } else /* zlp */
536 count = 0; 751 count = 0;
@@ -540,7 +755,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
540 is_short ? "/S" : "", 755 is_short ? "/S" : "",
541 req, req->req.actual, req->req.length); 756 req, req->req.actual, req->req.length);
542 while (likely (count-- != 0)) { 757 while (likely (count-- != 0)) {
543 u8 byte = (u8) *ep->reg_uddr; 758 u8 byte = (u8) udc_ep_get_UDDR(ep);
544 759
545 if (unlikely (bufferspace == 0)) { 760 if (unlikely (bufferspace == 0)) {
546 /* this happens when the driver's buffer 761 /* this happens when the driver's buffer
@@ -556,7 +771,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
556 bufferspace--; 771 bufferspace--;
557 } 772 }
558 } 773 }
559 *ep->reg_udccs = UDCCS_BO_RPC; 774 udc_ep_set_UDCCS(ep, UDCCS_BO_RPC);
560 /* RPC/RSP/RNE could now reflect the other packet buffer */ 775 /* RPC/RSP/RNE could now reflect the other packet buffer */
561 776
562 /* iso is one request per packet */ 777 /* iso is one request per packet */
@@ -571,7 +786,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
571 if (is_short || req->req.actual == req->req.length) { 786 if (is_short || req->req.actual == req->req.length) {
572 done (ep, req, 0); 787 done (ep, req, 0);
573 if (list_empty(&ep->queue)) 788 if (list_empty(&ep->queue))
574 pio_irq_disable (ep->bEndpointAddress); 789 pio_irq_disable(ep);
575 return 1; 790 return 1;
576 } 791 }
577 792
@@ -595,7 +810,7 @@ read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
595 buf = req->req.buf + req->req.actual; 810 buf = req->req.buf + req->req.actual;
596 bufferspace = req->req.length - req->req.actual; 811 bufferspace = req->req.length - req->req.actual;
597 812
598 while (UDCCS0 & UDCCS0_RNE) { 813 while (udc_ep_get_UDCCS(ep) & UDCCS0_RNE) {
599 byte = (u8) UDDR0; 814 byte = (u8) UDDR0;
600 815
601 if (unlikely (bufferspace == 0)) { 816 if (unlikely (bufferspace == 0)) {
@@ -613,7 +828,7 @@ read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
613 } 828 }
614 } 829 }
615 830
616 UDCCS0 = UDCCS0_OPR | UDCCS0_IPR; 831 udc_ep_set_UDCCS(ep, UDCCS0_OPR | UDCCS0_IPR);
617 832
618 /* completion */ 833 /* completion */
619 if (req->req.actual >= req->req.length) 834 if (req->req.actual >= req->req.length)
@@ -687,8 +902,8 @@ pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
687 DBG(DBG_VERBOSE, "ep0 config ack%s\n", 902 DBG(DBG_VERBOSE, "ep0 config ack%s\n",
688 dev->has_cfr ? "" : " raced"); 903 dev->has_cfr ? "" : " raced");
689 if (dev->has_cfr) 904 if (dev->has_cfr)
690 UDCCFR = UDCCFR_AREN|UDCCFR_ACM 905 udc_set_reg(dev, UDCCFR, UDCCFR_AREN |
691 |UDCCFR_MB1; 906 UDCCFR_ACM | UDCCFR_MB1);
692 done(ep, req, 0); 907 done(ep, req, 0);
693 dev->ep0state = EP0_END_XFER; 908 dev->ep0state = EP0_END_XFER;
694 local_irq_restore (flags); 909 local_irq_restore (flags);
@@ -696,7 +911,7 @@ pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
696 } 911 }
697 if (dev->req_pending) 912 if (dev->req_pending)
698 ep0start(dev, UDCCS0_IPR, "OUT"); 913 ep0start(dev, UDCCS0_IPR, "OUT");
699 if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0 914 if (length == 0 || ((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0
700 && read_ep0_fifo(ep, req))) { 915 && read_ep0_fifo(ep, req))) {
701 ep0_idle(dev); 916 ep0_idle(dev);
702 done(ep, req, 0); 917 done(ep, req, 0);
@@ -711,16 +926,16 @@ pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
711 } 926 }
712 /* can the FIFO can satisfy the request immediately? */ 927 /* can the FIFO can satisfy the request immediately? */
713 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) { 928 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
714 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0 929 if ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) != 0
715 && write_fifo(ep, req)) 930 && write_fifo(ep, req))
716 req = NULL; 931 req = NULL;
717 } else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0 932 } else if ((udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) != 0
718 && read_fifo(ep, req)) { 933 && read_fifo(ep, req)) {
719 req = NULL; 934 req = NULL;
720 } 935 }
721 936
722 if (likely(req && ep->ep.desc)) 937 if (likely(req && ep->ep.desc))
723 pio_irq_enable(ep->bEndpointAddress); 938 pio_irq_enable(ep);
724 } 939 }
725 940
726 /* pio or dma irq handler advances the queue. */ 941 /* pio or dma irq handler advances the queue. */
@@ -747,7 +962,7 @@ static void nuke(struct pxa25x_ep *ep, int status)
747 done(ep, req, status); 962 done(ep, req, status);
748 } 963 }
749 if (ep->ep.desc) 964 if (ep->ep.desc)
750 pio_irq_disable (ep->bEndpointAddress); 965 pio_irq_disable(ep);
751} 966}
752 967
753 968
@@ -807,14 +1022,14 @@ static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value)
807 local_irq_save(flags); 1022 local_irq_save(flags);
808 1023
809 if ((ep->bEndpointAddress & USB_DIR_IN) != 0 1024 if ((ep->bEndpointAddress & USB_DIR_IN) != 0
810 && ((*ep->reg_udccs & UDCCS_BI_TFS) == 0 1025 && ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) == 0
811 || !list_empty(&ep->queue))) { 1026 || !list_empty(&ep->queue))) {
812 local_irq_restore(flags); 1027 local_irq_restore(flags);
813 return -EAGAIN; 1028 return -EAGAIN;
814 } 1029 }
815 1030
816 /* FST bit is the same for control, bulk in, bulk out, interrupt in */ 1031 /* FST bit is the same for control, bulk in, bulk out, interrupt in */
817 *ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF; 1032 udc_ep_set_UDCCS(ep, UDCCS_BI_FST|UDCCS_BI_FTF);
818 1033
819 /* ep0 needs special care */ 1034 /* ep0 needs special care */
820 if (!ep->ep.desc) { 1035 if (!ep->ep.desc) {
@@ -826,7 +1041,7 @@ static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value)
826 } else { 1041 } else {
827 unsigned i; 1042 unsigned i;
828 for (i = 0; i < 1000; i += 20) { 1043 for (i = 0; i < 1000; i += 20) {
829 if (*ep->reg_udccs & UDCCS_BI_SST) 1044 if (udc_ep_get_UDCCS(ep) & UDCCS_BI_SST)
830 break; 1045 break;
831 udelay(20); 1046 udelay(20);
832 } 1047 }
@@ -850,10 +1065,10 @@ static int pxa25x_ep_fifo_status(struct usb_ep *_ep)
850 if ((ep->bEndpointAddress & USB_DIR_IN) != 0) 1065 if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
851 return -EOPNOTSUPP; 1066 return -EOPNOTSUPP;
852 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN 1067 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
853 || (*ep->reg_udccs & UDCCS_BO_RFS) == 0) 1068 || (udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) == 0)
854 return 0; 1069 return 0;
855 else 1070 else
856 return (*ep->reg_ubcr & 0xfff) + 1; 1071 return (udc_ep_get_UBCR(ep) & 0xfff) + 1;
857} 1072}
858 1073
859static void pxa25x_ep_fifo_flush(struct usb_ep *_ep) 1074static void pxa25x_ep_fifo_flush(struct usb_ep *_ep)
@@ -870,15 +1085,15 @@ static void pxa25x_ep_fifo_flush(struct usb_ep *_ep)
870 1085
871 /* for OUT, just read and discard the FIFO contents. */ 1086 /* for OUT, just read and discard the FIFO contents. */
872 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) { 1087 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
873 while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0) 1088 while (((udc_ep_get_UDCCS(ep)) & UDCCS_BO_RNE) != 0)
874 (void) *ep->reg_uddr; 1089 (void)udc_ep_get_UDDR(ep);
875 return; 1090 return;
876 } 1091 }
877 1092
878 /* most IN status is the same, but ISO can't stall */ 1093 /* most IN status is the same, but ISO can't stall */
879 *ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR 1094 udc_ep_set_UDCCS(ep, UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
880 | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC 1095 | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
881 ? 0 : UDCCS_BI_SST); 1096 ? 0 : UDCCS_BI_SST));
882} 1097}
883 1098
884 1099
@@ -905,15 +1120,23 @@ static struct usb_ep_ops pxa25x_ep_ops = {
905 1120
906static int pxa25x_udc_get_frame(struct usb_gadget *_gadget) 1121static int pxa25x_udc_get_frame(struct usb_gadget *_gadget)
907{ 1122{
908 return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff); 1123 struct pxa25x_udc *dev;
1124
1125 dev = container_of(_gadget, struct pxa25x_udc, gadget);
1126 return ((udc_get_reg(dev, UFNRH) & 0x07) << 8) |
1127 (udc_get_reg(dev, UFNRL) & 0xff);
909} 1128}
910 1129
911static int pxa25x_udc_wakeup(struct usb_gadget *_gadget) 1130static int pxa25x_udc_wakeup(struct usb_gadget *_gadget)
912{ 1131{
1132 struct pxa25x_udc *udc;
1133
1134 udc = container_of(_gadget, struct pxa25x_udc, gadget);
1135
913 /* host may not have enabled remote wakeup */ 1136 /* host may not have enabled remote wakeup */
914 if ((UDCCS0 & UDCCS0_DRWF) == 0) 1137 if ((udc_ep0_get_UDCCS(udc) & UDCCS0_DRWF) == 0)
915 return -EHOSTUNREACH; 1138 return -EHOSTUNREACH;
916 udc_set_mask_UDCCR(UDCCR_RSM); 1139 udc_set_mask_UDCCR(udc, UDCCR_RSM);
917 return 0; 1140 return 0;
918} 1141}
919 1142
@@ -1034,9 +1257,11 @@ udc_seq_show(struct seq_file *m, void *_d)
1034 /* registers for device and ep0 */ 1257 /* registers for device and ep0 */
1035 seq_printf(m, 1258 seq_printf(m,
1036 "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n", 1259 "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
1037 UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL); 1260 udc_get_reg(dev, UICR1), udc_get_reg(dev, UICR0),
1261 udc_get_reg(dev, USIR1), udc_get_reg(dev, USIR0),
1262 udc_get_reg(dev, UFNRH), udc_get_reg(dev, UFNRL));
1038 1263
1039 tmp = UDCCR; 1264 tmp = udc_get_reg(dev, UDCCR);
1040 seq_printf(m, 1265 seq_printf(m,
1041 "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp, 1266 "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
1042 (tmp & UDCCR_REM) ? " rem" : "", 1267 (tmp & UDCCR_REM) ? " rem" : "",
@@ -1048,7 +1273,7 @@ udc_seq_show(struct seq_file *m, void *_d)
1048 (tmp & UDCCR_UDA) ? " uda" : "", 1273 (tmp & UDCCR_UDA) ? " uda" : "",
1049 (tmp & UDCCR_UDE) ? " ude" : ""); 1274 (tmp & UDCCR_UDE) ? " ude" : "");
1050 1275
1051 tmp = UDCCS0; 1276 tmp = udc_ep0_get_UDCCS(dev);
1052 seq_printf(m, 1277 seq_printf(m,
1053 "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp, 1278 "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
1054 (tmp & UDCCS0_SA) ? " sa" : "", 1279 (tmp & UDCCS0_SA) ? " sa" : "",
@@ -1061,7 +1286,7 @@ udc_seq_show(struct seq_file *m, void *_d)
1061 (tmp & UDCCS0_OPR) ? " opr" : ""); 1286 (tmp & UDCCS0_OPR) ? " opr" : "");
1062 1287
1063 if (dev->has_cfr) { 1288 if (dev->has_cfr) {
1064 tmp = UDCCFR; 1289 tmp = udc_get_reg(dev, UDCCFR);
1065 seq_printf(m, 1290 seq_printf(m,
1066 "udccfr %02X =%s%s\n", tmp, 1291 "udccfr %02X =%s%s\n", tmp,
1067 (tmp & UDCCFR_AREN) ? " aren" : "", 1292 (tmp & UDCCFR_AREN) ? " aren" : "",
@@ -1087,7 +1312,7 @@ udc_seq_show(struct seq_file *m, void *_d)
1087 desc = ep->ep.desc; 1312 desc = ep->ep.desc;
1088 if (!desc) 1313 if (!desc)
1089 continue; 1314 continue;
1090 tmp = *dev->ep [i].reg_udccs; 1315 tmp = udc_ep_get_UDCCS(&dev->ep[i]);
1091 seq_printf(m, 1316 seq_printf(m,
1092 "%s max %d %s udccs %02x irqs %lu\n", 1317 "%s max %d %s udccs %02x irqs %lu\n",
1093 ep->ep.name, usb_endpoint_maxp(desc), 1318 ep->ep.name, usb_endpoint_maxp(desc),
@@ -1151,14 +1376,15 @@ static const struct file_operations debug_fops = {
1151static void udc_disable(struct pxa25x_udc *dev) 1376static void udc_disable(struct pxa25x_udc *dev)
1152{ 1377{
1153 /* block all irqs */ 1378 /* block all irqs */
1154 udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM); 1379 udc_set_mask_UDCCR(dev, UDCCR_SRM|UDCCR_REM);
1155 UICR0 = UICR1 = 0xff; 1380 udc_set_reg(dev, UICR0, 0xff);
1156 UFNRH = UFNRH_SIM; 1381 udc_set_reg(dev, UICR1, 0xff);
1382 udc_set_reg(dev, UFNRH, UFNRH_SIM);
1157 1383
1158 /* if hardware supports it, disconnect from usb */ 1384 /* if hardware supports it, disconnect from usb */
1159 pullup_off(); 1385 pullup_off();
1160 1386
1161 udc_clear_mask_UDCCR(UDCCR_UDE); 1387 udc_clear_mask_UDCCR(dev, UDCCR_UDE);
1162 1388
1163 ep0_idle (dev); 1389 ep0_idle (dev);
1164 dev->gadget.speed = USB_SPEED_UNKNOWN; 1390 dev->gadget.speed = USB_SPEED_UNKNOWN;
@@ -1200,10 +1426,10 @@ static void udc_reinit(struct pxa25x_udc *dev)
1200 */ 1426 */
1201static void udc_enable (struct pxa25x_udc *dev) 1427static void udc_enable (struct pxa25x_udc *dev)
1202{ 1428{
1203 udc_clear_mask_UDCCR(UDCCR_UDE); 1429 udc_clear_mask_UDCCR(dev, UDCCR_UDE);
1204 1430
1205 /* try to clear these bits before we enable the udc */ 1431 /* try to clear these bits before we enable the udc */
1206 udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); 1432 udc_ack_int_UDCCR(dev, UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
1207 1433
1208 ep0_idle(dev); 1434 ep0_idle(dev);
1209 dev->gadget.speed = USB_SPEED_UNKNOWN; 1435 dev->gadget.speed = USB_SPEED_UNKNOWN;
@@ -1215,15 +1441,15 @@ static void udc_enable (struct pxa25x_udc *dev)
1215 * - if RESET is already in progress, ack interrupt 1441 * - if RESET is already in progress, ack interrupt
1216 * - unmask reset interrupt 1442 * - unmask reset interrupt
1217 */ 1443 */
1218 udc_set_mask_UDCCR(UDCCR_UDE); 1444 udc_set_mask_UDCCR(dev, UDCCR_UDE);
1219 if (!(UDCCR & UDCCR_UDA)) 1445 if (!(udc_get_reg(dev, UDCCR) & UDCCR_UDA))
1220 udc_ack_int_UDCCR(UDCCR_RSTIR); 1446 udc_ack_int_UDCCR(dev, UDCCR_RSTIR);
1221 1447
1222 if (dev->has_cfr /* UDC_RES2 is defined */) { 1448 if (dev->has_cfr /* UDC_RES2 is defined */) {
1223 /* pxa255 (a0+) can avoid a set_config race that could 1449 /* pxa255 (a0+) can avoid a set_config race that could
1224 * prevent gadget drivers from configuring correctly 1450 * prevent gadget drivers from configuring correctly
1225 */ 1451 */
1226 UDCCFR = UDCCFR_ACM | UDCCFR_MB1; 1452 udc_set_reg(dev, UDCCFR, UDCCFR_ACM | UDCCFR_MB1);
1227 } else { 1453 } else {
1228 /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1) 1454 /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
1229 * which could result in missing packets and interrupts. 1455 * which could result in missing packets and interrupts.
@@ -1231,15 +1457,15 @@ static void udc_enable (struct pxa25x_udc *dev)
1231 * double buffers or not; ACM/AREN bits fit into the holes. 1457 * double buffers or not; ACM/AREN bits fit into the holes.
1232 * zero bits (like USIR0_IRx) disable double buffering. 1458 * zero bits (like USIR0_IRx) disable double buffering.
1233 */ 1459 */
1234 UDC_RES1 = 0x00; 1460 udc_set_reg(dev, UDC_RES1, 0x00);
1235 UDC_RES2 = 0x00; 1461 udc_set_reg(dev, UDC_RES2, 0x00);
1236 } 1462 }
1237 1463
1238 /* enable suspend/resume and reset irqs */ 1464 /* enable suspend/resume and reset irqs */
1239 udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM); 1465 udc_clear_mask_UDCCR(dev, UDCCR_SRM | UDCCR_REM);
1240 1466
1241 /* enable ep0 irqs */ 1467 /* enable ep0 irqs */
1242 UICR0 &= ~UICR0_IM0; 1468 udc_set_reg(dev, UICR0, udc_get_reg(dev, UICR0) & ~UICR0_IM0);
1243 1469
1244 /* if hardware supports it, pullup D+ and wait for reset */ 1470 /* if hardware supports it, pullup D+ and wait for reset */
1245 pullup_on(); 1471 pullup_on();
@@ -1408,9 +1634,9 @@ static void udc_watchdog(unsigned long _dev)
1408 1634
1409 local_irq_disable(); 1635 local_irq_disable();
1410 if (dev->ep0state == EP0_STALL 1636 if (dev->ep0state == EP0_STALL
1411 && (UDCCS0 & UDCCS0_FST) == 0 1637 && (udc_ep0_get_UDCCS(dev) & UDCCS0_FST) == 0
1412 && (UDCCS0 & UDCCS0_SST) == 0) { 1638 && (udc_ep0_get_UDCCS(dev) & UDCCS0_SST) == 0) {
1413 UDCCS0 = UDCCS0_FST|UDCCS0_FTF; 1639 udc_ep0_set_UDCCS(dev, UDCCS0_FST|UDCCS0_FTF);
1414 DBG(DBG_VERBOSE, "ep0 re-stall\n"); 1640 DBG(DBG_VERBOSE, "ep0 re-stall\n");
1415 start_watchdog(dev); 1641 start_watchdog(dev);
1416 } 1642 }
@@ -1419,7 +1645,7 @@ static void udc_watchdog(unsigned long _dev)
1419 1645
1420static void handle_ep0 (struct pxa25x_udc *dev) 1646static void handle_ep0 (struct pxa25x_udc *dev)
1421{ 1647{
1422 u32 udccs0 = UDCCS0; 1648 u32 udccs0 = udc_ep0_get_UDCCS(dev);
1423 struct pxa25x_ep *ep = &dev->ep [0]; 1649 struct pxa25x_ep *ep = &dev->ep [0];
1424 struct pxa25x_request *req; 1650 struct pxa25x_request *req;
1425 union { 1651 union {
@@ -1436,7 +1662,7 @@ static void handle_ep0 (struct pxa25x_udc *dev)
1436 /* clear stall status */ 1662 /* clear stall status */
1437 if (udccs0 & UDCCS0_SST) { 1663 if (udccs0 & UDCCS0_SST) {
1438 nuke(ep, -EPIPE); 1664 nuke(ep, -EPIPE);
1439 UDCCS0 = UDCCS0_SST; 1665 udc_ep0_set_UDCCS(dev, UDCCS0_SST);
1440 del_timer(&dev->timer); 1666 del_timer(&dev->timer);
1441 ep0_idle(dev); 1667 ep0_idle(dev);
1442 } 1668 }
@@ -1451,7 +1677,7 @@ static void handle_ep0 (struct pxa25x_udc *dev)
1451 switch (dev->ep0state) { 1677 switch (dev->ep0state) {
1452 case EP0_IDLE: 1678 case EP0_IDLE:
1453 /* late-breaking status? */ 1679 /* late-breaking status? */
1454 udccs0 = UDCCS0; 1680 udccs0 = udc_ep0_get_UDCCS(dev);
1455 1681
1456 /* start control request? */ 1682 /* start control request? */
1457 if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE)) 1683 if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
@@ -1462,14 +1688,14 @@ static void handle_ep0 (struct pxa25x_udc *dev)
1462 1688
1463 /* read SETUP packet */ 1689 /* read SETUP packet */
1464 for (i = 0; i < 8; i++) { 1690 for (i = 0; i < 8; i++) {
1465 if (unlikely(!(UDCCS0 & UDCCS0_RNE))) { 1691 if (unlikely(!(udc_ep0_get_UDCCS(dev) & UDCCS0_RNE))) {
1466bad_setup: 1692bad_setup:
1467 DMSG("SETUP %d!\n", i); 1693 DMSG("SETUP %d!\n", i);
1468 goto stall; 1694 goto stall;
1469 } 1695 }
1470 u.raw [i] = (u8) UDDR0; 1696 u.raw [i] = (u8) UDDR0;
1471 } 1697 }
1472 if (unlikely((UDCCS0 & UDCCS0_RNE) != 0)) 1698 if (unlikely((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0))
1473 goto bad_setup; 1699 goto bad_setup;
1474 1700
1475got_setup: 1701got_setup:
@@ -1545,7 +1771,7 @@ config_change:
1545 */ 1771 */
1546 } 1772 }
1547 DBG(DBG_VERBOSE, "protocol STALL, " 1773 DBG(DBG_VERBOSE, "protocol STALL, "
1548 "%02x err %d\n", UDCCS0, i); 1774 "%02x err %d\n", udc_ep0_get_UDCCS(dev), i);
1549stall: 1775stall:
1550 /* the watchdog timer helps deal with cases 1776 /* the watchdog timer helps deal with cases
1551 * where udc seems to clear FST wrongly, and 1777 * where udc seems to clear FST wrongly, and
@@ -1592,12 +1818,12 @@ stall:
1592 * - IPR cleared 1818 * - IPR cleared
1593 * - OPR got set, without SA (likely status stage) 1819 * - OPR got set, without SA (likely status stage)
1594 */ 1820 */
1595 UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR); 1821 udc_ep0_set_UDCCS(dev, udccs0 & (UDCCS0_SA|UDCCS0_OPR));
1596 } 1822 }
1597 break; 1823 break;
1598 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */ 1824 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
1599 if (udccs0 & UDCCS0_OPR) { 1825 if (udccs0 & UDCCS0_OPR) {
1600 UDCCS0 = UDCCS0_OPR|UDCCS0_FTF; 1826 udc_ep0_set_UDCCS(dev, UDCCS0_OPR|UDCCS0_FTF);
1601 DBG(DBG_VERBOSE, "ep0in premature status\n"); 1827 DBG(DBG_VERBOSE, "ep0in premature status\n");
1602 if (req) 1828 if (req)
1603 done(ep, req, 0); 1829 done(ep, req, 0);
@@ -1631,14 +1857,14 @@ stall:
1631 * also appears after some config change events. 1857 * also appears after some config change events.
1632 */ 1858 */
1633 if (udccs0 & UDCCS0_OPR) 1859 if (udccs0 & UDCCS0_OPR)
1634 UDCCS0 = UDCCS0_OPR; 1860 udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
1635 ep0_idle(dev); 1861 ep0_idle(dev);
1636 break; 1862 break;
1637 case EP0_STALL: 1863 case EP0_STALL:
1638 UDCCS0 = UDCCS0_FST; 1864 udc_ep0_set_UDCCS(dev, UDCCS0_FST);
1639 break; 1865 break;
1640 } 1866 }
1641 USIR0 = USIR0_IR0; 1867 udc_set_reg(dev, USIR0, USIR0_IR0);
1642} 1868}
1643 1869
1644static void handle_ep(struct pxa25x_ep *ep) 1870static void handle_ep(struct pxa25x_ep *ep)
@@ -1658,14 +1884,14 @@ static void handle_ep(struct pxa25x_ep *ep)
1658 1884
1659 // TODO check FST handling 1885 // TODO check FST handling
1660 1886
1661 udccs = *ep->reg_udccs; 1887 udccs = udc_ep_get_UDCCS(ep);
1662 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */ 1888 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
1663 tmp = UDCCS_BI_TUR; 1889 tmp = UDCCS_BI_TUR;
1664 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK)) 1890 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
1665 tmp |= UDCCS_BI_SST; 1891 tmp |= UDCCS_BI_SST;
1666 tmp &= udccs; 1892 tmp &= udccs;
1667 if (likely (tmp)) 1893 if (likely (tmp))
1668 *ep->reg_udccs = tmp; 1894 udc_ep_set_UDCCS(ep, tmp);
1669 if (req && likely ((udccs & UDCCS_BI_TFS) != 0)) 1895 if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
1670 completed = write_fifo(ep, req); 1896 completed = write_fifo(ep, req);
1671 1897
@@ -1676,13 +1902,13 @@ static void handle_ep(struct pxa25x_ep *ep)
1676 tmp = UDCCS_IO_ROF | UDCCS_IO_DME; 1902 tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
1677 tmp &= udccs; 1903 tmp &= udccs;
1678 if (likely(tmp)) 1904 if (likely(tmp))
1679 *ep->reg_udccs = tmp; 1905 udc_ep_set_UDCCS(ep, tmp);
1680 1906
1681 /* fifos can hold packets, ready for reading... */ 1907 /* fifos can hold packets, ready for reading... */
1682 if (likely(req)) { 1908 if (likely(req)) {
1683 completed = read_fifo(ep, req); 1909 completed = read_fifo(ep, req);
1684 } else 1910 } else
1685 pio_irq_disable (ep->bEndpointAddress); 1911 pio_irq_disable(ep);
1686 } 1912 }
1687 ep->pio_irqs++; 1913 ep->pio_irqs++;
1688 } while (completed); 1914 } while (completed);
@@ -1703,13 +1929,13 @@ pxa25x_udc_irq(int irq, void *_dev)
1703 1929
1704 dev->stats.irqs++; 1930 dev->stats.irqs++;
1705 do { 1931 do {
1706 u32 udccr = UDCCR; 1932 u32 udccr = udc_get_reg(dev, UDCCR);
1707 1933
1708 handled = 0; 1934 handled = 0;
1709 1935
1710 /* SUSpend Interrupt Request */ 1936 /* SUSpend Interrupt Request */
1711 if (unlikely(udccr & UDCCR_SUSIR)) { 1937 if (unlikely(udccr & UDCCR_SUSIR)) {
1712 udc_ack_int_UDCCR(UDCCR_SUSIR); 1938 udc_ack_int_UDCCR(dev, UDCCR_SUSIR);
1713 handled = 1; 1939 handled = 1;
1714 DBG(DBG_VERBOSE, "USB suspend\n"); 1940 DBG(DBG_VERBOSE, "USB suspend\n");
1715 1941
@@ -1722,7 +1948,7 @@ pxa25x_udc_irq(int irq, void *_dev)
1722 1948
1723 /* RESume Interrupt Request */ 1949 /* RESume Interrupt Request */
1724 if (unlikely(udccr & UDCCR_RESIR)) { 1950 if (unlikely(udccr & UDCCR_RESIR)) {
1725 udc_ack_int_UDCCR(UDCCR_RESIR); 1951 udc_ack_int_UDCCR(dev, UDCCR_RESIR);
1726 handled = 1; 1952 handled = 1;
1727 DBG(DBG_VERBOSE, "USB resume\n"); 1953 DBG(DBG_VERBOSE, "USB resume\n");
1728 1954
@@ -1734,10 +1960,10 @@ pxa25x_udc_irq(int irq, void *_dev)
1734 1960
1735 /* ReSeT Interrupt Request - USB reset */ 1961 /* ReSeT Interrupt Request - USB reset */
1736 if (unlikely(udccr & UDCCR_RSTIR)) { 1962 if (unlikely(udccr & UDCCR_RSTIR)) {
1737 udc_ack_int_UDCCR(UDCCR_RSTIR); 1963 udc_ack_int_UDCCR(dev, UDCCR_RSTIR);
1738 handled = 1; 1964 handled = 1;
1739 1965
1740 if ((UDCCR & UDCCR_UDA) == 0) { 1966 if ((udc_get_reg(dev, UDCCR) & UDCCR_UDA) == 0) {
1741 DBG(DBG_VERBOSE, "USB reset start\n"); 1967 DBG(DBG_VERBOSE, "USB reset start\n");
1742 1968
1743 /* reset driver and endpoints, 1969 /* reset driver and endpoints,
@@ -1753,8 +1979,10 @@ pxa25x_udc_irq(int irq, void *_dev)
1753 } 1979 }
1754 1980
1755 } else { 1981 } else {
1756 u32 usir0 = USIR0 & ~UICR0; 1982 u32 usir0 = udc_get_reg(dev, USIR0) &
1757 u32 usir1 = USIR1 & ~UICR1; 1983 ~udc_get_reg(dev, UICR0);
1984 u32 usir1 = udc_get_reg(dev, USIR1) &
1985 ~udc_get_reg(dev, UICR1);
1758 int i; 1986 int i;
1759 1987
1760 if (unlikely (!usir0 && !usir1)) 1988 if (unlikely (!usir0 && !usir1))
@@ -1775,13 +2003,15 @@ pxa25x_udc_irq(int irq, void *_dev)
1775 2003
1776 if (i && (usir0 & tmp)) { 2004 if (i && (usir0 & tmp)) {
1777 handle_ep(&dev->ep[i]); 2005 handle_ep(&dev->ep[i]);
1778 USIR0 |= tmp; 2006 udc_set_reg(dev, USIR0,
2007 udc_get_reg(dev, USIR0) | tmp);
1779 handled = 1; 2008 handled = 1;
1780 } 2009 }
1781#ifndef CONFIG_USB_PXA25X_SMALL 2010#ifndef CONFIG_USB_PXA25X_SMALL
1782 if (usir1 & tmp) { 2011 if (usir1 & tmp) {
1783 handle_ep(&dev->ep[i+8]); 2012 handle_ep(&dev->ep[i+8]);
1784 USIR1 |= tmp; 2013 udc_set_reg(dev, USIR1,
2014 udc_get_reg(dev, USIR1) | tmp);
1785 handled = 1; 2015 handled = 1;
1786 } 2016 }
1787#endif 2017#endif
@@ -1826,8 +2056,8 @@ static struct pxa25x_udc memory = {
1826 USB_EP_CAPS_DIR_ALL), 2056 USB_EP_CAPS_DIR_ALL),
1827 }, 2057 },
1828 .dev = &memory, 2058 .dev = &memory,
1829 .reg_udccs = &UDCCS0, 2059 .regoff_udccs = UDCCS0,
1830 .reg_uddr = &UDDR0, 2060 .regoff_uddr = UDDR0,
1831 }, 2061 },
1832 2062
1833 /* first group of endpoints */ 2063 /* first group of endpoints */
@@ -1843,8 +2073,8 @@ static struct pxa25x_udc memory = {
1843 .fifo_size = BULK_FIFO_SIZE, 2073 .fifo_size = BULK_FIFO_SIZE,
1844 .bEndpointAddress = USB_DIR_IN | 1, 2074 .bEndpointAddress = USB_DIR_IN | 1,
1845 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2075 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1846 .reg_udccs = &UDCCS1, 2076 .regoff_udccs = UDCCS1,
1847 .reg_uddr = &UDDR1, 2077 .regoff_uddr = UDDR1,
1848 }, 2078 },
1849 .ep[2] = { 2079 .ep[2] = {
1850 .ep = { 2080 .ep = {
@@ -1858,9 +2088,9 @@ static struct pxa25x_udc memory = {
1858 .fifo_size = BULK_FIFO_SIZE, 2088 .fifo_size = BULK_FIFO_SIZE,
1859 .bEndpointAddress = 2, 2089 .bEndpointAddress = 2,
1860 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2090 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1861 .reg_udccs = &UDCCS2, 2091 .regoff_udccs = UDCCS2,
1862 .reg_ubcr = &UBCR2, 2092 .regoff_ubcr = UBCR2,
1863 .reg_uddr = &UDDR2, 2093 .regoff_uddr = UDDR2,
1864 }, 2094 },
1865#ifndef CONFIG_USB_PXA25X_SMALL 2095#ifndef CONFIG_USB_PXA25X_SMALL
1866 .ep[3] = { 2096 .ep[3] = {
@@ -1875,8 +2105,8 @@ static struct pxa25x_udc memory = {
1875 .fifo_size = ISO_FIFO_SIZE, 2105 .fifo_size = ISO_FIFO_SIZE,
1876 .bEndpointAddress = USB_DIR_IN | 3, 2106 .bEndpointAddress = USB_DIR_IN | 3,
1877 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2107 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
1878 .reg_udccs = &UDCCS3, 2108 .regoff_udccs = UDCCS3,
1879 .reg_uddr = &UDDR3, 2109 .regoff_uddr = UDDR3,
1880 }, 2110 },
1881 .ep[4] = { 2111 .ep[4] = {
1882 .ep = { 2112 .ep = {
@@ -1890,9 +2120,9 @@ static struct pxa25x_udc memory = {
1890 .fifo_size = ISO_FIFO_SIZE, 2120 .fifo_size = ISO_FIFO_SIZE,
1891 .bEndpointAddress = 4, 2121 .bEndpointAddress = 4,
1892 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2122 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
1893 .reg_udccs = &UDCCS4, 2123 .regoff_udccs = UDCCS4,
1894 .reg_ubcr = &UBCR4, 2124 .regoff_ubcr = UBCR4,
1895 .reg_uddr = &UDDR4, 2125 .regoff_uddr = UDDR4,
1896 }, 2126 },
1897 .ep[5] = { 2127 .ep[5] = {
1898 .ep = { 2128 .ep = {
@@ -1905,8 +2135,8 @@ static struct pxa25x_udc memory = {
1905 .fifo_size = INT_FIFO_SIZE, 2135 .fifo_size = INT_FIFO_SIZE,
1906 .bEndpointAddress = USB_DIR_IN | 5, 2136 .bEndpointAddress = USB_DIR_IN | 5,
1907 .bmAttributes = USB_ENDPOINT_XFER_INT, 2137 .bmAttributes = USB_ENDPOINT_XFER_INT,
1908 .reg_udccs = &UDCCS5, 2138 .regoff_udccs = UDCCS5,
1909 .reg_uddr = &UDDR5, 2139 .regoff_uddr = UDDR5,
1910 }, 2140 },
1911 2141
1912 /* second group of endpoints */ 2142 /* second group of endpoints */
@@ -1922,8 +2152,8 @@ static struct pxa25x_udc memory = {
1922 .fifo_size = BULK_FIFO_SIZE, 2152 .fifo_size = BULK_FIFO_SIZE,
1923 .bEndpointAddress = USB_DIR_IN | 6, 2153 .bEndpointAddress = USB_DIR_IN | 6,
1924 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2154 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1925 .reg_udccs = &UDCCS6, 2155 .regoff_udccs = UDCCS6,
1926 .reg_uddr = &UDDR6, 2156 .regoff_uddr = UDDR6,
1927 }, 2157 },
1928 .ep[7] = { 2158 .ep[7] = {
1929 .ep = { 2159 .ep = {
@@ -1937,9 +2167,9 @@ static struct pxa25x_udc memory = {
1937 .fifo_size = BULK_FIFO_SIZE, 2167 .fifo_size = BULK_FIFO_SIZE,
1938 .bEndpointAddress = 7, 2168 .bEndpointAddress = 7,
1939 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2169 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1940 .reg_udccs = &UDCCS7, 2170 .regoff_udccs = UDCCS7,
1941 .reg_ubcr = &UBCR7, 2171 .regoff_ubcr = UBCR7,
1942 .reg_uddr = &UDDR7, 2172 .regoff_uddr = UDDR7,
1943 }, 2173 },
1944 .ep[8] = { 2174 .ep[8] = {
1945 .ep = { 2175 .ep = {
@@ -1953,8 +2183,8 @@ static struct pxa25x_udc memory = {
1953 .fifo_size = ISO_FIFO_SIZE, 2183 .fifo_size = ISO_FIFO_SIZE,
1954 .bEndpointAddress = USB_DIR_IN | 8, 2184 .bEndpointAddress = USB_DIR_IN | 8,
1955 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2185 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
1956 .reg_udccs = &UDCCS8, 2186 .regoff_udccs = UDCCS8,
1957 .reg_uddr = &UDDR8, 2187 .regoff_uddr = UDDR8,
1958 }, 2188 },
1959 .ep[9] = { 2189 .ep[9] = {
1960 .ep = { 2190 .ep = {
@@ -1968,9 +2198,9 @@ static struct pxa25x_udc memory = {
1968 .fifo_size = ISO_FIFO_SIZE, 2198 .fifo_size = ISO_FIFO_SIZE,
1969 .bEndpointAddress = 9, 2199 .bEndpointAddress = 9,
1970 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2200 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
1971 .reg_udccs = &UDCCS9, 2201 .regoff_udccs = UDCCS9,
1972 .reg_ubcr = &UBCR9, 2202 .regoff_ubcr = UBCR9,
1973 .reg_uddr = &UDDR9, 2203 .regoff_uddr = UDDR9,
1974 }, 2204 },
1975 .ep[10] = { 2205 .ep[10] = {
1976 .ep = { 2206 .ep = {
@@ -1983,8 +2213,8 @@ static struct pxa25x_udc memory = {
1983 .fifo_size = INT_FIFO_SIZE, 2213 .fifo_size = INT_FIFO_SIZE,
1984 .bEndpointAddress = USB_DIR_IN | 10, 2214 .bEndpointAddress = USB_DIR_IN | 10,
1985 .bmAttributes = USB_ENDPOINT_XFER_INT, 2215 .bmAttributes = USB_ENDPOINT_XFER_INT,
1986 .reg_udccs = &UDCCS10, 2216 .regoff_udccs = UDCCS10,
1987 .reg_uddr = &UDDR10, 2217 .regoff_uddr = UDDR10,
1988 }, 2218 },
1989 2219
1990 /* third group of endpoints */ 2220 /* third group of endpoints */
@@ -2000,8 +2230,8 @@ static struct pxa25x_udc memory = {
2000 .fifo_size = BULK_FIFO_SIZE, 2230 .fifo_size = BULK_FIFO_SIZE,
2001 .bEndpointAddress = USB_DIR_IN | 11, 2231 .bEndpointAddress = USB_DIR_IN | 11,
2002 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2232 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2003 .reg_udccs = &UDCCS11, 2233 .regoff_udccs = UDCCS11,
2004 .reg_uddr = &UDDR11, 2234 .regoff_uddr = UDDR11,
2005 }, 2235 },
2006 .ep[12] = { 2236 .ep[12] = {
2007 .ep = { 2237 .ep = {
@@ -2015,9 +2245,9 @@ static struct pxa25x_udc memory = {
2015 .fifo_size = BULK_FIFO_SIZE, 2245 .fifo_size = BULK_FIFO_SIZE,
2016 .bEndpointAddress = 12, 2246 .bEndpointAddress = 12,
2017 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2247 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2018 .reg_udccs = &UDCCS12, 2248 .regoff_udccs = UDCCS12,
2019 .reg_ubcr = &UBCR12, 2249 .regoff_ubcr = UBCR12,
2020 .reg_uddr = &UDDR12, 2250 .regoff_uddr = UDDR12,
2021 }, 2251 },
2022 .ep[13] = { 2252 .ep[13] = {
2023 .ep = { 2253 .ep = {
@@ -2031,8 +2261,8 @@ static struct pxa25x_udc memory = {
2031 .fifo_size = ISO_FIFO_SIZE, 2261 .fifo_size = ISO_FIFO_SIZE,
2032 .bEndpointAddress = USB_DIR_IN | 13, 2262 .bEndpointAddress = USB_DIR_IN | 13,
2033 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2263 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2034 .reg_udccs = &UDCCS13, 2264 .regoff_udccs = UDCCS13,
2035 .reg_uddr = &UDDR13, 2265 .regoff_uddr = UDDR13,
2036 }, 2266 },
2037 .ep[14] = { 2267 .ep[14] = {
2038 .ep = { 2268 .ep = {
@@ -2046,9 +2276,9 @@ static struct pxa25x_udc memory = {
2046 .fifo_size = ISO_FIFO_SIZE, 2276 .fifo_size = ISO_FIFO_SIZE,
2047 .bEndpointAddress = 14, 2277 .bEndpointAddress = 14,
2048 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2278 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2049 .reg_udccs = &UDCCS14, 2279 .regoff_udccs = UDCCS14,
2050 .reg_ubcr = &UBCR14, 2280 .regoff_ubcr = UBCR14,
2051 .reg_uddr = &UDDR14, 2281 .regoff_uddr = UDDR14,
2052 }, 2282 },
2053 .ep[15] = { 2283 .ep[15] = {
2054 .ep = { 2284 .ep = {
@@ -2061,8 +2291,8 @@ static struct pxa25x_udc memory = {
2061 .fifo_size = INT_FIFO_SIZE, 2291 .fifo_size = INT_FIFO_SIZE,
2062 .bEndpointAddress = USB_DIR_IN | 15, 2292 .bEndpointAddress = USB_DIR_IN | 15,
2063 .bmAttributes = USB_ENDPOINT_XFER_INT, 2293 .bmAttributes = USB_ENDPOINT_XFER_INT,
2064 .reg_udccs = &UDCCS15, 2294 .regoff_udccs = UDCCS15,
2065 .reg_uddr = &UDDR15, 2295 .regoff_uddr = UDDR15,
2066 }, 2296 },
2067#endif /* !CONFIG_USB_PXA25X_SMALL */ 2297#endif /* !CONFIG_USB_PXA25X_SMALL */
2068}; 2298};
@@ -2109,6 +2339,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
2109 struct pxa25x_udc *dev = &memory; 2339 struct pxa25x_udc *dev = &memory;
2110 int retval, irq; 2340 int retval, irq;
2111 u32 chiprev; 2341 u32 chiprev;
2342 struct resource *res;
2112 2343
2113 pr_info("%s: version %s\n", driver_name, DRIVER_VERSION); 2344 pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
2114 2345
@@ -2154,6 +2385,11 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
2154 if (irq < 0) 2385 if (irq < 0)
2155 return -ENODEV; 2386 return -ENODEV;
2156 2387
2388 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2389 dev->regs = devm_ioremap_resource(&pdev->dev, res);
2390 if (IS_ERR(dev->regs))
2391 return PTR_ERR(dev->regs);
2392
2157 dev->clk = devm_clk_get(&pdev->dev, NULL); 2393 dev->clk = devm_clk_get(&pdev->dev, NULL);
2158 if (IS_ERR(dev->clk)) 2394 if (IS_ERR(dev->clk))
2159 return PTR_ERR(dev->clk); 2395 return PTR_ERR(dev->clk);
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
index 3fe5931dc21a..4b8b72d7ab37 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.h
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -56,9 +56,9 @@ struct pxa25x_ep {
56 * UDDR = UDC Endpoint Data Register (the fifo) 56 * UDDR = UDC Endpoint Data Register (the fifo)
57 * DRCM = DMA Request Channel Map 57 * DRCM = DMA Request Channel Map
58 */ 58 */
59 volatile u32 *reg_udccs; 59 u32 regoff_udccs;
60 volatile u32 *reg_ubcr; 60 u32 regoff_ubcr;
61 volatile u32 *reg_uddr; 61 u32 regoff_uddr;
62}; 62};
63 63
64struct pxa25x_request { 64struct pxa25x_request {
@@ -125,6 +125,7 @@ struct pxa25x_udc {
125#ifdef CONFIG_USB_GADGET_DEBUG_FS 125#ifdef CONFIG_USB_GADGET_DEBUG_FS
126 struct dentry *debugfs_udc; 126 struct dentry *debugfs_udc;
127#endif 127#endif
128 void __iomem *regs;
128}; 129};
129#define to_pxa25x(g) (container_of((g), struct pxa25x_udc, gadget)) 130#define to_pxa25x(g) (container_of((g), struct pxa25x_udc, gadget))
130 131
@@ -197,6 +198,8 @@ dump_udccs0(const char *label)
197 (udccs0 & UDCCS0_OPR) ? " opr" : ""); 198 (udccs0 & UDCCS0_OPR) ? " opr" : "");
198} 199}
199 200
201static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *);
202
200static void __maybe_unused 203static void __maybe_unused
201dump_state(struct pxa25x_udc *dev) 204dump_state(struct pxa25x_udc *dev)
202{ 205{
@@ -228,7 +231,7 @@ dump_state(struct pxa25x_udc *dev)
228 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) { 231 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) {
229 if (dev->ep[i].ep.desc == NULL) 232 if (dev->ep[i].ep.desc == NULL)
230 continue; 233 continue;
231 DMSG ("udccs%d = %02x\n", i, *dev->ep->reg_udccs); 234 DMSG ("udccs%d = %02x\n", i, udc_ep_get_UDCCS(&dev->ep[i]));
232 } 235 }
233} 236}
234 237
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index b86a6f03592e..4151597e9d28 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -443,6 +443,36 @@ err1:
443EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release); 443EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);
444 444
445/** 445/**
446 * usb_get_gadget_udc_name - get the name of the first UDC controller
 447 * This function returns the name of the first UDC controller in the system.
 448 * Please note that this interface is useful only for legacy drivers which
449 * assume that there is only one UDC controller in the system and they need to
450 * get its name before initialization. There is no guarantee that the UDC
451 * of the returned name will be still available, when gadget driver registers
452 * itself.
453 *
454 * Returns pointer to string with UDC controller name on success, NULL
455 * otherwise. Caller should kfree() returned string.
456 */
457char *usb_get_gadget_udc_name(void)
458{
459 struct usb_udc *udc;
460 char *name = NULL;
461
462 /* For now we take the first available UDC */
463 mutex_lock(&udc_lock);
464 list_for_each_entry(udc, &udc_list, list) {
465 if (!udc->driver) {
466 name = kstrdup(udc->gadget->name, GFP_KERNEL);
467 break;
468 }
469 }
470 mutex_unlock(&udc_lock);
471 return name;
472}
473EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name);
474
475/**
446 * usb_add_gadget_udc - adds a new gadget to the udc class driver list 476 * usb_add_gadget_udc - adds a new gadget to the udc class driver list
447 * @parent: the parent device to this udc. Usually the controller 477 * @parent: the parent device to this udc. Usually the controller
448 * driver's device. 478 * driver's device.
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d7dab45b8543..3050b18b2447 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -125,9 +125,6 @@ config USB_EHCI_TT_NEWSCHED
125 125
126 If unsure, say Y. 126 If unsure, say Y.
127 127
128config USB_FSL_MPH_DR_OF
129 tristate
130
131if USB_EHCI_HCD 128if USB_EHCI_HCD
132 129
133config USB_EHCI_PCI 130config USB_EHCI_PCI
@@ -160,7 +157,6 @@ config USB_EHCI_FSL
160 tristate "Support for Freescale PPC on-chip EHCI USB controller" 157 tristate "Support for Freescale PPC on-chip EHCI USB controller"
161 depends on FSL_SOC 158 depends on FSL_SOC
162 select USB_EHCI_ROOT_HUB_TT 159 select USB_EHCI_ROOT_HUB_TT
163 select USB_FSL_MPH_DR_OF if OF
164 ---help--- 160 ---help---
165 Variation of ARC USB block used in some Freescale chips. 161 Variation of ARC USB block used in some Freescale chips.
166 162
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 65a06b4382bf..a9ddd3c9ec94 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -74,7 +74,8 @@ obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
74obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o 74obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
75obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o 75obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
76obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o 76obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
77obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o 77obj-$(CONFIG_USB_FSL_USB2) += fsl-mph-dr-of.o
78obj-$(CONFIG_USB_EHCI_FSL) += fsl-mph-dr-of.o
78obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o 79obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o
79obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o 80obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
80obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o 81obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c3791a01ab31..39fd95833eb8 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1901,7 +1901,7 @@ static void musb_recover_from_babble(struct musb *musb)
1901 */ 1901 */
1902 1902
1903static struct musb *allocate_instance(struct device *dev, 1903static struct musb *allocate_instance(struct device *dev,
1904 struct musb_hdrc_config *config, void __iomem *mbase) 1904 const struct musb_hdrc_config *config, void __iomem *mbase)
1905{ 1905{
1906 struct musb *musb; 1906 struct musb *musb;
1907 struct musb_hw_ep *ep; 1907 struct musb_hw_ep *ep;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index fd215fb45fd4..b6afe9e43305 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -438,7 +438,7 @@ struct musb {
438 */ 438 */
439 unsigned double_buffer_not_ok:1; 439 unsigned double_buffer_not_ok:1;
440 440
441 struct musb_hdrc_config *config; 441 const struct musb_hdrc_config *config;
442 442
443 int xceiv_old_state; 443 int xceiv_old_state;
444#ifdef CONFIG_DEBUG_FS 444#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 7539c3188ffc..8abfe4ec62fb 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -117,8 +117,8 @@ static void configure_channel(struct dma_channel *channel,
117 u8 bchannel = musb_channel->idx; 117 u8 bchannel = musb_channel->idx;
118 u16 csr = 0; 118 u16 csr = 0;
119 119
120 dev_dbg(musb->controller, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n", 120 dev_dbg(musb->controller, "%p, pkt_sz %d, addr %pad, len %d, mode %d\n",
121 channel, packet_sz, dma_addr, len, mode); 121 channel, packet_sz, &dma_addr, len, mode);
122 122
123 if (mode) { 123 if (mode) {
124 csr |= 1 << MUSB_HSDMA_MODE1_SHIFT; 124 csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
@@ -152,10 +152,10 @@ static int dma_channel_program(struct dma_channel *channel,
152 struct musb_dma_controller *controller = musb_channel->controller; 152 struct musb_dma_controller *controller = musb_channel->controller;
153 struct musb *musb = controller->private_data; 153 struct musb *musb = controller->private_data;
154 154
155 dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", 155 dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d\n",
156 musb_channel->epnum, 156 musb_channel->epnum,
157 musb_channel->transmit ? "Tx" : "Rx", 157 musb_channel->transmit ? "Tx" : "Rx",
158 packet_sz, dma_addr, len, mode); 158 packet_sz, &dma_addr, len, mode);
159 159
160 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || 160 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
161 channel->status == MUSB_DMA_STATUS_BUSY); 161 channel->status == MUSB_DMA_STATUS_BUSY);
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index d9b0dc461439..fdab4232cfbf 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -752,6 +752,7 @@ static const struct of_device_id sunxi_musb_match[] = {
752 { .compatible = "allwinner,sun8i-a33-musb", }, 752 { .compatible = "allwinner,sun8i-a33-musb", },
753 {} 753 {}
754}; 754};
755MODULE_DEVICE_TABLE(of, sunxi_musb_match);
755 756
756static struct platform_driver sunxi_musb_driver = { 757static struct platform_driver sunxi_musb_driver = {
757 .probe = sunxi_musb_probe, 758 .probe = sunxi_musb_probe,
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 4c82077da475..e6959ccb4453 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -310,9 +310,9 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
310 310
311 dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */ 311 dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */
312 312
313 dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", 313 dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
314 chdat->epnum, chdat->tx ? "tx" : "rx", 314 chdat->epnum, chdat->tx ? "tx" : "rx",
315 ch, dma_addr, chdat->transfer_len, len, 315 ch, &dma_addr, chdat->transfer_len, len,
316 chdat->transfer_packet_sz, packet_sz); 316 chdat->transfer_packet_sz, packet_sz);
317 317
318 /* 318 /*
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index d0b6a1cd7f62..c92a295049ad 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -207,9 +207,6 @@ static int ux500_dma_channel_program(struct dma_channel *channel,
207 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || 207 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
208 channel->status == MUSB_DMA_STATUS_BUSY); 208 channel->status == MUSB_DMA_STATUS_BUSY);
209 209
210 if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len))
211 return false;
212
213 channel->status = MUSB_DMA_STATUS_BUSY; 210 channel->status = MUSB_DMA_STATUS_BUSY;
214 channel->actual_len = 0; 211 channel->actual_len = 0;
215 ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len); 212 ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len);
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 39b424f7f629..a262a4343f29 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -5,7 +5,6 @@
5#include <linux/usb/usb_phy_generic.h> 5#include <linux/usb/usb_phy_generic.h>
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/clk.h> 7#include <linux/clk.h>
8#include <linux/regulator/consumer.h>
9#include <linux/of.h> 8#include <linux/of.h>
10#include <linux/of_address.h> 9#include <linux/of_address.h>
11#include <linux/usb/of.h> 10#include <linux/usb/of.h>
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 5320cb8642cb..980c9dee09eb 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -118,7 +118,8 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
118 status = USB_EVENT_VBUS; 118 status = USB_EVENT_VBUS;
119 otg->state = OTG_STATE_B_PERIPHERAL; 119 otg->state = OTG_STATE_B_PERIPHERAL;
120 nop->phy.last_event = status; 120 nop->phy.last_event = status;
121 usb_gadget_vbus_connect(otg->gadget); 121 if (otg->gadget)
122 usb_gadget_vbus_connect(otg->gadget);
122 123
123 /* drawing a "unit load" is *always* OK, except for OTG */ 124 /* drawing a "unit load" is *always* OK, except for OTG */
124 nop_set_vbus_draw(nop, 100); 125 nop_set_vbus_draw(nop, 100);
@@ -128,7 +129,8 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
128 } else { 129 } else {
129 nop_set_vbus_draw(nop, 0); 130 nop_set_vbus_draw(nop, 0);
130 131
131 usb_gadget_vbus_disconnect(otg->gadget); 132 if (otg->gadget)
133 usb_gadget_vbus_disconnect(otg->gadget);
132 status = USB_EVENT_NONE; 134 status = USB_EVENT_NONE;
133 otg->state = OTG_STATE_B_IDLE; 135 otg->state = OTG_STATE_B_IDLE;
134 nop->phy.last_event = status; 136 nop->phy.last_event = status;
@@ -184,7 +186,10 @@ static int nop_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
184 } 186 }
185 187
186 otg->gadget = gadget; 188 otg->gadget = gadget;
187 otg->state = OTG_STATE_B_IDLE; 189 if (otg->state == OTG_STATE_B_PERIPHERAL)
190 usb_gadget_vbus_connect(gadget);
191 else
192 otg->state = OTG_STATE_B_IDLE;
188 return 0; 193 return 0;
189} 194}
190 195
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 3af263cc0caa..8d111ec653e4 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -258,7 +258,7 @@ static void power_down(struct isp1301 *isp)
258 isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0); 258 isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0);
259} 259}
260 260
261static void power_up(struct isp1301 *isp) 261static void __maybe_unused power_up(struct isp1301 *isp)
262{ 262{
263 // isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN); 263 // isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
264 isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND); 264 isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND);
diff --git a/drivers/usb/renesas_usbhs/Makefile b/drivers/usb/renesas_usbhs/Makefile
index 9e47f477b6d2..d787d05f6546 100644
--- a/drivers/usb/renesas_usbhs/Makefile
+++ b/drivers/usb/renesas_usbhs/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs.o 5obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs.o
6 6
7renesas_usbhs-y := common.o mod.o pipe.o fifo.o rcar2.o 7renesas_usbhs-y := common.o mod.o pipe.o fifo.o rcar2.o rcar3.o
8 8
9ifneq ($(CONFIG_USB_RENESAS_USBHS_HCD),) 9ifneq ($(CONFIG_USB_RENESAS_USBHS_HCD),)
10 renesas_usbhs-y += mod_host.o 10 renesas_usbhs-y += mod_host.o
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 5af9ca5d54ab..baeb7d23bf24 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -25,6 +25,7 @@
25#include <linux/sysfs.h> 25#include <linux/sysfs.h>
26#include "common.h" 26#include "common.h"
27#include "rcar2.h" 27#include "rcar2.h"
28#include "rcar3.h"
28 29
29/* 30/*
30 * image of renesas_usbhs 31 * image of renesas_usbhs
@@ -477,18 +478,16 @@ static const struct of_device_id usbhs_of_match[] = {
477 .data = (void *)USBHS_TYPE_RCAR_GEN2, 478 .data = (void *)USBHS_TYPE_RCAR_GEN2,
478 }, 479 },
479 { 480 {
480 /* Gen3 is compatible with Gen2 */
481 .compatible = "renesas,usbhs-r8a7795", 481 .compatible = "renesas,usbhs-r8a7795",
482 .data = (void *)USBHS_TYPE_RCAR_GEN2, 482 .data = (void *)USBHS_TYPE_RCAR_GEN3,
483 }, 483 },
484 { 484 {
485 .compatible = "renesas,rcar-gen2-usbhs", 485 .compatible = "renesas,rcar-gen2-usbhs",
486 .data = (void *)USBHS_TYPE_RCAR_GEN2, 486 .data = (void *)USBHS_TYPE_RCAR_GEN2,
487 }, 487 },
488 { 488 {
489 /* Gen3 is compatible with Gen2 */
490 .compatible = "renesas,rcar-gen3-usbhs", 489 .compatible = "renesas,rcar-gen3-usbhs",
491 .data = (void *)USBHS_TYPE_RCAR_GEN2, 490 .data = (void *)USBHS_TYPE_RCAR_GEN3,
492 }, 491 },
493 { }, 492 { },
494}; 493};
@@ -578,6 +577,13 @@ static int usbhs_probe(struct platform_device *pdev)
578 priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe); 577 priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
579 } 578 }
580 break; 579 break;
580 case USBHS_TYPE_RCAR_GEN3:
581 priv->pfunc = usbhs_rcar3_ops;
582 if (!priv->dparam.pipe_configs) {
583 priv->dparam.pipe_configs = usbhsc_new_pipe;
584 priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
585 }
586 break;
581 default: 587 default:
582 if (!info->platform_callback.get_id) { 588 if (!info->platform_callback.get_id) {
583 dev_err(&pdev->dev, "no platform callbacks"); 589 dev_err(&pdev->dev, "no platform callbacks");
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 657f9672ceba..664b263e4b20 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -561,7 +561,7 @@ static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
561 if (!pkt) 561 if (!pkt)
562 break; 562 break;
563 563
564 usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ECONNRESET); 564 usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ESHUTDOWN);
565 } 565 }
566 566
567 usbhs_pipe_disable(pipe); 567 usbhs_pipe_disable(pipe);
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index 0e95d2925dc5..78e9dba701c4 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -241,7 +241,7 @@ static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
241{ 241{
242 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 242 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
243 int timeout = 1024; 243 int timeout = 1024;
244 u16 val; 244 u16 mask = usbhs_mod_is_host(priv) ? (CSSTS | PID_MASK) : PID_MASK;
245 245
246 /* 246 /*
247 * make sure.... 247 * make sure....
@@ -265,9 +265,7 @@ static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
265 usbhs_pipe_disable(pipe); 265 usbhs_pipe_disable(pipe);
266 266
267 do { 267 do {
268 val = usbhsp_pipectrl_get(pipe); 268 if (!(usbhsp_pipectrl_get(pipe) & mask))
269 val &= CSSTS | PID_MASK;
270 if (!val)
271 return 0; 269 return 0;
272 270
273 udelay(10); 271 udelay(10);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
new file mode 100644
index 000000000000..38b01f2aeeb0
--- /dev/null
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -0,0 +1,54 @@
1/*
2 * Renesas USB driver R-Car Gen. 3 initialization and power control
3 *
4 * Copyright (C) 2016 Renesas Electronics Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/io.h>
13#include "common.h"
14#include "rcar3.h"
15
16#define LPSTS 0x102
17#define UGCTRL2 0x184 /* 32-bit register */
18
19/* Low Power Status register (LPSTS) */
20#define LPSTS_SUSPM 0x4000
21
22/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
23#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
24#define UGCTRL2_USB0SEL_OTG 0x00000030
25
26void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
27{
28 iowrite32(data, priv->base + reg);
29}
30
31static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
32 void __iomem *base, int enable)
33{
34 struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
35
36 usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
37
38 if (enable)
39 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
40 else
41 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
42
43 return 0;
44}
45
46static int usbhs_rcar3_get_id(struct platform_device *pdev)
47{
48 return USBHS_GADGET;
49}
50
51const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
52 .power_ctrl = usbhs_rcar3_power_ctrl,
53 .get_id = usbhs_rcar3_get_id,
54};
diff --git a/drivers/usb/renesas_usbhs/rcar3.h b/drivers/usb/renesas_usbhs/rcar3.h
new file mode 100644
index 000000000000..5f850b23ff18
--- /dev/null
+++ b/drivers/usb/renesas_usbhs/rcar3.h
@@ -0,0 +1,3 @@
1#include "common.h"
2
3extern const struct renesas_usbhs_platform_callback usbhs_rcar3_ops;
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 1074b8921a5d..2b81b24eb5aa 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -126,6 +126,10 @@ struct usb_os_desc_table {
126 * string identifiers assigned during @bind(). If this 126 * string identifiers assigned during @bind(). If this
127 * pointer is null after initiation, the function will not 127 * pointer is null after initiation, the function will not
128 * be available at super speed. 128 * be available at super speed.
129 * @ssp_descriptors: Table of super speed plus descriptors, using
130 * interface and string identifiers assigned during @bind(). If
131 * this pointer is null after initiation, the function will not
132 * be available at super speed plus.
129 * @config: assigned when @usb_add_function() is called; this is the 133 * @config: assigned when @usb_add_function() is called; this is the
130 * configuration with which this function is associated. 134 * configuration with which this function is associated.
131 * @os_desc_table: Table of (interface id, os descriptors) pairs. The function 135 * @os_desc_table: Table of (interface id, os descriptors) pairs. The function
@@ -186,6 +190,7 @@ struct usb_function {
186 struct usb_descriptor_header **fs_descriptors; 190 struct usb_descriptor_header **fs_descriptors;
187 struct usb_descriptor_header **hs_descriptors; 191 struct usb_descriptor_header **hs_descriptors;
188 struct usb_descriptor_header **ss_descriptors; 192 struct usb_descriptor_header **ss_descriptors;
193 struct usb_descriptor_header **ssp_descriptors;
189 194
190 struct usb_configuration *config; 195 struct usb_configuration *config;
191 196
@@ -317,6 +322,7 @@ struct usb_configuration {
317 unsigned superspeed:1; 322 unsigned superspeed:1;
318 unsigned highspeed:1; 323 unsigned highspeed:1;
319 unsigned fullspeed:1; 324 unsigned fullspeed:1;
325 unsigned superspeed_plus:1;
320 struct usb_function *interface[MAX_CONFIG_INTERFACES]; 326 struct usb_function *interface[MAX_CONFIG_INTERFACES];
321}; 327};
322 328
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index d82d0068872b..5d4e151c49bf 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -595,6 +595,10 @@ struct usb_gadget_ops {
595 * only supports HNP on a different root port. 595 * only supports HNP on a different root port.
596 * @b_hnp_enable: OTG device feature flag, indicating that the A-Host 596 * @b_hnp_enable: OTG device feature flag, indicating that the A-Host
597 * enabled HNP support. 597 * enabled HNP support.
598 * @hnp_polling_support: OTG device feature flag, indicating if the OTG device
599 * in peripheral mode can support HNP polling.
600 * @host_request_flag: OTG device feature flag, indicating if A-Peripheral
601 * or B-Peripheral wants to take host role.
598 * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to 602 * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
599 * MaxPacketSize. 603 * MaxPacketSize.
600 * @is_selfpowered: if the gadget is self-powered. 604 * @is_selfpowered: if the gadget is self-powered.
@@ -642,6 +646,8 @@ struct usb_gadget {
642 unsigned b_hnp_enable:1; 646 unsigned b_hnp_enable:1;
643 unsigned a_hnp_support:1; 647 unsigned a_hnp_support:1;
644 unsigned a_alt_hnp_support:1; 648 unsigned a_alt_hnp_support:1;
649 unsigned hnp_polling_support:1;
650 unsigned host_request_flag:1;
645 unsigned quirk_ep_out_aligned_size:1; 651 unsigned quirk_ep_out_aligned_size:1;
646 unsigned quirk_altset_not_supp:1; 652 unsigned quirk_altset_not_supp:1;
647 unsigned quirk_stall_not_supp:1; 653 unsigned quirk_stall_not_supp:1;
@@ -729,6 +735,16 @@ static inline int gadget_is_superspeed(struct usb_gadget *g)
729} 735}
730 736
731/** 737/**
738 * gadget_is_superspeed_plus() - return true if the hardware handles
739 * superspeed plus
740 * @g: controller that might support superspeed plus
741 */
742static inline int gadget_is_superspeed_plus(struct usb_gadget *g)
743{
744 return g->max_speed >= USB_SPEED_SUPER_PLUS;
745}
746
747/**
732 * gadget_is_otg - return true iff the hardware is OTG-ready 748 * gadget_is_otg - return true iff the hardware is OTG-ready
733 * @g: controller that might have a Mini-AB connector 749 * @g: controller that might have a Mini-AB connector
734 * 750 *
@@ -1126,6 +1142,7 @@ extern int usb_add_gadget_udc_release(struct device *parent,
1126 struct usb_gadget *gadget, void (*release)(struct device *dev)); 1142 struct usb_gadget *gadget, void (*release)(struct device *dev));
1127extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); 1143extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
1128extern void usb_del_gadget_udc(struct usb_gadget *gadget); 1144extern void usb_del_gadget_udc(struct usb_gadget *gadget);
1145extern char *usb_get_gadget_udc_name(void);
1129 1146
1130/*-------------------------------------------------------------------------*/ 1147/*-------------------------------------------------------------------------*/
1131 1148
@@ -1194,7 +1211,8 @@ struct usb_function;
1194int usb_assign_descriptors(struct usb_function *f, 1211int usb_assign_descriptors(struct usb_function *f,
1195 struct usb_descriptor_header **fs, 1212 struct usb_descriptor_header **fs,
1196 struct usb_descriptor_header **hs, 1213 struct usb_descriptor_header **hs,
1197 struct usb_descriptor_header **ss); 1214 struct usb_descriptor_header **ss,
1215 struct usb_descriptor_header **ssp);
1198void usb_free_all_descriptors(struct usb_function *f); 1216void usb_free_all_descriptors(struct usb_function *f);
1199 1217
1200struct usb_descriptor_header *usb_otg_descriptor_alloc( 1218struct usb_descriptor_header *usb_otg_descriptor_alloc(
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 96ddfb7ab018..0b3da40a525e 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -124,7 +124,7 @@ struct musb_hdrc_platform_data {
124 int (*set_power)(int state); 124 int (*set_power)(int state);
125 125
126 /* MUSB configuration-specific details */ 126 /* MUSB configuration-specific details */
127 struct musb_hdrc_config *config; 127 const struct musb_hdrc_config *config;
128 128
129 /* Architecture specific board data */ 129 /* Architecture specific board data */
130 void *board_data; 130 void *board_data;
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index f728f1854829..24198e16f849 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -40,6 +40,18 @@
40#define PROTO_HOST (1) 40#define PROTO_HOST (1)
41#define PROTO_GADGET (2) 41#define PROTO_GADGET (2)
42 42
43#define OTG_STS_SELECTOR 0xF000 /* OTG status selector, according to
44 * OTG and EH 2.0 Chapter 6.2.3
45 * Table:6-4
46 */
47
48#define HOST_REQUEST_FLAG 1 /* Host request flag, according to
49 * OTG and EH 2.0 Charpter 6.2.3
50 * Table:6-5
51 */
52
53#define T_HOST_REQ_POLL (1500) /* 1500ms, HNP polling interval */
54
43enum otg_fsm_timer { 55enum otg_fsm_timer {
44 /* Standard OTG timers */ 56 /* Standard OTG timers */
45 A_WAIT_VRISE, 57 A_WAIT_VRISE,
@@ -48,6 +60,7 @@ enum otg_fsm_timer {
48 A_AIDL_BDIS, 60 A_AIDL_BDIS,
49 B_ASE0_BRST, 61 B_ASE0_BRST,
50 A_BIDL_ADIS, 62 A_BIDL_ADIS,
63 B_AIDL_BDIS,
51 64
52 /* Auxiliary timers */ 65 /* Auxiliary timers */
53 B_SE0_SRP, 66 B_SE0_SRP,
@@ -119,6 +132,8 @@ struct otg_fsm {
119 /* Current usb protocol used: 0:undefine; 1:host; 2:client */ 132 /* Current usb protocol used: 0:undefine; 1:host; 2:client */
120 int protocol; 133 int protocol;
121 struct mutex lock; 134 struct mutex lock;
135 u8 *host_req_flag;
136 struct delayed_work hnp_polling_work;
122}; 137};
123 138
124struct otg_fsm_ops { 139struct otg_fsm_ops {
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 4db191fe8c2c..00a47d058d83 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -184,6 +184,7 @@ struct renesas_usbhs_driver_param {
184}; 184};
185 185
186#define USBHS_TYPE_RCAR_GEN2 1 186#define USBHS_TYPE_RCAR_GEN2 1
187#define USBHS_TYPE_RCAR_GEN3 2
187 188
188/* 189/*
189 * option: 190 * option:
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 252ac16635dc..06d6c6228a7a 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -708,6 +708,7 @@ struct usb_otg20_descriptor {
708#define USB_OTG_HNP (1 << 1) /* swap host/device roles */ 708#define USB_OTG_HNP (1 << 1) /* swap host/device roles */
709#define USB_OTG_ADP (1 << 2) /* support ADP */ 709#define USB_OTG_ADP (1 << 2) /* support ADP */
710 710
711#define OTG_STS_SELECTOR 0xF000 /* OTG status selector */
711/*-------------------------------------------------------------------------*/ 712/*-------------------------------------------------------------------------*/
712 713
713/* USB_DT_DEBUG: for special highspeed devices, replacing serial console */ 714/* USB_DT_DEBUG: for special highspeed devices, replacing serial console */
@@ -923,6 +924,12 @@ struct usb_ptm_cap_descriptor {
923 __u8 bDevCapabilityType; 924 __u8 bDevCapabilityType;
924} __attribute__((packed)); 925} __attribute__((packed));
925 926
927/*
928 * The size of the descriptor for the Sublink Speed Attribute Count
929 * (SSAC) specified in bmAttributes[4:0].
930 */
931#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4)
932
926/*-------------------------------------------------------------------------*/ 933/*-------------------------------------------------------------------------*/
927 934
928/* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with 935/* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with