commit c71c03bda1e86c9d5198c5d83f712e695c4f2a1e
tree ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/usb/musb
parent ea53c912f8a86a8567697115b6a0d8152beee5c8
parent 6a00f206debf8a5c8899055726ad127dbeeed098
author Glenn Elliott <gelliott@cs.unc.edu> 2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu> 2012-03-04 19:47:13 -0500

    Merge branch 'mpi-master' into wip-k-fmlp

    Conflicts:
        litmus/sched_cedf.c

Diffstat (limited to 'drivers/usb/musb')
 26 files changed, 3902 insertions, 1141 deletions
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index cfd38edfcf9e..13093481f918 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -12,6 +12,7 @@ config USB_MUSB_HDRC
12 | depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523)) | 12 | depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523)) |
13 | select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN) | 13 | select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN) |
14 | select TWL4030_USB if MACH_OMAP_3430SDP | 14 | select TWL4030_USB if MACH_OMAP_3430SDP |
15 | select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA | ||
15 | select USB_OTG_UTILS | 16 | select USB_OTG_UTILS |
16 | tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' | 17 | tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' |
17 | help | 18 | help |
@@ -30,43 +31,41 @@ config USB_MUSB_HDRC
30 | If you do not know what this is, please say N. | 31 | If you do not know what this is, please say N. |
31 | 32 | ||
32 | To compile this driver as a module, choose M here; the | 33 | To compile this driver as a module, choose M here; the |
33 | module will be called "musb_hdrc". | 34 | module will be called "musb-hdrc". |
34 | 35 | ||
35 | config USB_MUSB_SOC | 36 | choice |
36 | boolean | 37 | prompt "Platform Glue Layer" |
37 | depends on USB_MUSB_HDRC | 38 | depends on USB_MUSB_HDRC |
38 | default y if ARCH_DAVINCI | ||
39 | default y if ARCH_OMAP2430 | ||
40 | default y if ARCH_OMAP3 | ||
41 | default y if ARCH_OMAP4 | ||
42 | default y if (BF54x && !BF544) | ||
43 | default y if (BF52x && !BF522 && !BF523) | ||
44 | 39 | ||
45 | comment "DaVinci 35x and 644x USB support" | 40 | config USB_MUSB_DAVINCI |
46 | depends on USB_MUSB_HDRC && ARCH_DAVINCI_DMx | 41 | tristate "DaVinci" |
42 | depends on ARCH_DAVINCI_DMx | ||
47 | 43 | ||
48 | comment "OMAP 243x high speed USB support" | 44 | config USB_MUSB_DA8XX |
49 | depends on USB_MUSB_HDRC && ARCH_OMAP2430 | 45 | tristate "DA8xx/OMAP-L1x" |
46 | depends on ARCH_DAVINCI_DA8XX | ||
50 | 47 | ||
51 | comment "OMAP 343x high speed USB support" | 48 | config USB_MUSB_TUSB6010 |
52 | depends on USB_MUSB_HDRC && ARCH_OMAP3 | 49 | tristate "TUSB6010" |
50 | depends on ARCH_OMAP | ||
53 | 51 | ||
54 | comment "OMAP 44xx high speed USB support" | 52 | config USB_MUSB_OMAP2PLUS |
55 | depends on USB_MUSB_HDRC && ARCH_OMAP4 | 53 | tristate "OMAP2430 and onwards" |
54 | depends on ARCH_OMAP2PLUS | ||
56 | 55 | ||
57 | comment "Blackfin high speed USB Support" | 56 | config USB_MUSB_AM35X |
58 | depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523)) | 57 | tristate "AM35x" |
58 | depends on ARCH_OMAP | ||
59 | 59 | ||
60 | config USB_TUSB6010 | 60 | config USB_MUSB_BLACKFIN |
61 | boolean "TUSB 6010 support" | 61 | tristate "Blackfin" |
62 | depends on USB_MUSB_HDRC && !USB_MUSB_SOC | 62 | depends on (BF54x && !BF544) || (BF52x && ! BF522 && !BF523) |
63 | select NOP_USB_XCEIV | 63 | |
64 | default y | 64 | config USB_MUSB_UX500 |
65 | help | 65 | tristate "U8500 and U5500" |
66 | The TUSB 6010 chip, from Texas Instruments, connects a discrete | 66 | depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500) |
67 | HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ | 67 | |
68 | (a high speed serial link). It can use system-specific external | 68 | endchoice |
69 | DMA controllers. | ||
70 | 69 | ||
71 | choice | 70 | choice |
72 | prompt "Driver Mode" | 71 | prompt "Driver Mode" |
@@ -144,7 +143,7 @@ config USB_MUSB_HDRC_HCD
144 | config MUSB_PIO_ONLY | 143 | config MUSB_PIO_ONLY |
145 | bool 'Disable DMA (always use PIO)' | 144 | bool 'Disable DMA (always use PIO)' |
146 | depends on USB_MUSB_HDRC | 145 | depends on USB_MUSB_HDRC |
147 | default y if USB_TUSB6010 | 146 | default USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X |
148 | help | 147 | help |
149 | All data is copied between memory and FIFO by the CPU. | 148 | All data is copied between memory and FIFO by the CPU. |
150 | DMA controllers are ignored. | 149 | DMA controllers are ignored. |
@@ -154,34 +153,33 @@ config MUSB_PIO_ONLY
154 | you can still disable it at run time using the "use_dma=n" module | 153 | you can still disable it at run time using the "use_dma=n" module |
155 | parameter. | 154 | parameter. |
156 | 155 | ||
156 | config USB_UX500_DMA | ||
157 | bool | ||
158 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY | ||
159 | default USB_MUSB_UX500 | ||
160 | help | ||
161 | Enable DMA transfers on UX500 platforms. | ||
162 | |||
157 | config USB_INVENTRA_DMA | 163 | config USB_INVENTRA_DMA |
158 | bool | 164 | bool |
159 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY | 165 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY |
160 | default ARCH_OMAP2430 || ARCH_OMAP3 || BLACKFIN || ARCH_OMAP4 | 166 | default USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN |
161 | help | 167 | help |
162 | Enable DMA transfers using Mentor's engine. | 168 | Enable DMA transfers using Mentor's engine. |
163 | 169 | ||
164 | config USB_TI_CPPI_DMA | 170 | config USB_TI_CPPI_DMA |
165 | bool | 171 | bool |
166 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY | 172 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY |
167 | default ARCH_DAVINCI | 173 | default USB_MUSB_DAVINCI |
168 | help | 174 | help |
169 | Enable DMA transfers when TI CPPI DMA is available. | 175 | Enable DMA transfers when TI CPPI DMA is available. |
170 | 176 | ||
171 | config USB_TUSB_OMAP_DMA | 177 | config USB_TUSB_OMAP_DMA |
172 | bool | 178 | bool |
173 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY | 179 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY |
174 | depends on USB_TUSB6010 | 180 | depends on USB_MUSB_TUSB6010 |
175 | depends on ARCH_OMAP | 181 | depends on ARCH_OMAP |
176 | default y | 182 | default y |
177 | help | 183 | help |
178 | Enable DMA transfers on TUSB 6010 when OMAP DMA is available. | 184 | Enable DMA transfers on TUSB 6010 when OMAP DMA is available. |
179 | 185 | ||
180 | config USB_MUSB_DEBUG | ||
181 | depends on USB_MUSB_HDRC | ||
182 | bool "Enable debugging messages" | ||
183 | default n | ||
184 | help | ||
185 | This enables musb debugging. To set the logging level use the debug | ||
186 | module parameter. Starting at level 3, per-transfer (urb, usb_request, | ||
187 | packet, or dma transfer) tracing may kick in. | ||
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 9705f716386e..c4d228b6ef8a 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -2,49 +2,22 @@
2 | # for USB OTG silicon based on Mentor Graphics INVENTRA designs | 2 | # for USB OTG silicon based on Mentor Graphics INVENTRA designs |
3 | # | 3 | # |
4 | 4 | ||
5 | musb_hdrc-objs := musb_core.o | 5 | obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o |
6 | 6 | ||
7 | obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o | 7 | musb_hdrc-y := musb_core.o |
8 | 8 | ||
9 | ifeq ($(CONFIG_ARCH_DAVINCI_DMx),y) | 9 | musb_hdrc-$(CONFIG_USB_GADGET_MUSB_HDRC) += musb_gadget_ep0.o musb_gadget.o |
10 | musb_hdrc-objs += davinci.o | 10 | musb_hdrc-$(CONFIG_USB_MUSB_HDRC_HCD) += musb_virthub.o musb_host.o |
11 | endif | 11 | musb_hdrc-$(CONFIG_DEBUG_FS) += musb_debugfs.o |
12 | |||
13 | ifeq ($(CONFIG_USB_TUSB6010),y) | ||
14 | musb_hdrc-objs += tusb6010.o | ||
15 | endif | ||
16 | |||
17 | ifeq ($(CONFIG_ARCH_OMAP2430),y) | ||
18 | musb_hdrc-objs += omap2430.o | ||
19 | endif | ||
20 | |||
21 | ifeq ($(CONFIG_ARCH_OMAP3430),y) | ||
22 | musb_hdrc-objs += omap2430.o | ||
23 | endif | ||
24 | |||
25 | ifeq ($(CONFIG_ARCH_OMAP4),y) | ||
26 | musb_hdrc-objs += omap2430.o | ||
27 | endif | ||
28 | |||
29 | ifeq ($(CONFIG_BF54x),y) | ||
30 | musb_hdrc-objs += blackfin.o | ||
31 | endif | ||
32 | |||
33 | ifeq ($(CONFIG_BF52x),y) | ||
34 | musb_hdrc-objs += blackfin.o | ||
35 | endif | ||
36 | |||
37 | ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y) | ||
38 | musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o | ||
39 | endif | ||
40 | 12 | ||
41 | ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y) | 13 | # Hardware Glue Layer |
42 | musb_hdrc-objs += musb_virthub.o musb_host.o | 14 | obj-$(CONFIG_USB_MUSB_OMAP2PLUS) += omap2430.o |
43 | endif | 15 | obj-$(CONFIG_USB_MUSB_AM35X) += am35x.o |
44 | 16 | obj-$(CONFIG_USB_MUSB_TUSB6010) += tusb6010.o | |
45 | ifeq ($(CONFIG_DEBUG_FS),y) | 17 | obj-$(CONFIG_USB_MUSB_DAVINCI) += davinci.o |
46 | musb_hdrc-objs += musb_debugfs.o | 18 | obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o |
47 | endif | 19 | obj-$(CONFIG_USB_MUSB_BLACKFIN) += blackfin.o |
20 | obj-$(CONFIG_USB_MUSB_UX500) += ux500.o | ||
48 | 21 | ||
49 | # the kconfig must guarantee that only one of the | 22 | # the kconfig must guarantee that only one of the |
50 | # possible I/O schemes will be enabled at a time ... | 23 | # possible I/O schemes will be enabled at a time ... |
@@ -54,26 +27,22 @@ endif
54 | ifneq ($(CONFIG_MUSB_PIO_ONLY),y) | 27 | ifneq ($(CONFIG_MUSB_PIO_ONLY),y) |
55 | 28 | ||
56 | ifeq ($(CONFIG_USB_INVENTRA_DMA),y) | 29 | ifeq ($(CONFIG_USB_INVENTRA_DMA),y) |
57 | musb_hdrc-objs += musbhsdma.o | 30 | musb_hdrc-y += musbhsdma.o |
58 | 31 | ||
59 | else | 32 | else |
60 | ifeq ($(CONFIG_USB_TI_CPPI_DMA),y) | 33 | ifeq ($(CONFIG_USB_TI_CPPI_DMA),y) |
61 | musb_hdrc-objs += cppi_dma.o | 34 | musb_hdrc-y += cppi_dma.o |
62 | 35 | ||
63 | else | 36 | else |
64 | ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y) | 37 | ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y) |
65 | musb_hdrc-objs += tusb6010_omap.o | 38 | musb_hdrc-y += tusb6010_omap.o |
66 | 39 | ||
40 | else | ||
41 | ifeq ($(CONFIG_USB_UX500_DMA),y) | ||
42 | musb_hdrc-y += ux500_dma.o | ||
43 | |||
44 | endif | ||
67 | endif | 45 | endif |
68 | endif | 46 | endif |
69 | endif | 47 | endif |
70 | endif | 48 | endif |
71 | |||
72 | |||
73 | ################################################################################ | ||
74 | |||
75 | # Debugging | ||
76 | |||
77 | ifeq ($(CONFIG_USB_MUSB_DEBUG),y) | ||
78 | EXTRA_CFLAGS += -DDEBUG | ||
79 | endif | ||
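
The Makefile rework above replaces the stack of ifeq blocks with ordinary Kbuild conditionals: the core is still linked into musb_hdrc from the musb_hdrc-y / musb_hdrc-$(CONFIG_...) pieces, while each glue layer becomes its own obj-$(CONFIG_USB_MUSB_*) module. Every one of those per-glue objects follows the same wrapper pattern, shown in full by the new am35x.c below. As a condensed, non-authoritative sketch (the names "foo" and "musb-foo" are placeholders, not anything from the patch):

/*
 * Skeleton of a MUSB glue module as built by the new per-glue
 * obj-$(CONFIG_USB_MUSB_*) Makefile entries.  "foo"/"musb-foo" are
 * placeholder names; see am35x.c and blackfin.c in this diff for the
 * real implementations.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
	/*
	 * The real glue drivers allocate a "musb-hdrc" child device here,
	 * copy the parent's resources and platform data into it, point
	 * pdata->platform_ops at their musb_platform_ops table, and then
	 * platform_device_add() the child.
	 */
	return 0;
}

static int __exit foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.remove		= __exit_p(foo_remove),
	.driver		= {
		.name	= "musb-foo",
	},
};

static int __init foo_init(void)
{
	/* probe is __init, so bind it with platform_driver_probe() */
	return platform_driver_probe(&foo_driver, foo_probe);
}
subsys_initcall(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
}
module_exit(foo_exit);

MODULE_DESCRIPTION("Placeholder MUSB glue skeleton");
MODULE_LICENSE("GPL v2");

With the Kconfig choice above guaranteeing that exactly one glue layer is enabled, building it as 'm' yields a small glue module alongside musb_hdrc instead of compiling everything into a single object.
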
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
new file mode 100644
index 000000000000..23ac28f98d91
--- /dev/null
+++ b/drivers/usb/musb/am35x.c
@@ -0,0 +1,657 @@
1 | /* | ||
2 | * Texas Instruments AM35x "glue layer" | ||
3 | * | ||
4 | * Copyright (c) 2010, by Texas Instruments | ||
5 | * | ||
6 | * Based on the DA8xx "glue layer" code. | ||
7 | * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com> | ||
8 | * | ||
9 | * This file is part of the Inventra Controller Driver for Linux. | ||
10 | * | ||
11 | * The Inventra Controller Driver for Linux is free software; you | ||
12 | * can redistribute it and/or modify it under the terms of the GNU | ||
13 | * General Public License version 2 as published by the Free Software | ||
14 | * Foundation. | ||
15 | * | ||
16 | * The Inventra Controller Driver for Linux is distributed in | ||
17 | * the hope that it will be useful, but WITHOUT ANY WARRANTY; | ||
18 | * without even the implied warranty of MERCHANTABILITY or | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | ||
20 | * License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with The Inventra Controller Driver for Linux ; if not, | ||
24 | * write to the Free Software Foundation, Inc., 59 Temple Place, | ||
25 | * Suite 330, Boston, MA 02111-1307 USA | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include <linux/init.h> | ||
30 | #include <linux/clk.h> | ||
31 | #include <linux/io.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/dma-mapping.h> | ||
34 | |||
35 | #include <plat/usb.h> | ||
36 | |||
37 | #include "musb_core.h" | ||
38 | |||
39 | /* | ||
40 | * AM35x specific definitions | ||
41 | */ | ||
42 | /* USB 2.0 OTG module registers */ | ||
43 | #define USB_REVISION_REG 0x00 | ||
44 | #define USB_CTRL_REG 0x04 | ||
45 | #define USB_STAT_REG 0x08 | ||
46 | #define USB_EMULATION_REG 0x0c | ||
47 | /* 0x10 Reserved */ | ||
48 | #define USB_AUTOREQ_REG 0x14 | ||
49 | #define USB_SRP_FIX_TIME_REG 0x18 | ||
50 | #define USB_TEARDOWN_REG 0x1c | ||
51 | #define EP_INTR_SRC_REG 0x20 | ||
52 | #define EP_INTR_SRC_SET_REG 0x24 | ||
53 | #define EP_INTR_SRC_CLEAR_REG 0x28 | ||
54 | #define EP_INTR_MASK_REG 0x2c | ||
55 | #define EP_INTR_MASK_SET_REG 0x30 | ||
56 | #define EP_INTR_MASK_CLEAR_REG 0x34 | ||
57 | #define EP_INTR_SRC_MASKED_REG 0x38 | ||
58 | #define CORE_INTR_SRC_REG 0x40 | ||
59 | #define CORE_INTR_SRC_SET_REG 0x44 | ||
60 | #define CORE_INTR_SRC_CLEAR_REG 0x48 | ||
61 | #define CORE_INTR_MASK_REG 0x4c | ||
62 | #define CORE_INTR_MASK_SET_REG 0x50 | ||
63 | #define CORE_INTR_MASK_CLEAR_REG 0x54 | ||
64 | #define CORE_INTR_SRC_MASKED_REG 0x58 | ||
65 | /* 0x5c Reserved */ | ||
66 | #define USB_END_OF_INTR_REG 0x60 | ||
67 | |||
68 | /* Control register bits */ | ||
69 | #define AM35X_SOFT_RESET_MASK 1 | ||
70 | |||
71 | /* USB interrupt register bits */ | ||
72 | #define AM35X_INTR_USB_SHIFT 16 | ||
73 | #define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT) | ||
74 | #define AM35X_INTR_DRVVBUS 0x100 | ||
75 | #define AM35X_INTR_RX_SHIFT 16 | ||
76 | #define AM35X_INTR_TX_SHIFT 0 | ||
77 | #define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */ | ||
78 | #define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */ | ||
79 | #define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT) | ||
80 | #define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT) | ||
81 | |||
82 | #define USB_MENTOR_CORE_OFFSET 0x400 | ||
83 | |||
84 | struct am35x_glue { | ||
85 | struct device *dev; | ||
86 | struct platform_device *musb; | ||
87 | struct clk *phy_clk; | ||
88 | struct clk *clk; | ||
89 | }; | ||
90 | #define glue_to_musb(g) platform_get_drvdata(g->musb) | ||
91 | |||
92 | /* | ||
93 | * am35x_musb_enable - enable interrupts | ||
94 | */ | ||
95 | static void am35x_musb_enable(struct musb *musb) | ||
96 | { | ||
97 | void __iomem *reg_base = musb->ctrl_base; | ||
98 | u32 epmask; | ||
99 | |||
100 | /* Workaround: setup IRQs through both register sets. */ | ||
101 | epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) | | ||
102 | ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT); | ||
103 | |||
104 | musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask); | ||
105 | musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK); | ||
106 | |||
107 | /* Force the DRVVBUS IRQ so we can start polling for ID change. */ | ||
108 | if (is_otg_enabled(musb)) | ||
109 | musb_writel(reg_base, CORE_INTR_SRC_SET_REG, | ||
110 | AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT); | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * am35x_musb_disable - disable HDRC and flush interrupts | ||
115 | */ | ||
116 | static void am35x_musb_disable(struct musb *musb) | ||
117 | { | ||
118 | void __iomem *reg_base = musb->ctrl_base; | ||
119 | |||
120 | musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK); | ||
121 | musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG, | ||
122 | AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK); | ||
123 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
124 | musb_writel(reg_base, USB_END_OF_INTR_REG, 0); | ||
125 | } | ||
126 | |||
127 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
128 | #define portstate(stmt) stmt | ||
129 | #else | ||
130 | #define portstate(stmt) | ||
131 | #endif | ||
132 | |||
133 | static void am35x_musb_set_vbus(struct musb *musb, int is_on) | ||
134 | { | ||
135 | WARN_ON(is_on && is_peripheral_active(musb)); | ||
136 | } | ||
137 | |||
138 | #define POLL_SECONDS 2 | ||
139 | |||
140 | static struct timer_list otg_workaround; | ||
141 | |||
142 | static void otg_timer(unsigned long _musb) | ||
143 | { | ||
144 | struct musb *musb = (void *)_musb; | ||
145 | void __iomem *mregs = musb->mregs; | ||
146 | u8 devctl; | ||
147 | unsigned long flags; | ||
148 | |||
149 | /* | ||
150 | * We poll because AM35x's won't expose several OTG-critical | ||
151 | * status change events (from the transceiver) otherwise. | ||
152 | */ | ||
153 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
154 | dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, | ||
155 | otg_state_string(musb->xceiv->state)); | ||
156 | |||
157 | spin_lock_irqsave(&musb->lock, flags); | ||
158 | switch (musb->xceiv->state) { | ||
159 | case OTG_STATE_A_WAIT_BCON: | ||
160 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
161 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
162 | |||
163 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
164 | if (devctl & MUSB_DEVCTL_BDEVICE) { | ||
165 | musb->xceiv->state = OTG_STATE_B_IDLE; | ||
166 | MUSB_DEV_MODE(musb); | ||
167 | } else { | ||
168 | musb->xceiv->state = OTG_STATE_A_IDLE; | ||
169 | MUSB_HST_MODE(musb); | ||
170 | } | ||
171 | break; | ||
172 | case OTG_STATE_A_WAIT_VFALL: | ||
173 | musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; | ||
174 | musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG, | ||
175 | MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT); | ||
176 | break; | ||
177 | case OTG_STATE_B_IDLE: | ||
178 | if (!is_peripheral_enabled(musb)) | ||
179 | break; | ||
180 | |||
181 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
182 | if (devctl & MUSB_DEVCTL_BDEVICE) | ||
183 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
184 | else | ||
185 | musb->xceiv->state = OTG_STATE_A_IDLE; | ||
186 | break; | ||
187 | default: | ||
188 | break; | ||
189 | } | ||
190 | spin_unlock_irqrestore(&musb->lock, flags); | ||
191 | } | ||
192 | |||
193 | static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout) | ||
194 | { | ||
195 | static unsigned long last_timer; | ||
196 | |||
197 | if (!is_otg_enabled(musb)) | ||
198 | return; | ||
199 | |||
200 | if (timeout == 0) | ||
201 | timeout = jiffies + msecs_to_jiffies(3); | ||
202 | |||
203 | /* Never idle if active, or when VBUS timeout is not set as host */ | ||
204 | if (musb->is_active || (musb->a_wait_bcon == 0 && | ||
205 | musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { | ||
206 | dev_dbg(musb->controller, "%s active, deleting timer\n", | ||
207 | otg_state_string(musb->xceiv->state)); | ||
208 | del_timer(&otg_workaround); | ||
209 | last_timer = jiffies; | ||
210 | return; | ||
211 | } | ||
212 | |||
213 | if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) { | ||
214 | dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n"); | ||
215 | return; | ||
216 | } | ||
217 | last_timer = timeout; | ||
218 | |||
219 | dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", | ||
220 | otg_state_string(musb->xceiv->state), | ||
221 | jiffies_to_msecs(timeout - jiffies)); | ||
222 | mod_timer(&otg_workaround, timeout); | ||
223 | } | ||
224 | |||
225 | static irqreturn_t am35x_musb_interrupt(int irq, void *hci) | ||
226 | { | ||
227 | struct musb *musb = hci; | ||
228 | void __iomem *reg_base = musb->ctrl_base; | ||
229 | struct device *dev = musb->controller; | ||
230 | struct musb_hdrc_platform_data *plat = dev->platform_data; | ||
231 | struct omap_musb_board_data *data = plat->board_data; | ||
232 | unsigned long flags; | ||
233 | irqreturn_t ret = IRQ_NONE; | ||
234 | u32 epintr, usbintr; | ||
235 | |||
236 | spin_lock_irqsave(&musb->lock, flags); | ||
237 | |||
238 | /* Get endpoint interrupts */ | ||
239 | epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG); | ||
240 | |||
241 | if (epintr) { | ||
242 | musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr); | ||
243 | |||
244 | musb->int_rx = | ||
245 | (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT; | ||
246 | musb->int_tx = | ||
247 | (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT; | ||
248 | } | ||
249 | |||
250 | /* Get usb core interrupts */ | ||
251 | usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG); | ||
252 | if (!usbintr && !epintr) | ||
253 | goto eoi; | ||
254 | |||
255 | if (usbintr) { | ||
256 | musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr); | ||
257 | |||
258 | musb->int_usb = | ||
259 | (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT; | ||
260 | } | ||
261 | /* | ||
262 | * DRVVBUS IRQs are the only proxy we have (a very poor one!) for | ||
263 | * AM35x's missing ID change IRQ. We need an ID change IRQ to | ||
264 | * switch appropriately between halves of the OTG state machine. | ||
265 | * Managing DEVCTL.SESSION per Mentor docs requires that we know its | ||
266 | * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. | ||
267 | * Also, DRVVBUS pulses for SRP (but not at 5V) ... | ||
268 | */ | ||
269 | if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) { | ||
270 | int drvvbus = musb_readl(reg_base, USB_STAT_REG); | ||
271 | void __iomem *mregs = musb->mregs; | ||
272 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
273 | int err; | ||
274 | |||
275 | err = is_host_enabled(musb) && (musb->int_usb & | ||
276 | MUSB_INTR_VBUSERROR); | ||
277 | if (err) { | ||
278 | /* | ||
279 | * The Mentor core doesn't debounce VBUS as needed | ||
280 | * to cope with device connect current spikes. This | ||
281 | * means it's not uncommon for bus-powered devices | ||
282 | * to get VBUS errors during enumeration. | ||
283 | * | ||
284 | * This is a workaround, but newer RTL from Mentor | ||
285 | * seems to allow a better one: "re"-starting sessions | ||
286 | * without waiting for VBUS to stop registering in | ||
287 | * devctl. | ||
288 | */ | ||
289 | musb->int_usb &= ~MUSB_INTR_VBUSERROR; | ||
290 | musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; | ||
291 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
292 | WARNING("VBUS error workaround (delay coming)\n"); | ||
293 | } else if (is_host_enabled(musb) && drvvbus) { | ||
294 | MUSB_HST_MODE(musb); | ||
295 | musb->xceiv->default_a = 1; | ||
296 | musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; | ||
297 | portstate(musb->port1_status |= USB_PORT_STAT_POWER); | ||
298 | del_timer(&otg_workaround); | ||
299 | } else { | ||
300 | musb->is_active = 0; | ||
301 | MUSB_DEV_MODE(musb); | ||
302 | musb->xceiv->default_a = 0; | ||
303 | musb->xceiv->state = OTG_STATE_B_IDLE; | ||
304 | portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); | ||
305 | } | ||
306 | |||
307 | /* NOTE: this must complete power-on within 100 ms. */ | ||
308 | dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", | ||
309 | drvvbus ? "on" : "off", | ||
310 | otg_state_string(musb->xceiv->state), | ||
311 | err ? " ERROR" : "", | ||
312 | devctl); | ||
313 | ret = IRQ_HANDLED; | ||
314 | } | ||
315 | |||
316 | if (musb->int_tx || musb->int_rx || musb->int_usb) | ||
317 | ret |= musb_interrupt(musb); | ||
318 | |||
319 | eoi: | ||
320 | /* EOI needs to be written for the IRQ to be re-asserted. */ | ||
321 | if (ret == IRQ_HANDLED || epintr || usbintr) { | ||
322 | /* clear level interrupt */ | ||
323 | if (data->clear_irq) | ||
324 | data->clear_irq(); | ||
325 | /* write EOI */ | ||
326 | musb_writel(reg_base, USB_END_OF_INTR_REG, 0); | ||
327 | } | ||
328 | |||
329 | /* Poll for ID change */ | ||
330 | if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) | ||
331 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
332 | |||
333 | spin_unlock_irqrestore(&musb->lock, flags); | ||
334 | |||
335 | return ret; | ||
336 | } | ||
337 | |||
338 | static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode) | ||
339 | { | ||
340 | struct device *dev = musb->controller; | ||
341 | struct musb_hdrc_platform_data *plat = dev->platform_data; | ||
342 | struct omap_musb_board_data *data = plat->board_data; | ||
343 | int retval = 0; | ||
344 | |||
345 | if (data->set_mode) | ||
346 | data->set_mode(musb_mode); | ||
347 | else | ||
348 | retval = -EIO; | ||
349 | |||
350 | return retval; | ||
351 | } | ||
352 | |||
353 | static int am35x_musb_init(struct musb *musb) | ||
354 | { | ||
355 | struct device *dev = musb->controller; | ||
356 | struct musb_hdrc_platform_data *plat = dev->platform_data; | ||
357 | struct omap_musb_board_data *data = plat->board_data; | ||
358 | void __iomem *reg_base = musb->ctrl_base; | ||
359 | u32 rev; | ||
360 | |||
361 | musb->mregs += USB_MENTOR_CORE_OFFSET; | ||
362 | |||
363 | /* Returns zero if e.g. not clocked */ | ||
364 | rev = musb_readl(reg_base, USB_REVISION_REG); | ||
365 | if (!rev) | ||
366 | return -ENODEV; | ||
367 | |||
368 | usb_nop_xceiv_register(); | ||
369 | musb->xceiv = otg_get_transceiver(); | ||
370 | if (!musb->xceiv) | ||
371 | return -ENODEV; | ||
372 | |||
373 | if (is_host_enabled(musb)) | ||
374 | setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); | ||
375 | |||
376 | /* Reset the musb */ | ||
377 | if (data->reset) | ||
378 | data->reset(); | ||
379 | |||
380 | /* Reset the controller */ | ||
381 | musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK); | ||
382 | |||
383 | /* Start the on-chip PHY and its PLL. */ | ||
384 | if (data->set_phy_power) | ||
385 | data->set_phy_power(1); | ||
386 | |||
387 | msleep(5); | ||
388 | |||
389 | musb->isr = am35x_musb_interrupt; | ||
390 | |||
391 | /* clear level interrupt */ | ||
392 | if (data->clear_irq) | ||
393 | data->clear_irq(); | ||
394 | |||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | static int am35x_musb_exit(struct musb *musb) | ||
399 | { | ||
400 | struct device *dev = musb->controller; | ||
401 | struct musb_hdrc_platform_data *plat = dev->platform_data; | ||
402 | struct omap_musb_board_data *data = plat->board_data; | ||
403 | |||
404 | if (is_host_enabled(musb)) | ||
405 | del_timer_sync(&otg_workaround); | ||
406 | |||
407 | /* Shutdown the on-chip PHY and its PLL. */ | ||
408 | if (data->set_phy_power) | ||
409 | data->set_phy_power(0); | ||
410 | |||
411 | otg_put_transceiver(musb->xceiv); | ||
412 | usb_nop_xceiv_unregister(); | ||
413 | |||
414 | return 0; | ||
415 | } | ||
416 | |||
417 | /* AM35x supports only 32bit read operation */ | ||
418 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) | ||
419 | { | ||
420 | void __iomem *fifo = hw_ep->fifo; | ||
421 | u32 val; | ||
422 | int i; | ||
423 | |||
424 | /* Read for 32bit-aligned destination address */ | ||
425 | if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) { | ||
426 | readsl(fifo, dst, len >> 2); | ||
427 | dst += len & ~0x03; | ||
428 | len &= 0x03; | ||
429 | } | ||
430 | /* | ||
431 | * Now read the remaining 1 to 3 byte or complete length if | ||
432 | * unaligned address. | ||
433 | */ | ||
434 | if (len > 4) { | ||
435 | for (i = 0; i < (len >> 2); i++) { | ||
436 | *(u32 *) dst = musb_readl(fifo, 0); | ||
437 | dst += 4; | ||
438 | } | ||
439 | len &= 0x03; | ||
440 | } | ||
441 | if (len > 0) { | ||
442 | val = musb_readl(fifo, 0); | ||
443 | memcpy(dst, &val, len); | ||
444 | } | ||
445 | } | ||
446 | |||
447 | static const struct musb_platform_ops am35x_ops = { | ||
448 | .init = am35x_musb_init, | ||
449 | .exit = am35x_musb_exit, | ||
450 | |||
451 | .enable = am35x_musb_enable, | ||
452 | .disable = am35x_musb_disable, | ||
453 | |||
454 | .set_mode = am35x_musb_set_mode, | ||
455 | .try_idle = am35x_musb_try_idle, | ||
456 | |||
457 | .set_vbus = am35x_musb_set_vbus, | ||
458 | }; | ||
459 | |||
460 | static u64 am35x_dmamask = DMA_BIT_MASK(32); | ||
461 | |||
462 | static int __init am35x_probe(struct platform_device *pdev) | ||
463 | { | ||
464 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; | ||
465 | struct platform_device *musb; | ||
466 | struct am35x_glue *glue; | ||
467 | |||
468 | struct clk *phy_clk; | ||
469 | struct clk *clk; | ||
470 | |||
471 | int ret = -ENOMEM; | ||
472 | |||
473 | glue = kzalloc(sizeof(*glue), GFP_KERNEL); | ||
474 | if (!glue) { | ||
475 | dev_err(&pdev->dev, "failed to allocate glue context\n"); | ||
476 | goto err0; | ||
477 | } | ||
478 | |||
479 | musb = platform_device_alloc("musb-hdrc", -1); | ||
480 | if (!musb) { | ||
481 | dev_err(&pdev->dev, "failed to allocate musb device\n"); | ||
482 | goto err1; | ||
483 | } | ||
484 | |||
485 | phy_clk = clk_get(&pdev->dev, "fck"); | ||
486 | if (IS_ERR(phy_clk)) { | ||
487 | dev_err(&pdev->dev, "failed to get PHY clock\n"); | ||
488 | ret = PTR_ERR(phy_clk); | ||
489 | goto err2; | ||
490 | } | ||
491 | |||
492 | clk = clk_get(&pdev->dev, "ick"); | ||
493 | if (IS_ERR(clk)) { | ||
494 | dev_err(&pdev->dev, "failed to get clock\n"); | ||
495 | ret = PTR_ERR(clk); | ||
496 | goto err3; | ||
497 | } | ||
498 | |||
499 | ret = clk_enable(phy_clk); | ||
500 | if (ret) { | ||
501 | dev_err(&pdev->dev, "failed to enable PHY clock\n"); | ||
502 | goto err4; | ||
503 | } | ||
504 | |||
505 | ret = clk_enable(clk); | ||
506 | if (ret) { | ||
507 | dev_err(&pdev->dev, "failed to enable clock\n"); | ||
508 | goto err5; | ||
509 | } | ||
510 | |||
511 | musb->dev.parent = &pdev->dev; | ||
512 | musb->dev.dma_mask = &am35x_dmamask; | ||
513 | musb->dev.coherent_dma_mask = am35x_dmamask; | ||
514 | |||
515 | glue->dev = &pdev->dev; | ||
516 | glue->musb = musb; | ||
517 | glue->phy_clk = phy_clk; | ||
518 | glue->clk = clk; | ||
519 | |||
520 | pdata->platform_ops = &am35x_ops; | ||
521 | |||
522 | platform_set_drvdata(pdev, glue); | ||
523 | |||
524 | ret = platform_device_add_resources(musb, pdev->resource, | ||
525 | pdev->num_resources); | ||
526 | if (ret) { | ||
527 | dev_err(&pdev->dev, "failed to add resources\n"); | ||
528 | goto err6; | ||
529 | } | ||
530 | |||
531 | ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); | ||
532 | if (ret) { | ||
533 | dev_err(&pdev->dev, "failed to add platform_data\n"); | ||
534 | goto err6; | ||
535 | } | ||
536 | |||
537 | ret = platform_device_add(musb); | ||
538 | if (ret) { | ||
539 | dev_err(&pdev->dev, "failed to register musb device\n"); | ||
540 | goto err6; | ||
541 | } | ||
542 | |||
543 | return 0; | ||
544 | |||
545 | err6: | ||
546 | clk_disable(clk); | ||
547 | |||
548 | err5: | ||
549 | clk_disable(phy_clk); | ||
550 | |||
551 | err4: | ||
552 | clk_put(clk); | ||
553 | |||
554 | err3: | ||
555 | clk_put(phy_clk); | ||
556 | |||
557 | err2: | ||
558 | platform_device_put(musb); | ||
559 | |||
560 | err1: | ||
561 | kfree(glue); | ||
562 | |||
563 | err0: | ||
564 | return ret; | ||
565 | } | ||
566 | |||
567 | static int __exit am35x_remove(struct platform_device *pdev) | ||
568 | { | ||
569 | struct am35x_glue *glue = platform_get_drvdata(pdev); | ||
570 | |||
571 | platform_device_del(glue->musb); | ||
572 | platform_device_put(glue->musb); | ||
573 | clk_disable(glue->clk); | ||
574 | clk_disable(glue->phy_clk); | ||
575 | clk_put(glue->clk); | ||
576 | clk_put(glue->phy_clk); | ||
577 | kfree(glue); | ||
578 | |||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | #ifdef CONFIG_PM | ||
583 | static int am35x_suspend(struct device *dev) | ||
584 | { | ||
585 | struct am35x_glue *glue = dev_get_drvdata(dev); | ||
586 | struct musb_hdrc_platform_data *plat = dev->platform_data; | ||
587 | struct omap_musb_board_data *data = plat->board_data; | ||
588 | |||
589 | /* Shutdown the on-chip PHY and its PLL. */ | ||
590 | if (data->set_phy_power) | ||
591 | data->set_phy_power(0); | ||
592 | |||
593 | clk_disable(glue->phy_clk); | ||
594 | clk_disable(glue->clk); | ||
595 | |||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | static int am35x_resume(struct device *dev) | ||
600 | { | ||
601 | struct am35x_glue *glue = dev_get_drvdata(dev); | ||
602 | struct musb_hdrc_platform_data *plat = dev->platform_data; | ||
603 | struct omap_musb_board_data *data = plat->board_data; | ||
604 | int ret; | ||
605 | |||
606 | /* Start the on-chip PHY and its PLL. */ | ||
607 | if (data->set_phy_power) | ||
608 | data->set_phy_power(1); | ||
609 | |||
610 | ret = clk_enable(glue->phy_clk); | ||
611 | if (ret) { | ||
612 | dev_err(dev, "failed to enable PHY clock\n"); | ||
613 | return ret; | ||
614 | } | ||
615 | |||
616 | ret = clk_enable(glue->clk); | ||
617 | if (ret) { | ||
618 | dev_err(dev, "failed to enable clock\n"); | ||
619 | return ret; | ||
620 | } | ||
621 | |||
622 | return 0; | ||
623 | } | ||
624 | |||
625 | static struct dev_pm_ops am35x_pm_ops = { | ||
626 | .suspend = am35x_suspend, | ||
627 | .resume = am35x_resume, | ||
628 | }; | ||
629 | |||
630 | #define DEV_PM_OPS &am35x_pm_ops | ||
631 | #else | ||
632 | #define DEV_PM_OPS NULL | ||
633 | #endif | ||
634 | |||
635 | static struct platform_driver am35x_driver = { | ||
636 | .remove = __exit_p(am35x_remove), | ||
637 | .driver = { | ||
638 | .name = "musb-am35x", | ||
639 | .pm = DEV_PM_OPS, | ||
640 | }, | ||
641 | }; | ||
642 | |||
643 | MODULE_DESCRIPTION("AM35x MUSB Glue Layer"); | ||
644 | MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>"); | ||
645 | MODULE_LICENSE("GPL v2"); | ||
646 | |||
647 | static int __init am35x_init(void) | ||
648 | { | ||
649 | return platform_driver_probe(&am35x_driver, am35x_probe); | ||
650 | } | ||
651 | subsys_initcall(am35x_init); | ||
652 | |||
653 | static void __exit am35x_exit(void) | ||
654 | { | ||
655 | platform_driver_unregister(&am35x_driver); | ||
656 | } | ||
657 | module_exit(am35x_exit); | ||
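
One detail worth calling out in the new am35x.c above: its musb_read_fifo() exists because the AM35x FIFO only supports 32-bit reads, so full words are pulled with readsl() when the destination buffer is word aligned (or one register at a time otherwise), and the final 1-3 bytes are taken from one more whole-word read via memcpy(). The stand-alone sketch below restates that strategy outside the kernel; read_fifo_word() is a made-up stub standing in for musb_readl(fifo, 0), and none of the names here come from the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Made-up stub for the hardware access (musb_readl(fifo, 0) in the driver). */
static uint32_t read_fifo_word(void)
{
	static uint32_t counter;
	return counter++;	/* dummy data */
}

/* Drain 'len' bytes from a FIFO that only allows 32-bit reads. */
static void fifo_read_32bit_only(uint8_t *dst, size_t len)
{
	/* Whole words first; the driver uses readsl() for the aligned case. */
	while (len >= 4) {
		uint32_t word = read_fifo_word();

		memcpy(dst, &word, 4);
		dst += 4;
		len -= 4;
	}

	/* Trailing 1-3 bytes: read one more full word, keep only what fits. */
	if (len) {
		uint32_t word = read_fifo_word();

		memcpy(dst, &word, len);
	}
}

int main(void)
{
	uint8_t buf[7];

	fifo_read_32bit_only(buf, sizeof(buf));
	printf("read %zu bytes, last byte 0x%02x\n", sizeof(buf), buf[6]);
	return 0;
}
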
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index b611420a8050..ae8c39617743 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -15,17 +15,27 @@
15 | #include <linux/list.h> | 15 | #include <linux/list.h> |
16 | #include <linux/gpio.h> | 16 | #include <linux/gpio.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/dma-mapping.h> | ||
18 | 20 | ||
19 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
20 | 22 | ||
21 | #include "musb_core.h" | 23 | #include "musb_core.h" |
24 | #include "musbhsdma.h" | ||
22 | #include "blackfin.h" | 25 | #include "blackfin.h" |
23 | 26 | ||
27 | struct bfin_glue { | ||
28 | struct device *dev; | ||
29 | struct platform_device *musb; | ||
30 | }; | ||
31 | #define glue_to_musb(g) platform_get_drvdata(g->musb) | ||
32 | |||
24 | /* | 33 | /* |
25 | * Load an endpoint's FIFO | 34 | * Load an endpoint's FIFO |
26 | */ | 35 | */ |
27 | void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) | 36 | void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) |
28 | { | 37 | { |
38 | struct musb *musb = hw_ep->musb; | ||
29 | void __iomem *fifo = hw_ep->fifo; | 39 | void __iomem *fifo = hw_ep->fifo; |
30 | void __iomem *epio = hw_ep->regs; | 40 | void __iomem *epio = hw_ep->regs; |
31 | u8 epnum = hw_ep->epnum; | 41 | u8 epnum = hw_ep->epnum; |
@@ -34,7 +44,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
34 | 44 | ||
35 | musb_writew(epio, MUSB_TXCOUNT, len); | 45 | musb_writew(epio, MUSB_TXCOUNT, len); |
36 | 46 | ||
37 | DBG(4, "TX ep%d fifo %p count %d buf %p, epio %p\n", | 47 | dev_dbg(musb->controller, "TX ep%d fifo %p count %d buf %p, epio %p\n", |
38 | hw_ep->epnum, fifo, len, src, epio); | 48 | hw_ep->epnum, fifo, len, src, epio); |
39 | 49 | ||
40 | dump_fifo_data(src, len); | 50 | dump_fifo_data(src, len); |
@@ -89,6 +99,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
89 | */ | 99 | */ |
90 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) | 100 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) |
91 | { | 101 | { |
102 | struct musb *musb = hw_ep->musb; | ||
92 | void __iomem *fifo = hw_ep->fifo; | 103 | void __iomem *fifo = hw_ep->fifo; |
93 | u8 epnum = hw_ep->epnum; | 104 | u8 epnum = hw_ep->epnum; |
94 | 105 | ||
@@ -145,7 +156,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
145 | *(dst + len - 1) = (u8)inw((unsigned long)fifo + 4); | 156 | *(dst + len - 1) = (u8)inw((unsigned long)fifo + 4); |
146 | } | 157 | } |
147 | } | 158 | } |
148 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | 159 | dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", |
149 | 'R', hw_ep->epnum, fifo, len, dst); | 160 | 'R', hw_ep->epnum, fifo, len, dst); |
150 | 161 | ||
151 | dump_fifo_data(dst, len); | 162 | dump_fifo_data(dst, len); |
@@ -171,8 +182,9 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
171 | } | 182 | } |
172 | 183 | ||
173 | /* Start sampling ID pin, when plug is removed from MUSB */ | 184 | /* Start sampling ID pin, when plug is removed from MUSB */ |
174 | if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE | 185 | if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE |
175 | || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { | 186 | || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) || |
187 | (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) { | ||
176 | mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); | 188 | mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); |
177 | musb->a_wait_bcon = TIMER_DELAY; | 189 | musb->a_wait_bcon = TIMER_DELAY; |
178 | } | 190 | } |
@@ -269,15 +281,17 @@ static void musb_conn_timer_handler(unsigned long _musb)
269 | } | 281 | } |
270 | break; | 282 | break; |
271 | default: | 283 | default: |
272 | DBG(1, "%s state not handled\n", otg_state_string(musb)); | 284 | dev_dbg(musb->controller, "%s state not handled\n", |
285 | otg_state_string(musb->xceiv->state)); | ||
273 | break; | 286 | break; |
274 | } | 287 | } |
275 | spin_unlock_irqrestore(&musb->lock, flags); | 288 | spin_unlock_irqrestore(&musb->lock, flags); |
276 | 289 | ||
277 | DBG(4, "state is %s\n", otg_state_string(musb)); | 290 | dev_dbg(musb->controller, "state is %s\n", |
291 | otg_state_string(musb->xceiv->state)); | ||
278 | } | 292 | } |
279 | 293 | ||
280 | void musb_platform_enable(struct musb *musb) | 294 | static void bfin_musb_enable(struct musb *musb) |
281 | { | 295 | { |
282 | if (!is_otg_enabled(musb) && is_host_enabled(musb)) { | 296 | if (!is_otg_enabled(musb) && is_host_enabled(musb)) { |
283 | mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); | 297 | mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); |
@@ -285,66 +299,67 @@ void musb_platform_enable(struct musb *musb)
285 | } | 299 | } |
286 | } | 300 | } |
287 | 301 | ||
288 | void musb_platform_disable(struct musb *musb) | 302 | static void bfin_musb_disable(struct musb *musb) |
289 | { | 303 | { |
290 | } | 304 | } |
291 | 305 | ||
292 | static void bfin_set_vbus(struct musb *musb, int is_on) | 306 | static void bfin_musb_set_vbus(struct musb *musb, int is_on) |
293 | { | 307 | { |
294 | int value = musb->config->gpio_vrsel_active; | 308 | int value = musb->config->gpio_vrsel_active; |
295 | if (!is_on) | 309 | if (!is_on) |
296 | value = !value; | 310 | value = !value; |
297 | gpio_set_value(musb->config->gpio_vrsel, value); | 311 | gpio_set_value(musb->config->gpio_vrsel, value); |
298 | 312 | ||
299 | DBG(1, "VBUS %s, devctl %02x " | 313 | dev_dbg(musb->controller, "VBUS %s, devctl %02x " |
300 | /* otg %3x conf %08x prcm %08x */ "\n", | 314 | /* otg %3x conf %08x prcm %08x */ "\n", |
301 | otg_state_string(musb), | 315 | otg_state_string(musb->xceiv->state), |
302 | musb_readb(musb->mregs, MUSB_DEVCTL)); | 316 | musb_readb(musb->mregs, MUSB_DEVCTL)); |
303 | } | 317 | } |
304 | 318 | ||
305 | static int bfin_set_power(struct otg_transceiver *x, unsigned mA) | 319 | static int bfin_musb_set_power(struct otg_transceiver *x, unsigned mA) |
306 | { | 320 | { |
307 | return 0; | 321 | return 0; |
308 | } | 322 | } |
309 | 323 | ||
310 | void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | 324 | static void bfin_musb_try_idle(struct musb *musb, unsigned long timeout) |
311 | { | 325 | { |
312 | if (!is_otg_enabled(musb) && is_host_enabled(musb)) | 326 | if (!is_otg_enabled(musb) && is_host_enabled(musb)) |
313 | mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); | 327 | mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); |
314 | } | 328 | } |
315 | 329 | ||
316 | int musb_platform_get_vbus_status(struct musb *musb) | 330 | static int bfin_musb_vbus_status(struct musb *musb) |
317 | { | 331 | { |
318 | return 0; | 332 | return 0; |
319 | } | 333 | } |
320 | 334 | ||
321 | int musb_platform_set_mode(struct musb *musb, u8 musb_mode) | 335 | static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode) |
322 | { | 336 | { |
323 | return -EIO; | 337 | return -EIO; |
324 | } | 338 | } |
325 | 339 | ||
326 | int __init musb_platform_init(struct musb *musb, void *board_data) | 340 | static int bfin_musb_adjust_channel_params(struct dma_channel *channel, |
341 | u16 packet_sz, u8 *mode, | ||
342 | dma_addr_t *dma_addr, u32 *len) | ||
327 | { | 343 | { |
344 | struct musb_dma_channel *musb_channel = channel->private_data; | ||
328 | 345 | ||
329 | /* | 346 | /* |
330 | * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE | 347 | * Anomaly 05000450 might cause data corruption when using DMA |
331 | * and OTG HOST modes, while rev 1.1 and greater require PE7 to | 348 | * MODE 1 transmits with short packet. So to work around this, |
332 | * be low for DEVICE mode and high for HOST mode. We set it high | 349 | * we truncate all MODE 1 transfers down to a multiple of the |
333 | * here because we are in host mode | 350 | * max packet size, and then do the last short packet transfer |
351 | * (if there is any) using MODE 0. | ||
334 | */ | 352 | */ |
335 | 353 | if (ANOMALY_05000450) { | |
336 | if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) { | 354 | if (musb_channel->transmit && *mode == 1) |
337 | printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n", | 355 | *len = *len - (*len % packet_sz); |
338 | musb->config->gpio_vrsel); | ||
339 | return -ENODEV; | ||
340 | } | 356 | } |
341 | gpio_direction_output(musb->config->gpio_vrsel, 0); | ||
342 | 357 | ||
343 | usb_nop_xceiv_register(); | 358 | return 0; |
344 | musb->xceiv = otg_get_transceiver(); | 359 | } |
345 | if (!musb->xceiv) | ||
346 | return -ENODEV; | ||
347 | 360 | ||
361 | static void bfin_musb_reg_init(struct musb *musb) | ||
362 | { | ||
348 | if (ANOMALY_05000346) { | 363 | if (ANOMALY_05000346) { |
349 | bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); | 364 | bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); |
350 | SSYNC(); | 365 | SSYNC(); |
@@ -356,7 +371,8 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
356 | } | 371 | } |
357 | 372 | ||
358 | /* Configure PLL oscillator register */ | 373 | /* Configure PLL oscillator register */ |
359 | bfin_write_USB_PLLOSC_CTRL(0x30a8); | 374 | bfin_write_USB_PLLOSC_CTRL(0x3080 | |
375 | ((480/musb->config->clkin) << 1)); | ||
360 | SSYNC(); | 376 | SSYNC(); |
361 | 377 | ||
362 | bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1); | 378 | bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1); |
@@ -378,24 +394,205 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
378 | EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA | | 394 | EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA | |
379 | EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); | 395 | EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); |
380 | SSYNC(); | 396 | SSYNC(); |
397 | } | ||
398 | |||
399 | static int bfin_musb_init(struct musb *musb) | ||
400 | { | ||
401 | |||
402 | /* | ||
403 | * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE | ||
404 | * and OTG HOST modes, while rev 1.1 and greater require PE7 to | ||
405 | * be low for DEVICE mode and high for HOST mode. We set it high | ||
406 | * here because we are in host mode | ||
407 | */ | ||
408 | |||
409 | if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) { | ||
410 | printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d\n", | ||
411 | musb->config->gpio_vrsel); | ||
412 | return -ENODEV; | ||
413 | } | ||
414 | gpio_direction_output(musb->config->gpio_vrsel, 0); | ||
415 | |||
416 | usb_nop_xceiv_register(); | ||
417 | musb->xceiv = otg_get_transceiver(); | ||
418 | if (!musb->xceiv) { | ||
419 | gpio_free(musb->config->gpio_vrsel); | ||
420 | return -ENODEV; | ||
421 | } | ||
422 | |||
423 | bfin_musb_reg_init(musb); | ||
381 | 424 | ||
382 | if (is_host_enabled(musb)) { | 425 | if (is_host_enabled(musb)) { |
383 | musb->board_set_vbus = bfin_set_vbus; | ||
384 | setup_timer(&musb_conn_timer, | 426 | setup_timer(&musb_conn_timer, |
385 | musb_conn_timer_handler, (unsigned long) musb); | 427 | musb_conn_timer_handler, (unsigned long) musb); |
386 | } | 428 | } |
387 | if (is_peripheral_enabled(musb)) | 429 | if (is_peripheral_enabled(musb)) |
388 | musb->xceiv->set_power = bfin_set_power; | 430 | musb->xceiv->set_power = bfin_musb_set_power; |
389 | 431 | ||
390 | musb->isr = blackfin_interrupt; | 432 | musb->isr = blackfin_interrupt; |
433 | musb->double_buffer_not_ok = true; | ||
391 | 434 | ||
392 | return 0; | 435 | return 0; |
393 | } | 436 | } |
394 | 437 | ||
395 | int musb_platform_exit(struct musb *musb) | 438 | static int bfin_musb_exit(struct musb *musb) |
396 | { | 439 | { |
397 | |||
398 | gpio_free(musb->config->gpio_vrsel); | 440 | gpio_free(musb->config->gpio_vrsel); |
399 | 441 | ||
442 | otg_put_transceiver(musb->xceiv); | ||
443 | usb_nop_xceiv_unregister(); | ||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | static const struct musb_platform_ops bfin_ops = { | ||
448 | .init = bfin_musb_init, | ||
449 | .exit = bfin_musb_exit, | ||
450 | |||
451 | .enable = bfin_musb_enable, | ||
452 | .disable = bfin_musb_disable, | ||
453 | |||
454 | .set_mode = bfin_musb_set_mode, | ||
455 | .try_idle = bfin_musb_try_idle, | ||
456 | |||
457 | .vbus_status = bfin_musb_vbus_status, | ||
458 | .set_vbus = bfin_musb_set_vbus, | ||
459 | |||
460 | .adjust_channel_params = bfin_musb_adjust_channel_params, | ||
461 | }; | ||
462 | |||
463 | static u64 bfin_dmamask = DMA_BIT_MASK(32); | ||
464 | |||
465 | static int __init bfin_probe(struct platform_device *pdev) | ||
466 | { | ||
467 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; | ||
468 | struct platform_device *musb; | ||
469 | struct bfin_glue *glue; | ||
470 | |||
471 | int ret = -ENOMEM; | ||
472 | |||
473 | glue = kzalloc(sizeof(*glue), GFP_KERNEL); | ||
474 | if (!glue) { | ||
475 | dev_err(&pdev->dev, "failed to allocate glue context\n"); | ||
476 | goto err0; | ||
477 | } | ||
478 | |||
479 | musb = platform_device_alloc("musb-hdrc", -1); | ||
480 | if (!musb) { | ||
481 | dev_err(&pdev->dev, "failed to allocate musb device\n"); | ||
482 | goto err1; | ||
483 | } | ||
484 | |||
485 | musb->dev.parent = &pdev->dev; | ||
486 | musb->dev.dma_mask = &bfin_dmamask; | ||
487 | musb->dev.coherent_dma_mask = bfin_dmamask; | ||
488 | |||
489 | glue->dev = &pdev->dev; | ||
490 | glue->musb = musb; | ||
491 | |||
492 | pdata->platform_ops = &bfin_ops; | ||
493 | |||
494 | platform_set_drvdata(pdev, glue); | ||
495 | |||
496 | ret = platform_device_add_resources(musb, pdev->resource, | ||
497 | pdev->num_resources); | ||
498 | if (ret) { | ||
499 | dev_err(&pdev->dev, "failed to add resources\n"); | ||
500 | goto err2; | ||
501 | } | ||
502 | |||
503 | ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); | ||
504 | if (ret) { | ||
505 | dev_err(&pdev->dev, "failed to add platform_data\n"); | ||
506 | goto err2; | ||
507 | } | ||
508 | |||
509 | ret = platform_device_add(musb); | ||
510 | if (ret) { | ||
511 | dev_err(&pdev->dev, "failed to register musb device\n"); | ||
512 | goto err2; | ||
513 | } | ||
514 | |||
515 | return 0; | ||
516 | |||
517 | err2: | ||
518 | platform_device_put(musb); | ||
519 | |||
520 | err1: | ||
521 | kfree(glue); | ||
522 | |||
523 | err0: | ||
524 | return ret; | ||
525 | } | ||
526 | |||
527 | static int __exit bfin_remove(struct platform_device *pdev) | ||
528 | { | ||
529 | struct bfin_glue *glue = platform_get_drvdata(pdev); | ||
530 | |||
531 | platform_device_del(glue->musb); | ||
532 | platform_device_put(glue->musb); | ||
533 | kfree(glue); | ||
534 | |||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | #ifdef CONFIG_PM | ||
539 | static int bfin_suspend(struct device *dev) | ||
540 | { | ||
541 | struct bfin_glue *glue = dev_get_drvdata(dev); | ||
542 | struct musb *musb = glue_to_musb(glue); | ||
543 | |||
544 | if (is_host_active(musb)) | ||
545 | /* | ||
546 | * During hibernate gpio_vrsel will change from high to low | ||
547 | * low which will generate wakeup event resume the system | ||
548 | * immediately. Set it to 0 before hibernate to avoid this | ||
549 | * wakeup event. | ||
550 | */ | ||
551 | gpio_set_value(musb->config->gpio_vrsel, 0); | ||
552 | |||
400 | return 0; | 553 | return 0; |
401 | } | 554 | } |
555 | |||
556 | static int bfin_resume(struct device *dev) | ||
557 | { | ||
558 | struct bfin_glue *glue = dev_get_drvdata(dev); | ||
559 | struct musb *musb = glue_to_musb(glue); | ||
560 | |||
561 | bfin_musb_reg_init(musb); | ||
562 | |||
563 | return 0; | ||
564 | } | ||
565 | |||
566 | static struct dev_pm_ops bfin_pm_ops = { | ||
567 | .suspend = bfin_suspend, | ||
568 | .resume = bfin_resume, | ||
569 | }; | ||
570 | |||
571 | #define DEV_PM_OPS &bfin_pm_ops | ||
572 | #else | ||
573 | #define DEV_PM_OPS NULL | ||
574 | #endif | ||
575 | |||
576 | static struct platform_driver bfin_driver = { | ||
577 | .remove = __exit_p(bfin_remove), | ||
578 | .driver = { | ||
579 | .name = "musb-blackfin", | ||
580 | .pm = DEV_PM_OPS, | ||
581 | }, | ||
582 | }; | ||
583 | |||
584 | MODULE_DESCRIPTION("Blackfin MUSB Glue Layer"); | ||
585 | MODULE_AUTHOR("Bryan Wy <cooloney@kernel.org>"); | ||
586 | MODULE_LICENSE("GPL v2"); | ||
587 | |||
588 | static int __init bfin_init(void) | ||
589 | { | ||
590 | return platform_driver_probe(&bfin_driver, bfin_probe); | ||
591 | } | ||
592 | subsys_initcall(bfin_init); | ||
593 | |||
594 | static void __exit bfin_exit(void) | ||
595 | { | ||
596 | platform_driver_unregister(&bfin_driver); | ||
597 | } | ||
598 | module_exit(bfin_exit); | ||
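
Besides converting the Blackfin code to the same glue-module pattern, the hunks above add bfin_musb_adjust_channel_params() for anomaly 05000450: DMA MODE 1 transmits ending in a short packet can corrupt data, so the transfer length is clipped to a whole number of max-size packets and the leftover short packet is sent afterwards in MODE 0. A tiny worked example of that arithmetic (the values are illustrative only, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int packet_sz = 512;	/* high-speed bulk max packet size */
	unsigned int len = 3000;	/* requested MODE 1 TX length */

	/* Same clipping as "*len = *len - (*len % packet_sz)" in the driver. */
	unsigned int mode1_len = len - (len % packet_sz);	/* 2560 = 5 full packets */
	unsigned int mode0_tail = len - mode1_len;		/* 440-byte short packet */

	printf("MODE 1 DMA: %u bytes, MODE 0 tail: %u bytes\n",
	       mode1_len, mode0_tail);
	return 0;
}
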
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 5ab5bb89bae3..149f3f310a0a 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -236,7 +236,7 @@ static int cppi_controller_stop(struct dma_controller *c)
236 | musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG, | 236 | musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG, |
237 | DAVINCI_DMA_ALL_CHANNELS_ENABLE); | 237 | DAVINCI_DMA_ALL_CHANNELS_ENABLE); |
238 | 238 | ||
239 | DBG(1, "Tearing down RX and TX Channels\n"); | 239 | dev_dbg(musb->controller, "Tearing down RX and TX Channels\n"); |
240 | for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { | 240 | for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { |
241 | /* FIXME restructure of txdma to use bds like rxdma */ | 241 | /* FIXME restructure of txdma to use bds like rxdma */ |
242 | controller->tx[i].last_processed = NULL; | 242 | controller->tx[i].last_processed = NULL; |
@@ -301,13 +301,13 @@ cppi_channel_allocate(struct dma_controller *c,
301 | */ | 301 | */ |
302 | if (transmit) { | 302 | if (transmit) { |
303 | if (index >= ARRAY_SIZE(controller->tx)) { | 303 | if (index >= ARRAY_SIZE(controller->tx)) { |
304 | DBG(1, "no %cX%d CPPI channel\n", 'T', index); | 304 | dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'T', index); |
305 | return NULL; | 305 | return NULL; |
306 | } | 306 | } |
307 | cppi_ch = controller->tx + index; | 307 | cppi_ch = controller->tx + index; |
308 | } else { | 308 | } else { |
309 | if (index >= ARRAY_SIZE(controller->rx)) { | 309 | if (index >= ARRAY_SIZE(controller->rx)) { |
310 | DBG(1, "no %cX%d CPPI channel\n", 'R', index); | 310 | dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'R', index); |
311 | return NULL; | 311 | return NULL; |
312 | } | 312 | } |
313 | cppi_ch = controller->rx + index; | 313 | cppi_ch = controller->rx + index; |
@@ -318,13 +318,13 @@ cppi_channel_allocate(struct dma_controller *c,
318 | * with the other DMA engine too | 318 | * with the other DMA engine too |
319 | */ | 319 | */ |
320 | if (cppi_ch->hw_ep) | 320 | if (cppi_ch->hw_ep) |
321 | DBG(1, "re-allocating DMA%d %cX channel %p\n", | 321 | dev_dbg(musb->controller, "re-allocating DMA%d %cX channel %p\n", |
322 | index, transmit ? 'T' : 'R', cppi_ch); | 322 | index, transmit ? 'T' : 'R', cppi_ch); |
323 | cppi_ch->hw_ep = ep; | 323 | cppi_ch->hw_ep = ep; |
324 | cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; | 324 | cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; |
325 | cppi_ch->channel.max_len = 0x7fffffff; | 325 | cppi_ch->channel.max_len = 0x7fffffff; |
326 | 326 | ||
327 | DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); | 327 | dev_dbg(musb->controller, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); |
328 | return &cppi_ch->channel; | 328 | return &cppi_ch->channel; |
329 | } | 329 | } |
330 | 330 | ||
@@ -339,7 +339,7 @@ static void cppi_channel_release(struct dma_channel *channel)
339 | c = container_of(channel, struct cppi_channel, channel); | 339 | c = container_of(channel, struct cppi_channel, channel); |
340 | tibase = c->controller->tibase; | 340 | tibase = c->controller->tibase; |
341 | if (!c->hw_ep) | 341 | if (!c->hw_ep) |
342 | DBG(1, "releasing idle DMA channel %p\n", c); | 342 | dev_dbg(musb->controller, "releasing idle DMA channel %p\n", c); |
343 | else if (!c->transmit) | 343 | else if (!c->transmit) |
344 | core_rxirq_enable(tibase, c->index + 1); | 344 | core_rxirq_enable(tibase, c->index + 1); |
345 | 345 | ||
@@ -597,12 +597,12 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
597 | length = min(n_bds * maxpacket, length); | 597 | length = min(n_bds * maxpacket, length); |
598 | } | 598 | } |
599 | 599 | ||
600 | DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", | 600 | dev_dbg(musb->controller, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n", |
601 | tx->index, | 601 | tx->index, |
602 | maxpacket, | 602 | maxpacket, |
603 | rndis ? "rndis" : "transparent", | 603 | rndis ? "rndis" : "transparent", |
604 | n_bds, | 604 | n_bds, |
605 | addr, length); | 605 | (unsigned long long)addr, length); |
606 | 606 | ||
607 | cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); | 607 | cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); |
608 | 608 | ||
@@ -654,7 +654,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
654 | bd->hw_options |= CPPI_ZERO_SET; | 654 | bd->hw_options |= CPPI_ZERO_SET; |
655 | } | 655 | } |
656 | 656 | ||
657 | DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n", | 657 | dev_dbg(musb->controller, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n", |
658 | bd, bd->hw_next, bd->hw_bufp, | 658 | bd, bd->hw_next, bd->hw_bufp, |
659 | bd->hw_off_len, bd->hw_options); | 659 | bd->hw_off_len, bd->hw_options); |
660 | 660 | ||
@@ -819,8 +819,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
819 | 819 | ||
820 | length = min(n_bds * maxpacket, length); | 820 | length = min(n_bds * maxpacket, length); |
821 | 821 | ||
822 | DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " | 822 | dev_dbg(musb->controller, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " |
823 | "dma 0x%x len %u %u/%u\n", | 823 | "dma 0x%llx len %u %u/%u\n", |
824 | rx->index, maxpacket, | 824 | rx->index, maxpacket, |
825 | onepacket | 825 | onepacket |
826 | ? (is_rndis ? "rndis" : "onepacket") | 826 | ? (is_rndis ? "rndis" : "onepacket") |
@@ -829,7 +829,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
829 | musb_readl(tibase, | 829 | musb_readl(tibase, |
830 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) | 830 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) |
831 | & 0xffff, | 831 | & 0xffff, |
832 | addr, length, rx->channel.actual_len, rx->buf_len); | 832 | (unsigned long long)addr, length, |
833 | rx->channel.actual_len, rx->buf_len); | ||
833 | 834 | ||
834 | /* only queue one segment at a time, since the hardware prevents | 835 | /* only queue one segment at a time, since the hardware prevents |
835 | * correct queue shutdown after unexpected short packets | 836 | * correct queue shutdown after unexpected short packets |
@@ -935,7 +936,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
935 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) | 936 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) |
936 | & 0xffff; | 937 | & 0xffff; |
937 | if (i < (2 + n_bds)) { | 938 | if (i < (2 + n_bds)) { |
938 | DBG(2, "bufcnt%d underrun - %d (for %d)\n", | 939 | dev_dbg(musb->controller, "bufcnt%d underrun - %d (for %d)\n", |
939 | rx->index, i, n_bds); | 940 | rx->index, i, n_bds); |
940 | musb_writel(tibase, | 941 | musb_writel(tibase, |
941 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), | 942 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), |
@@ -984,7 +985,7 @@ static int cppi_channel_program(struct dma_channel *ch, | |||
984 | /* WARN_ON(1); */ | 985 | /* WARN_ON(1); */ |
985 | break; | 986 | break; |
986 | case MUSB_DMA_STATUS_UNKNOWN: | 987 | case MUSB_DMA_STATUS_UNKNOWN: |
987 | DBG(1, "%cX DMA%d not allocated!\n", | 988 | dev_dbg(musb->controller, "%cX DMA%d not allocated!\n", |
988 | cppi_ch->transmit ? 'T' : 'R', | 989 | cppi_ch->transmit ? 'T' : 'R', |
989 | cppi_ch->index); | 990 | cppi_ch->index); |
990 | /* FALLTHROUGH */ | 991 | /* FALLTHROUGH */ |
@@ -1039,9 +1040,9 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) | |||
1039 | if (!completed && (bd->hw_options & CPPI_OWN_SET)) | 1040 | if (!completed && (bd->hw_options & CPPI_OWN_SET)) |
1040 | break; | 1041 | break; |
1041 | 1042 | ||
1042 | DBG(5, "C/RXBD %08x: nxt %08x buf %08x " | 1043 | dev_dbg(musb->controller, "C/RXBD %llx: nxt %08x buf %08x " |
1043 | "off.len %08x opt.len %08x (%d)\n", | 1044 | "off.len %08x opt.len %08x (%d)\n", |
1044 | bd->dma, bd->hw_next, bd->hw_bufp, | 1045 | (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp, |
1045 | bd->hw_off_len, bd->hw_options, | 1046 | bd->hw_off_len, bd->hw_options, |
1046 | rx->channel.actual_len); | 1047 | rx->channel.actual_len); |
1047 | 1048 | ||
@@ -1061,7 +1062,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) | |||
1061 | * CPPI ignores those BDs even though OWN is still set. | 1062 | * CPPI ignores those BDs even though OWN is still set. |
1062 | */ | 1063 | */ |
1063 | completed = true; | 1064 | completed = true; |
1064 | DBG(3, "rx short %d/%d (%d)\n", | 1065 | dev_dbg(musb->controller, "rx short %d/%d (%d)\n", |
1065 | len, bd->buflen, | 1066 | len, bd->buflen, |
1066 | rx->channel.actual_len); | 1067 | rx->channel.actual_len); |
1067 | } | 1068 | } |
@@ -1111,11 +1112,12 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) | |||
1111 | musb_ep_select(cppi->mregs, rx->index + 1); | 1112 | musb_ep_select(cppi->mregs, rx->index + 1); |
1112 | csr = musb_readw(regs, MUSB_RXCSR); | 1113 | csr = musb_readw(regs, MUSB_RXCSR); |
1113 | if (csr & MUSB_RXCSR_DMAENAB) { | 1114 | if (csr & MUSB_RXCSR_DMAENAB) { |
1114 | DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", | 1115 | dev_dbg(musb->controller, "list%d %p/%p, last %llx%s, csr %04x\n", |
1115 | rx->index, | 1116 | rx->index, |
1116 | rx->head, rx->tail, | 1117 | rx->head, rx->tail, |
1117 | rx->last_processed | 1118 | rx->last_processed |
1118 | ? rx->last_processed->dma | 1119 | ? (unsigned long long) |
1120 | rx->last_processed->dma | ||
1119 | : 0, | 1121 | : 0, |
1120 | completed ? ", completed" : "", | 1122 | completed ? ", completed" : "", |
1121 | csr); | 1123 | csr); |
@@ -1156,7 +1158,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) | |||
1156 | struct musb_hw_ep *hw_ep = NULL; | 1158 | struct musb_hw_ep *hw_ep = NULL; |
1157 | u32 rx, tx; | 1159 | u32 rx, tx; |
1158 | int i, index; | 1160 | int i, index; |
1159 | unsigned long flags; | 1161 | unsigned long uninitialized_var(flags); |
1160 | 1162 | ||
1161 | cppi = container_of(musb->dma_controller, struct cppi, controller); | 1163 | cppi = container_of(musb->dma_controller, struct cppi, controller); |
1162 | if (cppi->irq) | 1164 | if (cppi->irq) |
@@ -1167,10 +1169,13 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) | |||
1167 | tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); | 1169 | tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); |
1168 | rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); | 1170 | rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); |
1169 | 1171 | ||
1170 | if (!tx && !rx) | 1172 | if (!tx && !rx) { |
1173 | if (cppi->irq) | ||
1174 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1171 | return IRQ_NONE; | 1175 | return IRQ_NONE; |
1176 | } | ||
1172 | 1177 | ||
1173 | DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx); | 1178 | dev_dbg(musb->controller, "CPPI IRQ Tx%x Rx%x\n", tx, rx); |
1174 | 1179 | ||
1175 | /* process TX channels */ | 1180 | /* process TX channels */ |
1176 | for (index = 0; tx; tx = tx >> 1, index++) { | 1181 | for (index = 0; tx; tx = tx >> 1, index++) { |
@@ -1198,8 +1203,8 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) | |||
1198 | * that needs to be acknowledged. | 1203 | * that needs to be acknowledged. |
1199 | */ | 1204 | */ |
1200 | if (NULL == bd) { | 1205 | if (NULL == bd) { |
1201 | DBG(1, "null BD\n"); | 1206 | dev_dbg(musb->controller, "null BD\n"); |
1202 | tx_ram->tx_complete = 0; | 1207 | musb_writel(&tx_ram->tx_complete, 0, 0); |
1203 | continue; | 1208 | continue; |
1204 | } | 1209 | } |
1205 | 1210 | ||
@@ -1213,7 +1218,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) | |||
1213 | if (bd->hw_options & CPPI_OWN_SET) | 1218 | if (bd->hw_options & CPPI_OWN_SET) |
1214 | break; | 1219 | break; |
1215 | 1220 | ||
1216 | DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n", | 1221 | dev_dbg(musb->controller, "C/TXBD %p n %x b %x off %x opt %x\n", |
1217 | bd, bd->hw_next, bd->hw_bufp, | 1222 | bd, bd->hw_next, bd->hw_bufp, |
1218 | bd->hw_off_len, bd->hw_options); | 1223 | bd->hw_off_len, bd->hw_options); |
1219 | 1224 | ||
@@ -1308,7 +1313,7 @@ dma_controller_create(struct musb *musb, void __iomem *mregs) | |||
1308 | struct cppi *controller; | 1313 | struct cppi *controller; |
1309 | struct device *dev = musb->controller; | 1314 | struct device *dev = musb->controller; |
1310 | struct platform_device *pdev = to_platform_device(dev); | 1315 | struct platform_device *pdev = to_platform_device(dev); |
1311 | int irq = platform_get_irq(pdev, 1); | 1316 | int irq = platform_get_irq_byname(pdev, "dma"); |
1312 | 1317 | ||
1313 | controller = kzalloc(sizeof *controller, GFP_KERNEL); | 1318 | controller = kzalloc(sizeof *controller, GFP_KERNEL); |
1314 | if (!controller) | 1319 | if (!controller) |
@@ -1452,7 +1457,7 @@ static int cppi_channel_abort(struct dma_channel *channel) | |||
1452 | * compare mode by writing 1 to the tx_complete register. | 1457 | * compare mode by writing 1 to the tx_complete register. |
1453 | */ | 1458 | */ |
1454 | cppi_reset_tx(tx_ram, 1); | 1459 | cppi_reset_tx(tx_ram, 1); |
1455 | cppi_ch->head = 0; | 1460 | cppi_ch->head = NULL; |
1456 | musb_writel(&tx_ram->tx_complete, 0, 1); | 1461 | musb_writel(&tx_ram->tx_complete, 0, 1); |
1457 | cppi_dump_tx(5, cppi_ch, " (done teardown)"); | 1462 | cppi_dump_tx(5, cppi_ch, " (done teardown)"); |
1458 | 1463 | ||
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c new file mode 100644 index 000000000000..662ed34980bd --- /dev/null +++ b/drivers/usb/musb/da8xx.c | |||
@@ -0,0 +1,608 @@ | |||
1 | /* | ||
2 | * Texas Instruments DA8xx/OMAP-L1x "glue layer" | ||
3 | * | ||
4 | * Copyright (c) 2008-2009 MontaVista Software, Inc. <source@mvista.com> | ||
5 | * | ||
6 | * Based on the DaVinci "glue layer" code. | ||
7 | * Copyright (C) 2005-2006 by Texas Instruments | ||
8 | * | ||
9 | * This file is part of the Inventra Controller Driver for Linux. | ||
10 | * | ||
11 | * The Inventra Controller Driver for Linux is free software; you | ||
12 | * can redistribute it and/or modify it under the terms of the GNU | ||
13 | * General Public License version 2 as published by the Free Software | ||
14 | * Foundation. | ||
15 | * | ||
16 | * The Inventra Controller Driver for Linux is distributed in | ||
17 | * the hope that it will be useful, but WITHOUT ANY WARRANTY; | ||
18 | * without even the implied warranty of MERCHANTABILITY or | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | ||
20 | * License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with The Inventra Controller Driver for Linux ; if not, | ||
24 | * write to the Free Software Foundation, Inc., 59 Temple Place, | ||
25 | * Suite 330, Boston, MA 02111-1307 USA | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include <linux/init.h> | ||
30 | #include <linux/clk.h> | ||
31 | #include <linux/io.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/dma-mapping.h> | ||
34 | |||
35 | #include <mach/da8xx.h> | ||
36 | #include <mach/usb.h> | ||
37 | |||
38 | #include "musb_core.h" | ||
39 | |||
40 | /* | ||
41 | * DA8XX specific definitions | ||
42 | */ | ||
43 | |||
44 | /* USB 2.0 OTG module registers */ | ||
45 | #define DA8XX_USB_REVISION_REG 0x00 | ||
46 | #define DA8XX_USB_CTRL_REG 0x04 | ||
47 | #define DA8XX_USB_STAT_REG 0x08 | ||
48 | #define DA8XX_USB_EMULATION_REG 0x0c | ||
49 | #define DA8XX_USB_MODE_REG 0x10 /* Transparent, CDC, [Generic] RNDIS */ | ||
50 | #define DA8XX_USB_AUTOREQ_REG 0x14 | ||
51 | #define DA8XX_USB_SRP_FIX_TIME_REG 0x18 | ||
52 | #define DA8XX_USB_TEARDOWN_REG 0x1c | ||
53 | #define DA8XX_USB_INTR_SRC_REG 0x20 | ||
54 | #define DA8XX_USB_INTR_SRC_SET_REG 0x24 | ||
55 | #define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28 | ||
56 | #define DA8XX_USB_INTR_MASK_REG 0x2c | ||
57 | #define DA8XX_USB_INTR_MASK_SET_REG 0x30 | ||
58 | #define DA8XX_USB_INTR_MASK_CLEAR_REG 0x34 | ||
59 | #define DA8XX_USB_INTR_SRC_MASKED_REG 0x38 | ||
60 | #define DA8XX_USB_END_OF_INTR_REG 0x3c | ||
61 | #define DA8XX_USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x50 + (((n) - 1) << 2)) | ||
62 | |||
63 | /* Control register bits */ | ||
64 | #define DA8XX_SOFT_RESET_MASK 1 | ||
65 | |||
66 | #define DA8XX_USB_TX_EP_MASK 0x1f /* EP0 + 4 Tx EPs */ | ||
67 | #define DA8XX_USB_RX_EP_MASK 0x1e /* 4 Rx EPs */ | ||
68 | |||
69 | /* USB interrupt register bits */ | ||
70 | #define DA8XX_INTR_USB_SHIFT 16 | ||
71 | #define DA8XX_INTR_USB_MASK (0x1ff << DA8XX_INTR_USB_SHIFT) /* 8 Mentor */ | ||
72 | /* interrupts and DRVVBUS interrupt */ | ||
73 | #define DA8XX_INTR_DRVVBUS 0x100 | ||
74 | #define DA8XX_INTR_RX_SHIFT 8 | ||
75 | #define DA8XX_INTR_RX_MASK (DA8XX_USB_RX_EP_MASK << DA8XX_INTR_RX_SHIFT) | ||
76 | #define DA8XX_INTR_TX_SHIFT 0 | ||
77 | #define DA8XX_INTR_TX_MASK (DA8XX_USB_TX_EP_MASK << DA8XX_INTR_TX_SHIFT) | ||
78 | |||
79 | #define DA8XX_MENTOR_CORE_OFFSET 0x400 | ||
80 | |||
81 | #define CFGCHIP2 IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG) | ||
82 | |||
83 | struct da8xx_glue { | ||
84 | struct device *dev; | ||
85 | struct platform_device *musb; | ||
86 | struct clk *clk; | ||
87 | }; | ||
88 | |||
89 | /* | ||
90 | * REVISIT (PM): we should be able to keep the PHY in low power mode most | ||
91 | * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0 | ||
92 | * and, when in host mode, autosuspending idle root ports... PHY_PLLON | ||
93 | * (overriding SUSPENDM?) then likely needs to stay off. | ||
94 | */ | ||
95 | |||
96 | static inline void phy_on(void) | ||
97 | { | ||
98 | u32 cfgchip2 = __raw_readl(CFGCHIP2); | ||
99 | |||
100 | /* | ||
101 | * Start the on-chip PHY and its PLL. | ||
102 | */ | ||
103 | cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN); | ||
104 | cfgchip2 |= CFGCHIP2_PHY_PLLON; | ||
105 | __raw_writel(cfgchip2, CFGCHIP2); | ||
106 | |||
107 | pr_info("Waiting for USB PHY clock good...\n"); | ||
108 | while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD)) | ||
109 | cpu_relax(); | ||
110 | } | ||
111 | |||
112 | static inline void phy_off(void) | ||
113 | { | ||
114 | u32 cfgchip2 = __raw_readl(CFGCHIP2); | ||
115 | |||
116 | /* | ||
117 | * Ensure that USB 1.1 reference clock is not being sourced from | ||
118 | * USB 2.0 PHY. Otherwise do not power down the PHY. | ||
119 | */ | ||
120 | if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX) && | ||
121 | (cfgchip2 & CFGCHIP2_USB1SUSPENDM)) { | ||
122 | pr_warning("USB 1.1 clocked from USB 2.0 PHY -- " | ||
123 | "can't power it down\n"); | ||
124 | return; | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Power down the on-chip PHY. | ||
129 | */ | ||
130 | cfgchip2 |= CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN; | ||
131 | __raw_writel(cfgchip2, CFGCHIP2); | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * Because we don't set CTRL.UINT, it's "important" to: | ||
136 | * - not read/write INTRUSB/INTRUSBE (except during | ||
137 | * initial setup, as a workaround); | ||
138 | * - use INTSET/INTCLR instead. | ||
139 | */ | ||
140 | |||
141 | /** | ||
142 | * da8xx_musb_enable - enable interrupts | ||
143 | */ | ||
144 | static void da8xx_musb_enable(struct musb *musb) | ||
145 | { | ||
146 | void __iomem *reg_base = musb->ctrl_base; | ||
147 | u32 mask; | ||
148 | |||
149 | /* Workaround: setup IRQs through both register sets. */ | ||
150 | mask = ((musb->epmask & DA8XX_USB_TX_EP_MASK) << DA8XX_INTR_TX_SHIFT) | | ||
151 | ((musb->epmask & DA8XX_USB_RX_EP_MASK) << DA8XX_INTR_RX_SHIFT) | | ||
152 | DA8XX_INTR_USB_MASK; | ||
153 | musb_writel(reg_base, DA8XX_USB_INTR_MASK_SET_REG, mask); | ||
154 | |||
155 | /* Force the DRVVBUS IRQ so we can start polling for ID change. */ | ||
156 | if (is_otg_enabled(musb)) | ||
157 | musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG, | ||
158 | DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT); | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * da8xx_musb_disable - disable HDRC and flush interrupts | ||
163 | */ | ||
164 | static void da8xx_musb_disable(struct musb *musb) | ||
165 | { | ||
166 | void __iomem *reg_base = musb->ctrl_base; | ||
167 | |||
168 | musb_writel(reg_base, DA8XX_USB_INTR_MASK_CLEAR_REG, | ||
169 | DA8XX_INTR_USB_MASK | | ||
170 | DA8XX_INTR_TX_MASK | DA8XX_INTR_RX_MASK); | ||
171 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
172 | musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0); | ||
173 | } | ||
174 | |||
175 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
176 | #define portstate(stmt) stmt | ||
177 | #else | ||
178 | #define portstate(stmt) | ||
179 | #endif | ||
180 | |||
181 | static void da8xx_musb_set_vbus(struct musb *musb, int is_on) | ||
182 | { | ||
183 | WARN_ON(is_on && is_peripheral_active(musb)); | ||
184 | } | ||
185 | |||
186 | #define POLL_SECONDS 2 | ||
187 | |||
188 | static struct timer_list otg_workaround; | ||
189 | |||
190 | static void otg_timer(unsigned long _musb) | ||
191 | { | ||
192 | struct musb *musb = (void *)_musb; | ||
193 | void __iomem *mregs = musb->mregs; | ||
194 | u8 devctl; | ||
195 | unsigned long flags; | ||
196 | |||
197 | /* | ||
198 | * We poll because DaVinci's won't expose several OTG-critical | ||
199 | * status change events (from the transceiver) otherwise. | ||
200 | */ | ||
201 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
202 | dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, | ||
203 | otg_state_string(musb->xceiv->state)); | ||
204 | |||
205 | spin_lock_irqsave(&musb->lock, flags); | ||
206 | switch (musb->xceiv->state) { | ||
207 | case OTG_STATE_A_WAIT_BCON: | ||
208 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
209 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
210 | |||
211 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
212 | if (devctl & MUSB_DEVCTL_BDEVICE) { | ||
213 | musb->xceiv->state = OTG_STATE_B_IDLE; | ||
214 | MUSB_DEV_MODE(musb); | ||
215 | } else { | ||
216 | musb->xceiv->state = OTG_STATE_A_IDLE; | ||
217 | MUSB_HST_MODE(musb); | ||
218 | } | ||
219 | break; | ||
220 | case OTG_STATE_A_WAIT_VFALL: | ||
221 | /* | ||
222 | * Wait till VBUS falls below SessionEnd (~0.2 V); the 1.3 | ||
223 | * RTL seems to mis-handle session "start" otherwise (or in | ||
224 | * our case "recover"), in routine "VBUS was valid by the time | ||
225 | * VBUSERR got reported during enumeration" cases. | ||
226 | */ | ||
227 | if (devctl & MUSB_DEVCTL_VBUS) { | ||
228 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
229 | break; | ||
230 | } | ||
231 | musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; | ||
232 | musb_writel(musb->ctrl_base, DA8XX_USB_INTR_SRC_SET_REG, | ||
233 | MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT); | ||
234 | break; | ||
235 | case OTG_STATE_B_IDLE: | ||
236 | if (!is_peripheral_enabled(musb)) | ||
237 | break; | ||
238 | |||
239 | /* | ||
240 | * There's no ID-changed IRQ, so we have no good way to tell | ||
241 | * when to switch to the A-Default state machine (by setting | ||
242 | * the DEVCTL.Session bit). | ||
243 | * | ||
244 | * Workaround: whenever we're in B_IDLE, try setting the | ||
245 | * session flag every few seconds. If it works, ID was | ||
246 | * grounded and we're now in the A-Default state machine. | ||
247 | * | ||
248 | * NOTE: setting the session flag is _supposed_ to trigger | ||
249 | * SRP but clearly it doesn't. | ||
250 | */ | ||
251 | musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION); | ||
252 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
253 | if (devctl & MUSB_DEVCTL_BDEVICE) | ||
254 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
255 | else | ||
256 | musb->xceiv->state = OTG_STATE_A_IDLE; | ||
257 | break; | ||
258 | default: | ||
259 | break; | ||
260 | } | ||
261 | spin_unlock_irqrestore(&musb->lock, flags); | ||
262 | } | ||
263 | |||
264 | static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout) | ||
265 | { | ||
266 | static unsigned long last_timer; | ||
267 | |||
268 | if (!is_otg_enabled(musb)) | ||
269 | return; | ||
270 | |||
271 | if (timeout == 0) | ||
272 | timeout = jiffies + msecs_to_jiffies(3); | ||
273 | |||
274 | /* Never idle if active, or when VBUS timeout is not set as host */ | ||
275 | if (musb->is_active || (musb->a_wait_bcon == 0 && | ||
276 | musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { | ||
277 | dev_dbg(musb->controller, "%s active, deleting timer\n", | ||
278 | otg_state_string(musb->xceiv->state)); | ||
279 | del_timer(&otg_workaround); | ||
280 | last_timer = jiffies; | ||
281 | return; | ||
282 | } | ||
283 | |||
284 | if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) { | ||
285 | dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n"); | ||
286 | return; | ||
287 | } | ||
288 | last_timer = timeout; | ||
289 | |||
290 | dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", | ||
291 | otg_state_string(musb->xceiv->state), | ||
292 | jiffies_to_msecs(timeout - jiffies)); | ||
293 | mod_timer(&otg_workaround, timeout); | ||
294 | } | ||
295 | |||
296 | static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) | ||
297 | { | ||
298 | struct musb *musb = hci; | ||
299 | void __iomem *reg_base = musb->ctrl_base; | ||
300 | unsigned long flags; | ||
301 | irqreturn_t ret = IRQ_NONE; | ||
302 | u32 status; | ||
303 | |||
304 | spin_lock_irqsave(&musb->lock, flags); | ||
305 | |||
306 | /* | ||
307 | * NOTE: DA8XX shadows the Mentor IRQs. Don't manage them through | ||
308 | * the Mentor registers (except for setup), use the TI ones and EOI. | ||
309 | */ | ||
310 | |||
311 | /* Acknowledge and handle non-CPPI interrupts */ | ||
312 | status = musb_readl(reg_base, DA8XX_USB_INTR_SRC_MASKED_REG); | ||
313 | if (!status) | ||
314 | goto eoi; | ||
315 | |||
316 | musb_writel(reg_base, DA8XX_USB_INTR_SRC_CLEAR_REG, status); | ||
317 | dev_dbg(musb->controller, "USB IRQ %08x\n", status); | ||
318 | |||
319 | musb->int_rx = (status & DA8XX_INTR_RX_MASK) >> DA8XX_INTR_RX_SHIFT; | ||
320 | musb->int_tx = (status & DA8XX_INTR_TX_MASK) >> DA8XX_INTR_TX_SHIFT; | ||
321 | musb->int_usb = (status & DA8XX_INTR_USB_MASK) >> DA8XX_INTR_USB_SHIFT; | ||
322 | |||
323 | /* | ||
324 | * DRVVBUS IRQs are the only proxy we have (a very poor one!) for | ||
325 | * DA8xx's missing ID change IRQ. We need an ID change IRQ to | ||
326 | * switch appropriately between halves of the OTG state machine. | ||
327 | * Managing DEVCTL.Session per Mentor docs requires that we know its | ||
328 | * value but DEVCTL.BDevice is invalid without DEVCTL.Session set. | ||
329 | * Also, DRVVBUS pulses for SRP (but not at 5 V)... | ||
330 | */ | ||
331 | if (status & (DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT)) { | ||
332 | int drvvbus = musb_readl(reg_base, DA8XX_USB_STAT_REG); | ||
333 | void __iomem *mregs = musb->mregs; | ||
334 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
335 | int err; | ||
336 | |||
337 | err = is_host_enabled(musb) && (musb->int_usb & | ||
338 | MUSB_INTR_VBUSERROR); | ||
339 | if (err) { | ||
340 | /* | ||
341 | * The Mentor core doesn't debounce VBUS as needed | ||
342 | * to cope with device connect current spikes. This | ||
343 | * means it's not uncommon for bus-powered devices | ||
344 | * to get VBUS errors during enumeration. | ||
345 | * | ||
346 | * This is a workaround, but newer RTL from Mentor | ||
347 | * seems to allow a better one: "re"-starting sessions | ||
348 | * without waiting for VBUS to stop registering in | ||
349 | * devctl. | ||
350 | */ | ||
351 | musb->int_usb &= ~MUSB_INTR_VBUSERROR; | ||
352 | musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; | ||
353 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
354 | WARNING("VBUS error workaround (delay coming)\n"); | ||
355 | } else if (is_host_enabled(musb) && drvvbus) { | ||
356 | MUSB_HST_MODE(musb); | ||
357 | musb->xceiv->default_a = 1; | ||
358 | musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; | ||
359 | portstate(musb->port1_status |= USB_PORT_STAT_POWER); | ||
360 | del_timer(&otg_workaround); | ||
361 | } else { | ||
362 | musb->is_active = 0; | ||
363 | MUSB_DEV_MODE(musb); | ||
364 | musb->xceiv->default_a = 0; | ||
365 | musb->xceiv->state = OTG_STATE_B_IDLE; | ||
366 | portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); | ||
367 | } | ||
368 | |||
369 | dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", | ||
370 | drvvbus ? "on" : "off", | ||
371 | otg_state_string(musb->xceiv->state), | ||
372 | err ? " ERROR" : "", | ||
373 | devctl); | ||
374 | ret = IRQ_HANDLED; | ||
375 | } | ||
376 | |||
377 | if (musb->int_tx || musb->int_rx || musb->int_usb) | ||
378 | ret |= musb_interrupt(musb); | ||
379 | |||
380 | eoi: | ||
381 | /* EOI needs to be written for the IRQ to be re-asserted. */ | ||
382 | if (ret == IRQ_HANDLED || status) | ||
383 | musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0); | ||
384 | |||
385 | /* Poll for ID change */ | ||
386 | if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) | ||
387 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
388 | |||
389 | spin_unlock_irqrestore(&musb->lock, flags); | ||
390 | |||
391 | return ret; | ||
392 | } | ||
393 | |||
394 | static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode) | ||
395 | { | ||
396 | u32 cfgchip2 = __raw_readl(CFGCHIP2); | ||
397 | |||
398 | cfgchip2 &= ~CFGCHIP2_OTGMODE; | ||
399 | switch (musb_mode) { | ||
400 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
401 | case MUSB_HOST: /* Force VBUS valid, ID = 0 */ | ||
402 | cfgchip2 |= CFGCHIP2_FORCE_HOST; | ||
403 | break; | ||
404 | #endif | ||
405 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
406 | case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */ | ||
407 | cfgchip2 |= CFGCHIP2_FORCE_DEVICE; | ||
408 | break; | ||
409 | #endif | ||
410 | #ifdef CONFIG_USB_MUSB_OTG | ||
411 | case MUSB_OTG: /* Don't override the VBUS/ID comparators */ | ||
412 | cfgchip2 |= CFGCHIP2_NO_OVERRIDE; | ||
413 | break; | ||
414 | #endif | ||
415 | default: | ||
416 | dev_dbg(musb->controller, "Trying to set unsupported mode %u\n", musb_mode); | ||
417 | } | ||
418 | |||
419 | __raw_writel(cfgchip2, CFGCHIP2); | ||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | static int da8xx_musb_init(struct musb *musb) | ||
424 | { | ||
425 | void __iomem *reg_base = musb->ctrl_base; | ||
426 | u32 rev; | ||
427 | |||
428 | musb->mregs += DA8XX_MENTOR_CORE_OFFSET; | ||
429 | |||
430 | /* Returns zero if e.g. not clocked */ | ||
431 | rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG); | ||
432 | if (!rev) | ||
433 | goto fail; | ||
434 | |||
435 | usb_nop_xceiv_register(); | ||
436 | musb->xceiv = otg_get_transceiver(); | ||
437 | if (!musb->xceiv) | ||
438 | goto fail; | ||
439 | |||
440 | if (is_host_enabled(musb)) | ||
441 | setup_timer(&otg_workaround, otg_timer, (unsigned long)musb); | ||
442 | |||
443 | /* Reset the controller */ | ||
444 | musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK); | ||
445 | |||
446 | /* Start the on-chip PHY and its PLL. */ | ||
447 | phy_on(); | ||
448 | |||
449 | msleep(5); | ||
450 | |||
451 | /* NOTE: IRQs are in mixed mode, not bypass to pure MUSB */ | ||
452 | pr_debug("DA8xx OTG revision %08x, PHY %03x, control %02x\n", | ||
453 | rev, __raw_readl(CFGCHIP2), | ||
454 | musb_readb(reg_base, DA8XX_USB_CTRL_REG)); | ||
455 | |||
456 | musb->isr = da8xx_musb_interrupt; | ||
457 | return 0; | ||
458 | fail: | ||
459 | return -ENODEV; | ||
460 | } | ||
461 | |||
462 | static int da8xx_musb_exit(struct musb *musb) | ||
463 | { | ||
464 | if (is_host_enabled(musb)) | ||
465 | del_timer_sync(&otg_workaround); | ||
466 | |||
467 | phy_off(); | ||
468 | |||
469 | otg_put_transceiver(musb->xceiv); | ||
470 | usb_nop_xceiv_unregister(); | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | static const struct musb_platform_ops da8xx_ops = { | ||
476 | .init = da8xx_musb_init, | ||
477 | .exit = da8xx_musb_exit, | ||
478 | |||
479 | .enable = da8xx_musb_enable, | ||
480 | .disable = da8xx_musb_disable, | ||
481 | |||
482 | .set_mode = da8xx_musb_set_mode, | ||
483 | .try_idle = da8xx_musb_try_idle, | ||
484 | |||
485 | .set_vbus = da8xx_musb_set_vbus, | ||
486 | }; | ||
487 | |||
488 | static u64 da8xx_dmamask = DMA_BIT_MASK(32); | ||
489 | |||
490 | static int __init da8xx_probe(struct platform_device *pdev) | ||
491 | { | ||
492 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; | ||
493 | struct platform_device *musb; | ||
494 | struct da8xx_glue *glue; | ||
495 | |||
496 | struct clk *clk; | ||
497 | |||
498 | int ret = -ENOMEM; | ||
499 | |||
500 | glue = kzalloc(sizeof(*glue), GFP_KERNEL); | ||
501 | if (!glue) { | ||
502 | dev_err(&pdev->dev, "failed to allocate glue context\n"); | ||
503 | goto err0; | ||
504 | } | ||
505 | |||
506 | musb = platform_device_alloc("musb-hdrc", -1); | ||
507 | if (!musb) { | ||
508 | dev_err(&pdev->dev, "failed to allocate musb device\n"); | ||
509 | goto err1; | ||
510 | } | ||
511 | |||
512 | clk = clk_get(&pdev->dev, "usb20"); | ||
513 | if (IS_ERR(clk)) { | ||
514 | dev_err(&pdev->dev, "failed to get clock\n"); | ||
515 | ret = PTR_ERR(clk); | ||
516 | goto err2; | ||
517 | } | ||
518 | |||
519 | ret = clk_enable(clk); | ||
520 | if (ret) { | ||
521 | dev_err(&pdev->dev, "failed to enable clock\n"); | ||
522 | goto err3; | ||
523 | } | ||
524 | |||
525 | musb->dev.parent = &pdev->dev; | ||
526 | musb->dev.dma_mask = &da8xx_dmamask; | ||
527 | musb->dev.coherent_dma_mask = da8xx_dmamask; | ||
528 | |||
529 | glue->dev = &pdev->dev; | ||
530 | glue->musb = musb; | ||
531 | glue->clk = clk; | ||
532 | |||
533 | pdata->platform_ops = &da8xx_ops; | ||
534 | |||
535 | platform_set_drvdata(pdev, glue); | ||
536 | |||
537 | ret = platform_device_add_resources(musb, pdev->resource, | ||
538 | pdev->num_resources); | ||
539 | if (ret) { | ||
540 | dev_err(&pdev->dev, "failed to add resources\n"); | ||
541 | goto err4; | ||
542 | } | ||
543 | |||
544 | ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); | ||
545 | if (ret) { | ||
546 | dev_err(&pdev->dev, "failed to add platform_data\n"); | ||
547 | goto err4; | ||
548 | } | ||
549 | |||
550 | ret = platform_device_add(musb); | ||
551 | if (ret) { | ||
552 | dev_err(&pdev->dev, "failed to register musb device\n"); | ||
553 | goto err4; | ||
554 | } | ||
555 | |||
556 | return 0; | ||
557 | |||
558 | err4: | ||
559 | clk_disable(clk); | ||
560 | |||
561 | err3: | ||
562 | clk_put(clk); | ||
563 | |||
564 | err2: | ||
565 | platform_device_put(musb); | ||
566 | |||
567 | err1: | ||
568 | kfree(glue); | ||
569 | |||
570 | err0: | ||
571 | return ret; | ||
572 | } | ||
573 | |||
574 | static int __exit da8xx_remove(struct platform_device *pdev) | ||
575 | { | ||
576 | struct da8xx_glue *glue = platform_get_drvdata(pdev); | ||
577 | |||
578 | platform_device_del(glue->musb); | ||
579 | platform_device_put(glue->musb); | ||
580 | clk_disable(glue->clk); | ||
581 | clk_put(glue->clk); | ||
582 | kfree(glue); | ||
583 | |||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | static struct platform_driver da8xx_driver = { | ||
588 | .remove = __exit_p(da8xx_remove), | ||
589 | .driver = { | ||
590 | .name = "musb-da8xx", | ||
591 | }, | ||
592 | }; | ||
593 | |||
594 | MODULE_DESCRIPTION("DA8xx/OMAP-L1x MUSB Glue Layer"); | ||
595 | MODULE_AUTHOR("Sergei Shtylyov <sshtylyov@ru.mvista.com>"); | ||
596 | MODULE_LICENSE("GPL v2"); | ||
597 | |||
598 | static int __init da8xx_init(void) | ||
599 | { | ||
600 | return platform_driver_probe(&da8xx_driver, da8xx_probe); | ||
601 | } | ||
602 | subsys_initcall(da8xx_init); | ||
603 | |||
604 | static void __exit da8xx_exit(void) | ||
605 | { | ||
606 | platform_driver_unregister(&da8xx_driver); | ||
607 | } | ||
608 | module_exit(da8xx_exit); | ||
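The new da8xx.c glue registers a platform driver named "musb-da8xx" and expects board code to supply the controller's memory region, its interrupts and a struct musb_hdrc_platform_data; da8xx_probe() then clones those resources onto the "musb-hdrc" child device it creates. A hedged board-side sketch of that registration follows; the base address, IRQ numbers and resource names are assumptions for illustration (only a "dma" IRQ name is visible in these hunks, in cppi_dma.c).

```c
#include <linux/platform_device.h>
#include <linux/usb/musb.h>

static struct resource da8xx_usb20_resources[] = {
	{
		.start	= 0x01e00000,		/* USB0 controller base (assumed) */
		.end	= 0x01e00000 + 0xffff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 58,			/* core interrupt (assumed) */
		.flags	= IORESOURCE_IRQ,
		.name	= "mc",			/* resource name assumed */
	},
	{
		.start	= 59,			/* DMA interrupt (assumed) */
		.flags	= IORESOURCE_IRQ,
		.name	= "dma",
	},
};

static struct musb_hdrc_platform_data da8xx_usb_pdata = {
	.mode	= MUSB_OTG,
	.power	= 250,			/* 500 mA VBUS budget, in 2 mA units */
};

static struct platform_device da8xx_usb20_device = {
	.name		= "musb-da8xx",
	.id		= -1,
	.dev		= {
		.platform_data	= &da8xx_usb_pdata,
	},
	.resource	= da8xx_usb20_resources,
	.num_resources	= ARRAY_SIZE(da8xx_usb20_resources),
};
```

With a device like this registered, platform_device_add_resources() and platform_device_add_data() in da8xx_probe() hand everything on to the generic "musb-hdrc" core driver, so the core never needs SoC-specific knowledge.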
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index 57624361c1de..2a2adf6492cd 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/clk.h> | 30 | #include <linux/clk.h> |
31 | #include <linux/io.h> | 31 | #include <linux/io.h> |
32 | #include <linux/gpio.h> | 32 | #include <linux/gpio.h> |
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/dma-mapping.h> | ||
33 | 35 | ||
34 | #include <mach/hardware.h> | 36 | #include <mach/hardware.h> |
35 | #include <mach/memory.h> | 37 | #include <mach/memory.h> |
@@ -51,6 +53,12 @@ | |||
51 | #define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR) | 53 | #define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR) |
52 | #define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR) | 54 | #define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR) |
53 | 55 | ||
56 | struct davinci_glue { | ||
57 | struct device *dev; | ||
58 | struct platform_device *musb; | ||
59 | struct clk *clk; | ||
60 | }; | ||
61 | |||
54 | /* REVISIT (PM) we should be able to keep the PHY in low power mode most | 62 | /* REVISIT (PM) we should be able to keep the PHY in low power mode most |
55 | * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 | 63 | * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 |
56 | * and, when in host mode, autosuspending idle root ports... PHYPLLON | 64 | * and, when in host mode, autosuspending idle root ports... PHYPLLON |
@@ -83,7 +91,7 @@ static inline void phy_off(void) | |||
83 | 91 | ||
84 | static int dma_off = 1; | 92 | static int dma_off = 1; |
85 | 93 | ||
86 | void musb_platform_enable(struct musb *musb) | 94 | static void davinci_musb_enable(struct musb *musb) |
87 | { | 95 | { |
88 | u32 tmp, old, val; | 96 | u32 tmp, old, val; |
89 | 97 | ||
@@ -116,7 +124,7 @@ void musb_platform_enable(struct musb *musb) | |||
116 | /* | 124 | /* |
117 | * Disable the HDRC and flush interrupts | 125 | * Disable the HDRC and flush interrupts |
118 | */ | 126 | */ |
119 | void musb_platform_disable(struct musb *musb) | 127 | static void davinci_musb_disable(struct musb *musb) |
120 | { | 128 | { |
121 | /* because we don't set CTRLR.UINT, "important" to: | 129 | /* because we don't set CTRLR.UINT, "important" to: |
122 | * - not read/write INTRUSB/INTRUSBE | 130 | * - not read/write INTRUSB/INTRUSBE |
@@ -167,7 +175,7 @@ static void evm_deferred_drvvbus(struct work_struct *ignored) | |||
167 | 175 | ||
168 | #endif /* EVM */ | 176 | #endif /* EVM */ |
169 | 177 | ||
170 | static void davinci_source_power(struct musb *musb, int is_on, int immediate) | 178 | static void davinci_musb_source_power(struct musb *musb, int is_on, int immediate) |
171 | { | 179 | { |
172 | #ifdef CONFIG_MACH_DAVINCI_EVM | 180 | #ifdef CONFIG_MACH_DAVINCI_EVM |
173 | if (is_on) | 181 | if (is_on) |
@@ -190,10 +198,10 @@ static void davinci_source_power(struct musb *musb, int is_on, int immediate) | |||
190 | #endif | 198 | #endif |
191 | } | 199 | } |
192 | 200 | ||
193 | static void davinci_set_vbus(struct musb *musb, int is_on) | 201 | static void davinci_musb_set_vbus(struct musb *musb, int is_on) |
194 | { | 202 | { |
195 | WARN_ON(is_on && is_peripheral_active(musb)); | 203 | WARN_ON(is_on && is_peripheral_active(musb)); |
196 | davinci_source_power(musb, is_on, 0); | 204 | davinci_musb_source_power(musb, is_on, 0); |
197 | } | 205 | } |
198 | 206 | ||
199 | 207 | ||
@@ -212,7 +220,8 @@ static void otg_timer(unsigned long _musb) | |||
212 | * status change events (from the transceiver) otherwise. | 220 | * status change events (from the transceiver) otherwise. |
213 | */ | 221 | */ |
214 | devctl = musb_readb(mregs, MUSB_DEVCTL); | 222 | devctl = musb_readb(mregs, MUSB_DEVCTL); |
215 | DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb)); | 223 | dev_dbg(musb->controller, "poll devctl %02x (%s)\n", devctl, |
224 | otg_state_string(musb->xceiv->state)); | ||
216 | 225 | ||
217 | spin_lock_irqsave(&musb->lock, flags); | 226 | spin_lock_irqsave(&musb->lock, flags); |
218 | switch (musb->xceiv->state) { | 227 | switch (musb->xceiv->state) { |
@@ -259,7 +268,7 @@ static void otg_timer(unsigned long _musb) | |||
259 | spin_unlock_irqrestore(&musb->lock, flags); | 268 | spin_unlock_irqrestore(&musb->lock, flags); |
260 | } | 269 | } |
261 | 270 | ||
262 | static irqreturn_t davinci_interrupt(int irq, void *__hci) | 271 | static irqreturn_t davinci_musb_interrupt(int irq, void *__hci) |
263 | { | 272 | { |
264 | unsigned long flags; | 273 | unsigned long flags; |
265 | irqreturn_t retval = IRQ_NONE; | 274 | irqreturn_t retval = IRQ_NONE; |
@@ -289,7 +298,7 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci) | |||
289 | /* ack and handle non-CPPI interrupts */ | 298 | /* ack and handle non-CPPI interrupts */ |
290 | tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG); | 299 | tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG); |
291 | musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp); | 300 | musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp); |
292 | DBG(4, "IRQ %08x\n", tmp); | 301 | dev_dbg(musb->controller, "IRQ %08x\n", tmp); |
293 | 302 | ||
294 | musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK) | 303 | musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK) |
295 | >> DAVINCI_USB_RXINT_SHIFT; | 304 | >> DAVINCI_USB_RXINT_SHIFT; |
@@ -345,10 +354,10 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci) | |||
345 | /* NOTE: this must complete poweron within 100 msec | 354 | /* NOTE: this must complete poweron within 100 msec |
346 | * (OTG_TIME_A_WAIT_VRISE) but we don't check for that. | 355 | * (OTG_TIME_A_WAIT_VRISE) but we don't check for that. |
347 | */ | 356 | */ |
348 | davinci_source_power(musb, drvvbus, 0); | 357 | davinci_musb_source_power(musb, drvvbus, 0); |
349 | DBG(2, "VBUS %s (%s)%s, devctl %02x\n", | 358 | dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", |
350 | drvvbus ? "on" : "off", | 359 | drvvbus ? "on" : "off", |
351 | otg_state_string(musb), | 360 | otg_state_string(musb->xceiv->state), |
352 | err ? " ERROR" : "", | 361 | err ? " ERROR" : "", |
353 | devctl); | 362 | devctl); |
354 | retval = IRQ_HANDLED; | 363 | retval = IRQ_HANDLED; |
@@ -370,13 +379,13 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci) | |||
370 | return retval; | 379 | return retval; |
371 | } | 380 | } |
372 | 381 | ||
373 | int musb_platform_set_mode(struct musb *musb, u8 mode) | 382 | static int davinci_musb_set_mode(struct musb *musb, u8 mode) |
374 | { | 383 | { |
375 | /* EVM can't do this (right?) */ | 384 | /* EVM can't do this (right?) */ |
376 | return -EIO; | 385 | return -EIO; |
377 | } | 386 | } |
378 | 387 | ||
379 | int __init musb_platform_init(struct musb *musb, void *board_data) | 388 | static int davinci_musb_init(struct musb *musb) |
380 | { | 389 | { |
381 | void __iomem *tibase = musb->ctrl_base; | 390 | void __iomem *tibase = musb->ctrl_base; |
382 | u32 revision; | 391 | u32 revision; |
@@ -388,8 +397,6 @@ int __init musb_platform_init(struct musb *musb, void *board_data) | |||
388 | 397 | ||
389 | musb->mregs += DAVINCI_BASE_OFFSET; | 398 | musb->mregs += DAVINCI_BASE_OFFSET; |
390 | 399 | ||
391 | clk_enable(musb->clock); | ||
392 | |||
393 | /* returns zero if e.g. not clocked */ | 400 | /* returns zero if e.g. not clocked */ |
394 | revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); | 401 | revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); |
395 | if (revision == 0) | 402 | if (revision == 0) |
@@ -398,8 +405,7 @@ int __init musb_platform_init(struct musb *musb, void *board_data) | |||
398 | if (is_host_enabled(musb)) | 405 | if (is_host_enabled(musb)) |
399 | setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); | 406 | setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); |
400 | 407 | ||
401 | musb->board_set_vbus = davinci_set_vbus; | 408 | davinci_musb_source_power(musb, 0, 1); |
402 | davinci_source_power(musb, 0, 1); | ||
403 | 409 | ||
404 | /* dm355 EVM swaps D+/D- for signal integrity, and | 410 | /* dm355 EVM swaps D+/D- for signal integrity, and |
405 | * is clocked from the main 24 MHz crystal. | 411 | * is clocked from the main 24 MHz crystal. |
@@ -440,17 +446,16 @@ int __init musb_platform_init(struct musb *musb, void *board_data) | |||
440 | revision, __raw_readl(USB_PHY_CTRL), | 446 | revision, __raw_readl(USB_PHY_CTRL), |
441 | musb_readb(tibase, DAVINCI_USB_CTRL_REG)); | 447 | musb_readb(tibase, DAVINCI_USB_CTRL_REG)); |
442 | 448 | ||
443 | musb->isr = davinci_interrupt; | 449 | musb->isr = davinci_musb_interrupt; |
444 | return 0; | 450 | return 0; |
445 | 451 | ||
446 | fail: | 452 | fail: |
447 | clk_disable(musb->clock); | 453 | otg_put_transceiver(musb->xceiv); |
448 | |||
449 | usb_nop_xceiv_unregister(); | 454 | usb_nop_xceiv_unregister(); |
450 | return -ENODEV; | 455 | return -ENODEV; |
451 | } | 456 | } |
452 | 457 | ||
453 | int musb_platform_exit(struct musb *musb) | 458 | static int davinci_musb_exit(struct musb *musb) |
454 | { | 459 | { |
455 | if (is_host_enabled(musb)) | 460 | if (is_host_enabled(musb)) |
456 | del_timer_sync(&otg_workaround); | 461 | del_timer_sync(&otg_workaround); |
@@ -464,7 +469,7 @@ int musb_platform_exit(struct musb *musb) | |||
464 | __raw_writel(deepsleep, DM355_DEEPSLEEP); | 469 | __raw_writel(deepsleep, DM355_DEEPSLEEP); |
465 | } | 470 | } |
466 | 471 | ||
467 | davinci_source_power(musb, 0 /*off*/, 1); | 472 | davinci_musb_source_power(musb, 0 /*off*/, 1); |
468 | 473 | ||
469 | /* delay, to avoid problems with module reload */ | 474 | /* delay, to avoid problems with module reload */ |
470 | if (is_host_enabled(musb) && musb->xceiv->default_a) { | 475 | if (is_host_enabled(musb) && musb->xceiv->default_a) { |
@@ -480,7 +485,7 @@ int musb_platform_exit(struct musb *musb) | |||
480 | break; | 485 | break; |
481 | if ((devctl & MUSB_DEVCTL_VBUS) != warn) { | 486 | if ((devctl & MUSB_DEVCTL_VBUS) != warn) { |
482 | warn = devctl & MUSB_DEVCTL_VBUS; | 487 | warn = devctl & MUSB_DEVCTL_VBUS; |
483 | DBG(1, "VBUS %d\n", | 488 | dev_dbg(musb->controller, "VBUS %d\n", |
484 | warn >> MUSB_DEVCTL_VBUS_SHIFT); | 489 | warn >> MUSB_DEVCTL_VBUS_SHIFT); |
485 | } | 490 | } |
486 | msleep(1000); | 491 | msleep(1000); |
@@ -489,14 +494,146 @@ int musb_platform_exit(struct musb *musb) | |||
489 | 494 | ||
490 | /* in OTG mode, another host might be connected */ | 495 | /* in OTG mode, another host might be connected */ |
491 | if (devctl & MUSB_DEVCTL_VBUS) | 496 | if (devctl & MUSB_DEVCTL_VBUS) |
492 | DBG(1, "VBUS off timeout (devctl %02x)\n", devctl); | 497 | dev_dbg(musb->controller, "VBUS off timeout (devctl %02x)\n", devctl); |
493 | } | 498 | } |
494 | 499 | ||
495 | phy_off(); | 500 | phy_off(); |
496 | 501 | ||
497 | clk_disable(musb->clock); | 502 | otg_put_transceiver(musb->xceiv); |
498 | |||
499 | usb_nop_xceiv_unregister(); | 503 | usb_nop_xceiv_unregister(); |
500 | 504 | ||
501 | return 0; | 505 | return 0; |
502 | } | 506 | } |
507 | |||
508 | static const struct musb_platform_ops davinci_ops = { | ||
509 | .init = davinci_musb_init, | ||
510 | .exit = davinci_musb_exit, | ||
511 | |||
512 | .enable = davinci_musb_enable, | ||
513 | .disable = davinci_musb_disable, | ||
514 | |||
515 | .set_mode = davinci_musb_set_mode, | ||
516 | |||
517 | .set_vbus = davinci_musb_set_vbus, | ||
518 | }; | ||
519 | |||
520 | static u64 davinci_dmamask = DMA_BIT_MASK(32); | ||
521 | |||
522 | static int __init davinci_probe(struct platform_device *pdev) | ||
523 | { | ||
524 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; | ||
525 | struct platform_device *musb; | ||
526 | struct davinci_glue *glue; | ||
527 | struct clk *clk; | ||
528 | |||
529 | int ret = -ENOMEM; | ||
530 | |||
531 | glue = kzalloc(sizeof(*glue), GFP_KERNEL); | ||
532 | if (!glue) { | ||
533 | dev_err(&pdev->dev, "failed to allocate glue context\n"); | ||
534 | goto err0; | ||
535 | } | ||
536 | |||
537 | musb = platform_device_alloc("musb-hdrc", -1); | ||
538 | if (!musb) { | ||
539 | dev_err(&pdev->dev, "failed to allocate musb device\n"); | ||
540 | goto err1; | ||
541 | } | ||
542 | |||
543 | clk = clk_get(&pdev->dev, "usb"); | ||
544 | if (IS_ERR(clk)) { | ||
545 | dev_err(&pdev->dev, "failed to get clock\n"); | ||
546 | ret = PTR_ERR(clk); | ||
547 | goto err2; | ||
548 | } | ||
549 | |||
550 | ret = clk_enable(clk); | ||
551 | if (ret) { | ||
552 | dev_err(&pdev->dev, "failed to enable clock\n"); | ||
553 | goto err3; | ||
554 | } | ||
555 | |||
556 | musb->dev.parent = &pdev->dev; | ||
557 | musb->dev.dma_mask = &davinci_dmamask; | ||
558 | musb->dev.coherent_dma_mask = davinci_dmamask; | ||
559 | |||
560 | glue->dev = &pdev->dev; | ||
561 | glue->musb = musb; | ||
562 | glue->clk = clk; | ||
563 | |||
564 | pdata->platform_ops = &davinci_ops; | ||
565 | |||
566 | platform_set_drvdata(pdev, glue); | ||
567 | |||
568 | ret = platform_device_add_resources(musb, pdev->resource, | ||
569 | pdev->num_resources); | ||
570 | if (ret) { | ||
571 | dev_err(&pdev->dev, "failed to add resources\n"); | ||
572 | goto err4; | ||
573 | } | ||
574 | |||
575 | ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); | ||
576 | if (ret) { | ||
577 | dev_err(&pdev->dev, "failed to add platform_data\n"); | ||
578 | goto err4; | ||
579 | } | ||
580 | |||
581 | ret = platform_device_add(musb); | ||
582 | if (ret) { | ||
583 | dev_err(&pdev->dev, "failed to register musb device\n"); | ||
584 | goto err4; | ||
585 | } | ||
586 | |||
587 | return 0; | ||
588 | |||
589 | err4: | ||
590 | clk_disable(clk); | ||
591 | |||
592 | err3: | ||
593 | clk_put(clk); | ||
594 | |||
595 | err2: | ||
596 | platform_device_put(musb); | ||
597 | |||
598 | err1: | ||
599 | kfree(glue); | ||
600 | |||
601 | err0: | ||
602 | return ret; | ||
603 | } | ||
604 | |||
605 | static int __exit davinci_remove(struct platform_device *pdev) | ||
606 | { | ||
607 | struct davinci_glue *glue = platform_get_drvdata(pdev); | ||
608 | |||
609 | platform_device_del(glue->musb); | ||
610 | platform_device_put(glue->musb); | ||
611 | clk_disable(glue->clk); | ||
612 | clk_put(glue->clk); | ||
613 | kfree(glue); | ||
614 | |||
615 | return 0; | ||
616 | } | ||
617 | |||
618 | static struct platform_driver davinci_driver = { | ||
619 | .remove = __exit_p(davinci_remove), | ||
620 | .driver = { | ||
621 | .name = "musb-davinci", | ||
622 | }, | ||
623 | }; | ||
624 | |||
625 | MODULE_DESCRIPTION("DaVinci MUSB Glue Layer"); | ||
626 | MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); | ||
627 | MODULE_LICENSE("GPL v2"); | ||
628 | |||
629 | static int __init davinci_init(void) | ||
630 | { | ||
631 | return platform_driver_probe(&davinci_driver, davinci_probe); | ||
632 | } | ||
633 | subsys_initcall(davinci_init); | ||
634 | |||
635 | static void __exit davinci_exit(void) | ||
636 | { | ||
637 | platform_driver_unregister(&davinci_driver); | ||
638 | } | ||
639 | module_exit(davinci_exit); | ||
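Both converted glue layers now hand the core a const struct musb_platform_ops through pdata->platform_ops instead of exporting musb_platform_*() symbols. The core-side dispatch is not part of the hunks shown here, so the wrappers below are only a plausible sketch of how musb_core could call into the ops table; the wrapper names and the musb->ops field are assumptions inferred from the ops structures above.

```c
#include "musb_core.h"	/* assumed to provide struct musb and musb->ops */

/* Sketch only: thin wrappers the core might use so that a glue layer
 * may leave any callback it does not need as NULL. */
static inline void musb_platform_enable(struct musb *musb)
{
	if (musb->ops->enable)
		musb->ops->enable(musb);
}

static inline int musb_platform_set_mode(struct musb *musb, u8 mode)
{
	if (!musb->ops->set_mode)
		return 0;

	return musb->ops->set_mode(musb, mode);
}

static inline void musb_platform_try_idle(struct musb *musb,
		unsigned long timeout)
{
	if (musb->ops->try_idle)
		musb->ops->try_idle(musb, timeout);
}
```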
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 540c766c4f86..c71b0372786e 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -96,29 +96,15 @@ | |||
96 | #include <linux/init.h> | 96 | #include <linux/init.h> |
97 | #include <linux/list.h> | 97 | #include <linux/list.h> |
98 | #include <linux/kobject.h> | 98 | #include <linux/kobject.h> |
99 | #include <linux/prefetch.h> | ||
99 | #include <linux/platform_device.h> | 100 | #include <linux/platform_device.h> |
100 | #include <linux/io.h> | 101 | #include <linux/io.h> |
101 | 102 | ||
102 | #ifdef CONFIG_ARM | ||
103 | #include <mach/hardware.h> | ||
104 | #include <mach/memory.h> | ||
105 | #include <asm/mach-types.h> | ||
106 | #endif | ||
107 | |||
108 | #include "musb_core.h" | 103 | #include "musb_core.h" |
109 | 104 | ||
110 | |||
111 | #ifdef CONFIG_ARCH_DAVINCI | ||
112 | #include "davinci.h" | ||
113 | #endif | ||
114 | |||
115 | #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON) | 105 | #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON) |
116 | 106 | ||
117 | 107 | ||
118 | unsigned musb_debug; | ||
119 | module_param_named(debug, musb_debug, uint, S_IRUGO | S_IWUSR); | ||
120 | MODULE_PARM_DESC(debug, "Debug message level. Default = 0"); | ||
121 | |||
122 | #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" | 108 | #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" |
123 | #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver" | 109 | #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver" |
124 | 110 | ||
@@ -126,7 +112,7 @@ MODULE_PARM_DESC(debug, "Debug message level. Default = 0"); | |||
126 | 112 | ||
127 | #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION | 113 | #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION |
128 | 114 | ||
129 | #define MUSB_DRIVER_NAME "musb_hdrc" | 115 | #define MUSB_DRIVER_NAME "musb-hdrc" |
130 | const char musb_driver_name[] = MUSB_DRIVER_NAME; | 116 | const char musb_driver_name[] = MUSB_DRIVER_NAME; |
131 | 117 | ||
132 | MODULE_DESCRIPTION(DRIVER_INFO); | 118 | MODULE_DESCRIPTION(DRIVER_INFO); |
@@ -139,12 +125,7 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME); | |||
139 | 125 | ||
140 | static inline struct musb *dev_to_musb(struct device *dev) | 126 | static inline struct musb *dev_to_musb(struct device *dev) |
141 | { | 127 | { |
142 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
143 | /* usbcore insists dev->driver_data is a "struct hcd *" */ | ||
144 | return hcd_to_musb(dev_get_drvdata(dev)); | ||
145 | #else | ||
146 | return dev_get_drvdata(dev); | 128 | return dev_get_drvdata(dev); |
147 | #endif | ||
148 | } | 129 | } |
149 | 130 | ||
150 | /*-------------------------------------------------------------------------*/ | 131 | /*-------------------------------------------------------------------------*/ |
@@ -173,10 +154,8 @@ static int musb_ulpi_read(struct otg_transceiver *otg, u32 offset) | |||
173 | while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) | 154 | while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) |
174 | & MUSB_ULPI_REG_CMPLT)) { | 155 | & MUSB_ULPI_REG_CMPLT)) { |
175 | i++; | 156 | i++; |
176 | if (i == 10000) { | 157 | if (i == 10000) |
177 | DBG(3, "ULPI read timed out\n"); | ||
178 | return -ETIMEDOUT; | 158 | return -ETIMEDOUT; |
179 | } | ||
180 | 159 | ||
181 | } | 160 | } |
182 | r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); | 161 | r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); |
@@ -206,10 +185,8 @@ static int musb_ulpi_write(struct otg_transceiver *otg, | |||
206 | while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) | 185 | while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) |
207 | & MUSB_ULPI_REG_CMPLT)) { | 186 | & MUSB_ULPI_REG_CMPLT)) { |
208 | i++; | 187 | i++; |
209 | if (i == 10000) { | 188 | if (i == 10000) |
210 | DBG(3, "ULPI write timed out\n"); | ||
211 | return -ETIMEDOUT; | 189 | return -ETIMEDOUT; |
212 | } | ||
213 | } | 190 | } |
214 | 191 | ||
215 | r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); | 192 | r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); |
@@ -230,18 +207,19 @@ static struct otg_io_access_ops musb_ulpi_access = { | |||
230 | 207 | ||
231 | /*-------------------------------------------------------------------------*/ | 208 | /*-------------------------------------------------------------------------*/ |
232 | 209 | ||
233 | #if !defined(CONFIG_USB_TUSB6010) && !defined(CONFIG_BLACKFIN) | 210 | #if !defined(CONFIG_USB_MUSB_TUSB6010) && !defined(CONFIG_USB_MUSB_BLACKFIN) |
234 | 211 | ||
235 | /* | 212 | /* |
236 | * Load an endpoint's FIFO | 213 | * Load an endpoint's FIFO |
237 | */ | 214 | */ |
238 | void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) | 215 | void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) |
239 | { | 216 | { |
217 | struct musb *musb = hw_ep->musb; | ||
240 | void __iomem *fifo = hw_ep->fifo; | 218 | void __iomem *fifo = hw_ep->fifo; |
241 | 219 | ||
242 | prefetch((u8 *)src); | 220 | prefetch((u8 *)src); |
243 | 221 | ||
244 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | 222 | dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", |
245 | 'T', hw_ep->epnum, fifo, len, src); | 223 | 'T', hw_ep->epnum, fifo, len, src); |
246 | 224 | ||
247 | /* we can't assume unaligned reads work */ | 225 | /* we can't assume unaligned reads work */ |
@@ -272,14 +250,16 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) | |||
272 | } | 250 | } |
273 | } | 251 | } |
274 | 252 | ||
253 | #if !defined(CONFIG_USB_MUSB_AM35X) | ||
275 | /* | 254 | /* |
276 | * Unload an endpoint's FIFO | 255 | * Unload an endpoint's FIFO |
277 | */ | 256 | */ |
278 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) | 257 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) |
279 | { | 258 | { |
259 | struct musb *musb = hw_ep->musb; | ||
280 | void __iomem *fifo = hw_ep->fifo; | 260 | void __iomem *fifo = hw_ep->fifo; |
281 | 261 | ||
282 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | 262 | dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", |
283 | 'R', hw_ep->epnum, fifo, len, dst); | 263 | 'R', hw_ep->epnum, fifo, len, dst); |
284 | 264 | ||
285 | /* we can't assume unaligned writes work */ | 265 | /* we can't assume unaligned writes work */ |
@@ -309,6 +289,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) | |||
309 | readsb(fifo, dst, len); | 289 | readsb(fifo, dst, len); |
310 | } | 290 | } |
311 | } | 291 | } |
292 | #endif | ||
312 | 293 | ||
313 | #endif /* normal PIO */ | 294 | #endif /* normal PIO */ |
314 | 295 | ||
@@ -347,26 +328,6 @@ void musb_load_testpacket(struct musb *musb) | |||
347 | 328 | ||
348 | /*-------------------------------------------------------------------------*/ | 329 | /*-------------------------------------------------------------------------*/ |
349 | 330 | ||
350 | const char *otg_state_string(struct musb *musb) | ||
351 | { | ||
352 | switch (musb->xceiv->state) { | ||
353 | case OTG_STATE_A_IDLE: return "a_idle"; | ||
354 | case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise"; | ||
355 | case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon"; | ||
356 | case OTG_STATE_A_HOST: return "a_host"; | ||
357 | case OTG_STATE_A_SUSPEND: return "a_suspend"; | ||
358 | case OTG_STATE_A_PERIPHERAL: return "a_peripheral"; | ||
359 | case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall"; | ||
360 | case OTG_STATE_A_VBUS_ERR: return "a_vbus_err"; | ||
361 | case OTG_STATE_B_IDLE: return "b_idle"; | ||
362 | case OTG_STATE_B_SRP_INIT: return "b_srp_init"; | ||
363 | case OTG_STATE_B_PERIPHERAL: return "b_peripheral"; | ||
364 | case OTG_STATE_B_WAIT_ACON: return "b_wait_acon"; | ||
365 | case OTG_STATE_B_HOST: return "b_host"; | ||
366 | default: return "UNDEFINED"; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | #ifdef CONFIG_USB_MUSB_OTG | 331 | #ifdef CONFIG_USB_MUSB_OTG |
371 | 332 | ||
372 | /* | 333 | /* |
@@ -380,19 +341,21 @@ void musb_otg_timer_func(unsigned long data) | |||
380 | spin_lock_irqsave(&musb->lock, flags); | 341 | spin_lock_irqsave(&musb->lock, flags); |
381 | switch (musb->xceiv->state) { | 342 | switch (musb->xceiv->state) { |
382 | case OTG_STATE_B_WAIT_ACON: | 343 | case OTG_STATE_B_WAIT_ACON: |
383 | DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n"); | 344 | dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n"); |
384 | musb_g_disconnect(musb); | 345 | musb_g_disconnect(musb); |
385 | musb->xceiv->state = OTG_STATE_B_PERIPHERAL; | 346 | musb->xceiv->state = OTG_STATE_B_PERIPHERAL; |
386 | musb->is_active = 0; | 347 | musb->is_active = 0; |
387 | break; | 348 | break; |
388 | case OTG_STATE_A_SUSPEND: | 349 | case OTG_STATE_A_SUSPEND: |
389 | case OTG_STATE_A_WAIT_BCON: | 350 | case OTG_STATE_A_WAIT_BCON: |
390 | DBG(1, "HNP: %s timeout\n", otg_state_string(musb)); | 351 | dev_dbg(musb->controller, "HNP: %s timeout\n", |
391 | musb_set_vbus(musb, 0); | 352 | otg_state_string(musb->xceiv->state)); |
353 | musb_platform_set_vbus(musb, 0); | ||
392 | musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; | 354 | musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; |
393 | break; | 355 | break; |
394 | default: | 356 | default: |
395 | DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb)); | 357 | dev_dbg(musb->controller, "HNP: Unhandled mode %s\n", |
358 | otg_state_string(musb->xceiv->state)); | ||
396 | } | 359 | } |
397 | musb->ignore_disconnect = 0; | 360 | musb->ignore_disconnect = 0; |
398 | spin_unlock_irqrestore(&musb->lock, flags); | 361 | spin_unlock_irqrestore(&musb->lock, flags); |
@@ -407,15 +370,16 @@ void musb_hnp_stop(struct musb *musb) | |||
407 | void __iomem *mbase = musb->mregs; | 370 | void __iomem *mbase = musb->mregs; |
408 | u8 reg; | 371 | u8 reg; |
409 | 372 | ||
410 | DBG(1, "HNP: stop from %s\n", otg_state_string(musb)); | 373 | dev_dbg(musb->controller, "HNP: stop from %s\n", otg_state_string(musb->xceiv->state)); |
411 | 374 | ||
412 | switch (musb->xceiv->state) { | 375 | switch (musb->xceiv->state) { |
413 | case OTG_STATE_A_PERIPHERAL: | 376 | case OTG_STATE_A_PERIPHERAL: |
414 | musb_g_disconnect(musb); | 377 | musb_g_disconnect(musb); |
415 | DBG(1, "HNP: back to %s\n", otg_state_string(musb)); | 378 | dev_dbg(musb->controller, "HNP: back to %s\n", |
379 | otg_state_string(musb->xceiv->state)); | ||
416 | break; | 380 | break; |
417 | case OTG_STATE_B_HOST: | 381 | case OTG_STATE_B_HOST: |
418 | DBG(1, "HNP: Disabling HR\n"); | 382 | dev_dbg(musb->controller, "HNP: Disabling HR\n"); |
419 | hcd->self.is_b_host = 0; | 383 | hcd->self.is_b_host = 0; |
420 | musb->xceiv->state = OTG_STATE_B_PERIPHERAL; | 384 | musb->xceiv->state = OTG_STATE_B_PERIPHERAL; |
421 | MUSB_DEV_MODE(musb); | 385 | MUSB_DEV_MODE(musb); |
@@ -425,8 +389,8 @@ void musb_hnp_stop(struct musb *musb) | |||
425 | /* REVISIT: Start SESSION_REQUEST here? */ | 389 | /* REVISIT: Start SESSION_REQUEST here? */ |
426 | break; | 390 | break; |
427 | default: | 391 | default: |
428 | DBG(1, "HNP: Stopping in unknown state %s\n", | 392 | dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n", |
429 | otg_state_string(musb)); | 393 | otg_state_string(musb->xceiv->state)); |
430 | } | 394 | } |
431 | 395 | ||
432 | /* | 396 | /* |
@@ -456,7 +420,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
456 | { | 420 | { |
457 | irqreturn_t handled = IRQ_NONE; | 421 | irqreturn_t handled = IRQ_NONE; |
458 | 422 | ||
459 | DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, | 423 | dev_dbg(musb->controller, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, |
460 | int_usb); | 424 | int_usb); |
461 | 425 | ||
462 | /* in host mode, the peripheral may issue remote wakeup. | 426 | /* in host mode, the peripheral may issue remote wakeup. |
@@ -465,7 +429,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
465 | */ | 429 | */ |
466 | if (int_usb & MUSB_INTR_RESUME) { | 430 | if (int_usb & MUSB_INTR_RESUME) { |
467 | handled = IRQ_HANDLED; | 431 | handled = IRQ_HANDLED; |
468 | DBG(3, "RESUME (%s)\n", otg_state_string(musb)); | 432 | dev_dbg(musb->controller, "RESUME (%s)\n", otg_state_string(musb->xceiv->state)); |
469 | 433 | ||
470 | if (devctl & MUSB_DEVCTL_HM) { | 434 | if (devctl & MUSB_DEVCTL_HM) { |
471 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | 435 | #ifdef CONFIG_USB_MUSB_HDRC_HCD |
@@ -480,7 +444,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
480 | if (power & MUSB_POWER_SUSPENDM) { | 444 | if (power & MUSB_POWER_SUSPENDM) { |
481 | /* spurious */ | 445 | /* spurious */ |
482 | musb->int_usb &= ~MUSB_INTR_SUSPEND; | 446 | musb->int_usb &= ~MUSB_INTR_SUSPEND; |
483 | DBG(2, "Spurious SUSPENDM\n"); | 447 | dev_dbg(musb->controller, "Spurious SUSPENDM\n"); |
484 | break; | 448 | break; |
485 | } | 449 | } |
486 | 450 | ||
@@ -506,7 +470,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
506 | default: | 470 | default: |
507 | WARNING("bogus %s RESUME (%s)\n", | 471 | WARNING("bogus %s RESUME (%s)\n", |
508 | "host", | 472 | "host", |
509 | otg_state_string(musb)); | 473 | otg_state_string(musb->xceiv->state)); |
510 | } | 474 | } |
511 | #endif | 475 | #endif |
512 | } else { | 476 | } else { |
@@ -540,7 +504,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
540 | default: | 504 | default: |
541 | WARNING("bogus %s RESUME (%s)\n", | 505 | WARNING("bogus %s RESUME (%s)\n", |
542 | "peripheral", | 506 | "peripheral", |
543 | otg_state_string(musb)); | 507 | otg_state_string(musb->xceiv->state)); |
544 | } | 508 | } |
545 | } | 509 | } |
546 | } | 510 | } |
@@ -550,7 +514,14 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
550 | if (int_usb & MUSB_INTR_SESSREQ) { | 514 | if (int_usb & MUSB_INTR_SESSREQ) { |
551 | void __iomem *mbase = musb->mregs; | 515 | void __iomem *mbase = musb->mregs; |
552 | 516 | ||
553 | DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb)); | 517 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS |
518 | && (devctl & MUSB_DEVCTL_BDEVICE)) { | ||
519 | dev_dbg(musb->controller, "SessReq while on B state\n"); | ||
520 | return IRQ_HANDLED; | ||
521 | } | ||
522 | |||
523 | dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n", | ||
524 | otg_state_string(musb->xceiv->state)); | ||
554 | 525 | ||
555 | /* IRQ arrives from ID pin sense or (later, if VBUS power | 526 | /* IRQ arrives from ID pin sense or (later, if VBUS power |
556 | * is removed) SRP. responses are time critical: | 527 | * is removed) SRP. responses are time critical: |
@@ -563,7 +534,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
563 | musb->ep0_stage = MUSB_EP0_START; | 534 | musb->ep0_stage = MUSB_EP0_START; |
564 | musb->xceiv->state = OTG_STATE_A_IDLE; | 535 | musb->xceiv->state = OTG_STATE_A_IDLE; |
565 | MUSB_HST_MODE(musb); | 536 | MUSB_HST_MODE(musb); |
566 | musb_set_vbus(musb, 1); | 537 | musb_platform_set_vbus(musb, 1); |
567 | 538 | ||
568 | handled = IRQ_HANDLED; | 539 | handled = IRQ_HANDLED; |
569 | } | 540 | } |
@@ -614,8 +585,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
614 | break; | 585 | break; |
615 | } | 586 | } |
616 | 587 | ||
617 | DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n", | 588 | dev_dbg(musb->controller, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n", |
618 | otg_state_string(musb), | 589 | otg_state_string(musb->xceiv->state), |
619 | devctl, | 590 | devctl, |
620 | ({ char *s; | 591 | ({ char *s; |
621 | switch (devctl & MUSB_DEVCTL_VBUS) { | 592 | switch (devctl & MUSB_DEVCTL_VBUS) { |
@@ -634,14 +605,14 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
634 | 605 | ||
635 | /* go through A_WAIT_VFALL then start a new session */ | 606 | /* go through A_WAIT_VFALL then start a new session */ |
636 | if (!ignore) | 607 | if (!ignore) |
637 | musb_set_vbus(musb, 0); | 608 | musb_platform_set_vbus(musb, 0); |
638 | handled = IRQ_HANDLED; | 609 | handled = IRQ_HANDLED; |
639 | } | 610 | } |
640 | 611 | ||
641 | #endif | 612 | #endif |
642 | if (int_usb & MUSB_INTR_SUSPEND) { | 613 | if (int_usb & MUSB_INTR_SUSPEND) { |
643 | DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", | 614 | dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x power %02x\n", |
644 | otg_state_string(musb), devctl, power); | 615 | otg_state_string(musb->xceiv->state), devctl, power); |
645 | handled = IRQ_HANDLED; | 616 | handled = IRQ_HANDLED; |
646 | 617 | ||
647 | switch (musb->xceiv->state) { | 618 | switch (musb->xceiv->state) { |
@@ -673,7 +644,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
673 | if (musb->is_active) { | 644 | if (musb->is_active) { |
674 | #ifdef CONFIG_USB_MUSB_OTG | 645 | #ifdef CONFIG_USB_MUSB_OTG |
675 | musb->xceiv->state = OTG_STATE_B_WAIT_ACON; | 646 | musb->xceiv->state = OTG_STATE_B_WAIT_ACON; |
676 | DBG(1, "HNP: Setting timer for b_ase0_brst\n"); | 647 | dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n"); |
677 | mod_timer(&musb->otg_timer, jiffies | 648 | mod_timer(&musb->otg_timer, jiffies |
678 | + msecs_to_jiffies( | 649 | + msecs_to_jiffies( |
679 | OTG_TIME_B_ASE0_BRST)); | 650 | OTG_TIME_B_ASE0_BRST)); |
@@ -692,7 +663,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
692 | break; | 663 | break; |
693 | case OTG_STATE_B_HOST: | 664 | case OTG_STATE_B_HOST: |
694 | /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ | 665 | /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ |
695 | DBG(1, "REVISIT: SUSPEND as B_HOST\n"); | 666 | dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n"); |
696 | break; | 667 | break; |
697 | default: | 668 | default: |
698 | /* "should not happen" */ | 669 | /* "should not happen" */ |
@@ -735,14 +706,14 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
735 | switch (musb->xceiv->state) { | 706 | switch (musb->xceiv->state) { |
736 | case OTG_STATE_B_PERIPHERAL: | 707 | case OTG_STATE_B_PERIPHERAL: |
737 | if (int_usb & MUSB_INTR_SUSPEND) { | 708 | if (int_usb & MUSB_INTR_SUSPEND) { |
738 | DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n"); | 709 | dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n"); |
739 | int_usb &= ~MUSB_INTR_SUSPEND; | 710 | int_usb &= ~MUSB_INTR_SUSPEND; |
740 | goto b_host; | 711 | goto b_host; |
741 | } else | 712 | } else |
742 | DBG(1, "CONNECT as b_peripheral???\n"); | 713 | dev_dbg(musb->controller, "CONNECT as b_peripheral???\n"); |
743 | break; | 714 | break; |
744 | case OTG_STATE_B_WAIT_ACON: | 715 | case OTG_STATE_B_WAIT_ACON: |
745 | DBG(1, "HNP: CONNECT, now b_host\n"); | 716 | dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n"); |
746 | b_host: | 717 | b_host: |
747 | musb->xceiv->state = OTG_STATE_B_HOST; | 718 | musb->xceiv->state = OTG_STATE_B_HOST; |
748 | hcd->self.is_b_host = 1; | 719 | hcd->self.is_b_host = 1; |
@@ -765,14 +736,14 @@ b_host: | |||
765 | else | 736 | else |
766 | usb_hcd_resume_root_hub(hcd); | 737 | usb_hcd_resume_root_hub(hcd); |
767 | 738 | ||
768 | DBG(1, "CONNECT (%s) devctl %02x\n", | 739 | dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n", |
769 | otg_state_string(musb), devctl); | 740 | otg_state_string(musb->xceiv->state), devctl); |
770 | } | 741 | } |
771 | #endif /* CONFIG_USB_MUSB_HDRC_HCD */ | 742 | #endif /* CONFIG_USB_MUSB_HDRC_HCD */ |
772 | 743 | ||
773 | if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { | 744 | if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { |
774 | DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n", | 745 | dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n", |
775 | otg_state_string(musb), | 746 | otg_state_string(musb->xceiv->state), |
776 | MUSB_MODE(musb), devctl); | 747 | MUSB_MODE(musb), devctl); |
777 | handled = IRQ_HANDLED; | 748 | handled = IRQ_HANDLED; |
778 | 749 | ||
@@ -815,7 +786,7 @@ b_host: | |||
815 | #endif /* GADGET */ | 786 | #endif /* GADGET */ |
816 | default: | 787 | default: |
817 | WARNING("unhandled DISCONNECT transition (%s)\n", | 788 | WARNING("unhandled DISCONNECT transition (%s)\n", |
818 | otg_state_string(musb)); | 789 | otg_state_string(musb->xceiv->state)); |
819 | break; | 790 | break; |
820 | } | 791 | } |
821 | } | 792 | } |
@@ -834,13 +805,14 @@ b_host: | |||
834 | * stop the session. | 805 | * stop the session. |
835 | */ | 806 | */ |
836 | if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV)) | 807 | if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV)) |
837 | DBG(1, "BABBLE devctl: %02x\n", devctl); | 808 | dev_dbg(musb->controller, "BABBLE devctl: %02x\n", devctl); |
838 | else { | 809 | else { |
839 | ERR("Stopping host session -- babble\n"); | 810 | ERR("Stopping host session -- babble\n"); |
840 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | 811 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); |
841 | } | 812 | } |
842 | } else if (is_peripheral_capable()) { | 813 | } else if (is_peripheral_capable()) { |
843 | DBG(1, "BUS RESET as %s\n", otg_state_string(musb)); | 814 | dev_dbg(musb->controller, "BUS RESET as %s\n", |
815 | otg_state_string(musb->xceiv->state)); | ||
844 | switch (musb->xceiv->state) { | 816 | switch (musb->xceiv->state) { |
845 | #ifdef CONFIG_USB_OTG | 817 | #ifdef CONFIG_USB_OTG |
846 | case OTG_STATE_A_SUSPEND: | 818 | case OTG_STATE_A_SUSPEND: |
@@ -853,9 +825,9 @@ b_host: | |||
853 | /* FALLTHROUGH */ | 825 | /* FALLTHROUGH */ |
854 | case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ | 826 | case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ |
855 | /* never use invalid T(a_wait_bcon) */ | 827 | /* never use invalid T(a_wait_bcon) */ |
856 | DBG(1, "HNP: in %s, %d msec timeout\n", | 828 | dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n", |
857 | otg_state_string(musb), | 829 | otg_state_string(musb->xceiv->state), |
858 | TA_WAIT_BCON(musb)); | 830 | TA_WAIT_BCON(musb)); |
859 | mod_timer(&musb->otg_timer, jiffies | 831 | mod_timer(&musb->otg_timer, jiffies |
860 | + msecs_to_jiffies(TA_WAIT_BCON(musb))); | 832 | + msecs_to_jiffies(TA_WAIT_BCON(musb))); |
861 | break; | 833 | break; |
@@ -865,8 +837,8 @@ b_host: | |||
865 | musb_g_reset(musb); | 837 | musb_g_reset(musb); |
866 | break; | 838 | break; |
867 | case OTG_STATE_B_WAIT_ACON: | 839 | case OTG_STATE_B_WAIT_ACON: |
868 | DBG(1, "HNP: RESET (%s), to b_peripheral\n", | 840 | dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n", |
869 | otg_state_string(musb)); | 841 | otg_state_string(musb->xceiv->state)); |
870 | musb->xceiv->state = OTG_STATE_B_PERIPHERAL; | 842 | musb->xceiv->state = OTG_STATE_B_PERIPHERAL; |
871 | musb_g_reset(musb); | 843 | musb_g_reset(musb); |
872 | break; | 844 | break; |
@@ -878,8 +850,8 @@ b_host: | |||
878 | musb_g_reset(musb); | 850 | musb_g_reset(musb); |
879 | break; | 851 | break; |
880 | default: | 852 | default: |
881 | DBG(1, "Unhandled BUS RESET as %s\n", | 853 | dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n", |
882 | otg_state_string(musb)); | 854 | otg_state_string(musb->xceiv->state)); |
883 | } | 855 | } |
884 | } | 856 | } |
885 | } | 857 | } |
@@ -902,7 +874,7 @@ b_host: | |||
902 | u8 epnum; | 874 | u8 epnum; |
903 | u16 frame; | 875 | u16 frame; |
904 | 876 | ||
905 | DBG(6, "START_OF_FRAME\n"); | 877 | dev_dbg(musb->controller, "START_OF_FRAME\n"); |
906 | handled = IRQ_HANDLED; | 878 | handled = IRQ_HANDLED; |
907 | 879 | ||
908 | /* start any periodic Tx transfers waiting for current frame */ | 880 | /* start any periodic Tx transfers waiting for current frame */ |
@@ -944,7 +916,7 @@ void musb_start(struct musb *musb) | |||
944 | void __iomem *regs = musb->mregs; | 916 | void __iomem *regs = musb->mregs; |
945 | u8 devctl = musb_readb(regs, MUSB_DEVCTL); | 917 | u8 devctl = musb_readb(regs, MUSB_DEVCTL); |
946 | 918 | ||
947 | DBG(2, "<== devctl %02x\n", devctl); | 919 | dev_dbg(musb->controller, "<== devctl %02x\n", devctl); |
948 | 920 | ||
949 | /* Set INT enable registers, enable interrupts */ | 921 | /* Set INT enable registers, enable interrupts */ |
950 | musb_writew(regs, MUSB_INTRTXE, musb->epmask); | 922 | musb_writew(regs, MUSB_INTRTXE, musb->epmask); |
@@ -1021,7 +993,7 @@ void musb_stop(struct musb *musb) | |||
1021 | /* stop IRQs, timers, ... */ | 993 | /* stop IRQs, timers, ... */ |
1022 | musb_platform_disable(musb); | 994 | musb_platform_disable(musb); |
1023 | musb_generic_disable(musb); | 995 | musb_generic_disable(musb); |
1024 | DBG(3, "HDRC disabled\n"); | 996 | dev_dbg(musb->controller, "HDRC disabled\n"); |
1025 | 997 | ||
1026 | /* FIXME | 998 | /* FIXME |
1027 | * - mark host and/or peripheral drivers unusable/inactive | 999 | * - mark host and/or peripheral drivers unusable/inactive |
@@ -1038,13 +1010,18 @@ static void musb_shutdown(struct platform_device *pdev) | |||
1038 | struct musb *musb = dev_to_musb(&pdev->dev); | 1010 | struct musb *musb = dev_to_musb(&pdev->dev); |
1039 | unsigned long flags; | 1011 | unsigned long flags; |
1040 | 1012 | ||
1013 | pm_runtime_get_sync(musb->controller); | ||
1041 | spin_lock_irqsave(&musb->lock, flags); | 1014 | spin_lock_irqsave(&musb->lock, flags); |
1042 | musb_platform_disable(musb); | 1015 | musb_platform_disable(musb); |
1043 | musb_generic_disable(musb); | 1016 | musb_generic_disable(musb); |
1044 | if (musb->clock) | ||
1045 | clk_put(musb->clock); | ||
1046 | spin_unlock_irqrestore(&musb->lock, flags); | 1017 | spin_unlock_irqrestore(&musb->lock, flags); |
1047 | 1018 | ||
1019 | if (!is_otg_enabled(musb) && is_host_enabled(musb)) | ||
1020 | usb_remove_hcd(musb_to_hcd(musb)); | ||
1021 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
1022 | musb_platform_exit(musb); | ||
1023 | |||
1024 | pm_runtime_put(musb->controller); | ||
1048 | /* FIXME power down */ | 1025 | /* FIXME power down */ |
1049 | } | 1026 | } |
1050 | 1027 | ||
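Note: the shutdown path above now brackets its register access with runtime PM instead of taking an explicit clock handle. A minimal sketch of that pattern, with a hypothetical helper name and error handling omitted:

#include <linux/pm_runtime.h>

/* Hypothetical helper: register access sits between pm_runtime_get_sync()
 * (clocks guaranteed on) and pm_runtime_put() (device may autosuspend
 * again later), mirroring the musb_shutdown() change above. */
static void example_touch_registers(struct musb *musb)
{
	pm_runtime_get_sync(musb->controller);
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);	/* drop the session */
	pm_runtime_put(musb->controller);
}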
@@ -1061,10 +1038,11 @@ static void musb_shutdown(struct platform_device *pdev) | |||
1061 | * We don't currently use dynamic fifo setup capability to do anything | 1038 | * We don't currently use dynamic fifo setup capability to do anything |
1062 | * more than selecting one of a bunch of predefined configurations. | 1039 | * more than selecting one of a bunch of predefined configurations. |
1063 | */ | 1040 | */ |
1064 | #if defined(CONFIG_USB_TUSB6010) || \ | 1041 | #if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_OMAP2PLUS) \ |
1065 | defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ | 1042 | || defined(CONFIG_USB_MUSB_AM35X) |
1066 | || defined(CONFIG_ARCH_OMAP4) | ||
1067 | static ushort __initdata fifo_mode = 4; | 1043 | static ushort __initdata fifo_mode = 4; |
1044 | #elif defined(CONFIG_USB_MUSB_UX500) | ||
1045 | static ushort __initdata fifo_mode = 5; | ||
1068 | #else | 1046 | #else |
1069 | static ushort __initdata fifo_mode = 2; | 1047 | static ushort __initdata fifo_mode = 2; |
1070 | #endif | 1048 | #endif |
@@ -1361,7 +1339,7 @@ static int __init ep_config_from_hw(struct musb *musb) | |||
1361 | void *mbase = musb->mregs; | 1339 | void *mbase = musb->mregs; |
1362 | int ret = 0; | 1340 | int ret = 0; |
1363 | 1341 | ||
1364 | DBG(2, "<== static silicon ep config\n"); | 1342 | dev_dbg(musb->controller, "<== static silicon ep config\n"); |
1365 | 1343 | ||
1366 | /* FIXME pick up ep0 maxpacket size */ | 1344 | /* FIXME pick up ep0 maxpacket size */ |
1367 | 1345 | ||
@@ -1488,7 +1466,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1488 | struct musb_hw_ep *hw_ep = musb->endpoints + i; | 1466 | struct musb_hw_ep *hw_ep = musb->endpoints + i; |
1489 | 1467 | ||
1490 | hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase; | 1468 | hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase; |
1491 | #ifdef CONFIG_USB_TUSB6010 | 1469 | #ifdef CONFIG_USB_MUSB_TUSB6010 |
1492 | hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i); | 1470 | hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i); |
1493 | hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i); | 1471 | hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i); |
1494 | hw_ep->fifo_sync_va = | 1472 | hw_ep->fifo_sync_va = |
@@ -1508,7 +1486,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1508 | #endif | 1486 | #endif |
1509 | 1487 | ||
1510 | if (hw_ep->max_packet_sz_tx) { | 1488 | if (hw_ep->max_packet_sz_tx) { |
1511 | DBG(1, | 1489 | dev_dbg(musb->controller, |
1512 | "%s: hw_ep %d%s, %smax %d\n", | 1490 | "%s: hw_ep %d%s, %smax %d\n", |
1513 | musb_driver_name, i, | 1491 | musb_driver_name, i, |
1514 | hw_ep->is_shared_fifo ? "shared" : "tx", | 1492 | hw_ep->is_shared_fifo ? "shared" : "tx", |
@@ -1517,7 +1495,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1517 | hw_ep->max_packet_sz_tx); | 1495 | hw_ep->max_packet_sz_tx); |
1518 | } | 1496 | } |
1519 | if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { | 1497 | if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { |
1520 | DBG(1, | 1498 | dev_dbg(musb->controller, |
1521 | "%s: hw_ep %d%s, %smax %d\n", | 1499 | "%s: hw_ep %d%s, %smax %d\n", |
1522 | musb_driver_name, i, | 1500 | musb_driver_name, i, |
1523 | "rx", | 1501 | "rx", |
@@ -1526,7 +1504,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1526 | hw_ep->max_packet_sz_rx); | 1504 | hw_ep->max_packet_sz_rx); |
1527 | } | 1505 | } |
1528 | if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx)) | 1506 | if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx)) |
1529 | DBG(1, "hw_ep %d not configured\n", i); | 1507 | dev_dbg(musb->controller, "hw_ep %d not configured\n", i); |
1530 | } | 1508 | } |
1531 | 1509 | ||
1532 | return 0; | 1510 | return 0; |
@@ -1534,8 +1512,9 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1534 | 1512 | ||
1535 | /*-------------------------------------------------------------------------*/ | 1513 | /*-------------------------------------------------------------------------*/ |
1536 | 1514 | ||
1537 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \ | 1515 | #if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ |
1538 | defined(CONFIG_ARCH_OMAP4) | 1516 | defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \ |
1517 | defined(CONFIG_ARCH_U5500) | ||
1539 | 1518 | ||
1540 | static irqreturn_t generic_interrupt(int irq, void *__hci) | 1519 | static irqreturn_t generic_interrupt(int irq, void *__hci) |
1541 | { | 1520 | { |
@@ -1578,14 +1557,14 @@ irqreturn_t musb_interrupt(struct musb *musb) | |||
1578 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | 1557 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); |
1579 | power = musb_readb(musb->mregs, MUSB_POWER); | 1558 | power = musb_readb(musb->mregs, MUSB_POWER); |
1580 | 1559 | ||
1581 | DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n", | 1560 | dev_dbg(musb->controller, "** IRQ %s usb%04x tx%04x rx%04x\n", |
1582 | (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral", | 1561 | (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral", |
1583 | musb->int_usb, musb->int_tx, musb->int_rx); | 1562 | musb->int_usb, musb->int_tx, musb->int_rx); |
1584 | 1563 | ||
1585 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | 1564 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC |
1586 | if (is_otg_enabled(musb) || is_peripheral_enabled(musb)) | 1565 | if (is_otg_enabled(musb) || is_peripheral_enabled(musb)) |
1587 | if (!musb->gadget_driver) { | 1566 | if (!musb->gadget_driver) { |
1588 | DBG(5, "No gadget driver loaded\n"); | 1567 | dev_dbg(musb->controller, "No gadget driver loaded\n"); |
1589 | return IRQ_HANDLED; | 1568 | return IRQ_HANDLED; |
1590 | } | 1569 | } |
1591 | #endif | 1570 | #endif |
@@ -1650,7 +1629,7 @@ irqreturn_t musb_interrupt(struct musb *musb) | |||
1650 | 1629 | ||
1651 | return retval; | 1630 | return retval; |
1652 | } | 1631 | } |
1653 | 1632 | EXPORT_SYMBOL_GPL(musb_interrupt); | |
1654 | 1633 | ||
1655 | #ifndef CONFIG_MUSB_PIO_ONLY | 1634 | #ifndef CONFIG_MUSB_PIO_ONLY |
1656 | static int __initdata use_dma = 1; | 1635 | static int __initdata use_dma = 1; |
@@ -1714,7 +1693,7 @@ musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
1714 | int ret = -EINVAL; | 1693 | int ret = -EINVAL; |
1715 | 1694 | ||
1716 | spin_lock_irqsave(&musb->lock, flags); | 1695 | spin_lock_irqsave(&musb->lock, flags); |
1717 | ret = sprintf(buf, "%s\n", otg_state_string(musb)); | 1696 | ret = sprintf(buf, "%s\n", otg_state_string(musb->xceiv->state)); |
1718 | spin_unlock_irqrestore(&musb->lock, flags); | 1697 | spin_unlock_irqrestore(&musb->lock, flags); |
1719 | 1698 | ||
1720 | return ret; | 1699 | return ret; |
@@ -1867,6 +1846,7 @@ allocate_instance(struct device *dev, | |||
1867 | INIT_LIST_HEAD(&musb->out_bulk); | 1846 | INIT_LIST_HEAD(&musb->out_bulk); |
1868 | 1847 | ||
1869 | hcd->uses_new_polling = 1; | 1848 | hcd->uses_new_polling = 1; |
1849 | hcd->has_tt = 1; | ||
1870 | 1850 | ||
1871 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; | 1851 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; |
1872 | musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; | 1852 | musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; |
@@ -1874,10 +1854,9 @@ allocate_instance(struct device *dev, | |||
1874 | musb = kzalloc(sizeof *musb, GFP_KERNEL); | 1854 | musb = kzalloc(sizeof *musb, GFP_KERNEL); |
1875 | if (!musb) | 1855 | if (!musb) |
1876 | return NULL; | 1856 | return NULL; |
1877 | dev_set_drvdata(dev, musb); | ||
1878 | 1857 | ||
1879 | #endif | 1858 | #endif |
1880 | 1859 | dev_set_drvdata(dev, musb); | |
1881 | musb->mregs = mbase; | 1860 | musb->mregs = mbase; |
1882 | musb->ctrl_base = mbase; | 1861 | musb->ctrl_base = mbase; |
1883 | musb->nIrq = -ENODEV; | 1862 | musb->nIrq = -ENODEV; |
@@ -1891,6 +1870,7 @@ allocate_instance(struct device *dev, | |||
1891 | } | 1870 | } |
1892 | 1871 | ||
1893 | musb->controller = dev; | 1872 | musb->controller = dev; |
1873 | |||
1894 | return musb; | 1874 | return musb; |
1895 | } | 1875 | } |
1896 | 1876 | ||
@@ -1921,10 +1901,6 @@ static void musb_free(struct musb *musb) | |||
1921 | dma_controller_destroy(c); | 1901 | dma_controller_destroy(c); |
1922 | } | 1902 | } |
1923 | 1903 | ||
1924 | #ifdef CONFIG_USB_MUSB_OTG | ||
1925 | put_device(musb->xceiv->dev); | ||
1926 | #endif | ||
1927 | |||
1928 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | 1904 | #ifdef CONFIG_USB_MUSB_HDRC_HCD |
1929 | usb_put_hcd(musb_to_hcd(musb)); | 1905 | usb_put_hcd(musb_to_hcd(musb)); |
1930 | #else | 1906 | #else |
@@ -1956,31 +1932,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
1956 | goto fail0; | 1932 | goto fail0; |
1957 | } | 1933 | } |
1958 | 1934 | ||
1959 | switch (plat->mode) { | ||
1960 | case MUSB_HOST: | ||
1961 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
1962 | break; | ||
1963 | #else | ||
1964 | goto bad_config; | ||
1965 | #endif | ||
1966 | case MUSB_PERIPHERAL: | ||
1967 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
1968 | break; | ||
1969 | #else | ||
1970 | goto bad_config; | ||
1971 | #endif | ||
1972 | case MUSB_OTG: | ||
1973 | #ifdef CONFIG_USB_MUSB_OTG | ||
1974 | break; | ||
1975 | #else | ||
1976 | bad_config: | ||
1977 | #endif | ||
1978 | default: | ||
1979 | dev_err(dev, "incompatible Kconfig role setting\n"); | ||
1980 | status = -EINVAL; | ||
1981 | goto fail0; | ||
1982 | } | ||
1983 | |||
1984 | /* allocate */ | 1935 | /* allocate */ |
1985 | musb = allocate_instance(dev, plat->config, ctrl); | 1936 | musb = allocate_instance(dev, plat->config, ctrl); |
1986 | if (!musb) { | 1937 | if (!musb) { |
@@ -1988,33 +1939,21 @@ bad_config: | |||
1988 | goto fail0; | 1939 | goto fail0; |
1989 | } | 1940 | } |
1990 | 1941 | ||
1942 | pm_runtime_use_autosuspend(musb->controller); | ||
1943 | pm_runtime_set_autosuspend_delay(musb->controller, 200); | ||
1944 | pm_runtime_enable(musb->controller); | ||
1945 | |||
1991 | spin_lock_init(&musb->lock); | 1946 | spin_lock_init(&musb->lock); |
1992 | musb->board_mode = plat->mode; | 1947 | musb->board_mode = plat->mode; |
1993 | musb->board_set_power = plat->set_power; | 1948 | musb->board_set_power = plat->set_power; |
1994 | musb->set_clock = plat->set_clock; | ||
1995 | musb->min_power = plat->min_power; | 1949 | musb->min_power = plat->min_power; |
1996 | 1950 | musb->ops = plat->platform_ops; | |
1997 | /* Clock usage is chip-specific ... functional clock (DaVinci, | ||
1998 | * OMAP2430), or PHY ref (some TUSB6010 boards). All this core | ||
1999 | * code does is make sure a clock handle is available; platform | ||
2000 | * code manages it during start/stop and suspend/resume. | ||
2001 | */ | ||
2002 | if (plat->clock) { | ||
2003 | musb->clock = clk_get(dev, plat->clock); | ||
2004 | if (IS_ERR(musb->clock)) { | ||
2005 | status = PTR_ERR(musb->clock); | ||
2006 | musb->clock = NULL; | ||
2007 | goto fail1; | ||
2008 | } | ||
2009 | } | ||
2010 | 1951 | ||
2011 | /* The musb_platform_init() call: | 1952 | /* The musb_platform_init() call: |
2012 | * - adjusts musb->mregs and musb->isr if needed, | 1953 | * - adjusts musb->mregs and musb->isr if needed, |
2013 | * - may initialize an integrated transceiver | 1954 | * - may initialize an integrated transceiver |
2014 | * - initializes musb->xceiv, usually by otg_get_transceiver() | 1955 | * - initializes musb->xceiv, usually by otg_get_transceiver() |
2015 | * - activates clocks. | ||
2016 | * - stops powering VBUS | 1956 | * - stops powering VBUS |
2017 | * - assigns musb->board_set_vbus if host mode is enabled | ||
2018 | * | 1957 | * |
2019 | * There are various transceiver configurations. Blackfin, | 1958 | * There are various transceiver configurations. Blackfin, |
2020 | * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses | 1959 | * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses |
@@ -2022,9 +1961,9 @@ bad_config: | |||
2022 | * isp1504, non-OTG, etc) mostly hooking up through ULPI. | 1961 | * isp1504, non-OTG, etc) mostly hooking up through ULPI. |
2023 | */ | 1962 | */ |
2024 | musb->isr = generic_interrupt; | 1963 | musb->isr = generic_interrupt; |
2025 | status = musb_platform_init(musb, plat->board_data); | 1964 | status = musb_platform_init(musb); |
2026 | if (status < 0) | 1965 | if (status < 0) |
2027 | goto fail2; | 1966 | goto fail1; |
2028 | 1967 | ||
2029 | if (!musb->isr) { | 1968 | if (!musb->isr) { |
2030 | status = -ENODEV; | 1969 | status = -ENODEV; |
@@ -2107,13 +2046,16 @@ bad_config: | |||
2107 | * Otherwise, wait till the gadget driver hooks up. | 2046 | * Otherwise, wait till the gadget driver hooks up. |
2108 | */ | 2047 | */ |
2109 | if (!is_otg_enabled(musb) && is_host_enabled(musb)) { | 2048 | if (!is_otg_enabled(musb) && is_host_enabled(musb)) { |
2049 | struct usb_hcd *hcd = musb_to_hcd(musb); | ||
2050 | |||
2110 | MUSB_HST_MODE(musb); | 2051 | MUSB_HST_MODE(musb); |
2111 | musb->xceiv->default_a = 1; | 2052 | musb->xceiv->default_a = 1; |
2112 | musb->xceiv->state = OTG_STATE_A_IDLE; | 2053 | musb->xceiv->state = OTG_STATE_A_IDLE; |
2113 | 2054 | ||
2114 | status = usb_add_hcd(musb_to_hcd(musb), -1, 0); | 2055 | status = usb_add_hcd(musb_to_hcd(musb), -1, 0); |
2115 | 2056 | ||
2116 | DBG(1, "%s mode, status %d, devctl %02x %c\n", | 2057 | hcd->self.uses_pio_for_control = 1; |
2058 | dev_dbg(musb->controller, "%s mode, status %d, devctl %02x %c\n", | ||
2117 | "HOST", status, | 2059 | "HOST", status, |
2118 | musb_readb(musb->mregs, MUSB_DEVCTL), | 2060 | musb_readb(musb->mregs, MUSB_DEVCTL), |
2119 | (musb_readb(musb->mregs, MUSB_DEVCTL) | 2061 | (musb_readb(musb->mregs, MUSB_DEVCTL) |
@@ -2127,7 +2069,7 @@ bad_config: | |||
2127 | 2069 | ||
2128 | status = musb_gadget_setup(musb); | 2070 | status = musb_gadget_setup(musb); |
2129 | 2071 | ||
2130 | DBG(1, "%s mode, status %d, dev%02x\n", | 2072 | dev_dbg(musb->controller, "%s mode, status %d, dev%02x\n", |
2131 | is_otg_enabled(musb) ? "OTG" : "PERIPHERAL", | 2073 | is_otg_enabled(musb) ? "OTG" : "PERIPHERAL", |
2132 | status, | 2074 | status, |
2133 | musb_readb(musb->mregs, MUSB_DEVCTL)); | 2075 | musb_readb(musb->mregs, MUSB_DEVCTL)); |
@@ -2136,6 +2078,8 @@ bad_config: | |||
2136 | if (status < 0) | 2078 | if (status < 0) |
2137 | goto fail3; | 2079 | goto fail3; |
2138 | 2080 | ||
2081 | pm_runtime_put(musb->controller); | ||
2082 | |||
2139 | status = musb_init_debugfs(musb); | 2083 | status = musb_init_debugfs(musb); |
2140 | if (status < 0) | 2084 | if (status < 0) |
2141 | goto fail4; | 2085 | goto fail4; |
@@ -2174,10 +2118,6 @@ fail3: | |||
2174 | device_init_wakeup(dev, 0); | 2118 | device_init_wakeup(dev, 0); |
2175 | musb_platform_exit(musb); | 2119 | musb_platform_exit(musb); |
2176 | 2120 | ||
2177 | fail2: | ||
2178 | if (musb->clock) | ||
2179 | clk_put(musb->clock); | ||
2180 | |||
2181 | fail1: | 2121 | fail1: |
2182 | dev_err(musb->controller, | 2122 | dev_err(musb->controller, |
2183 | "musb_init_controller failed with status %d\n", status); | 2123 | "musb_init_controller failed with status %d\n", status); |
@@ -2203,13 +2143,13 @@ static u64 *orig_dma_mask; | |||
2203 | static int __init musb_probe(struct platform_device *pdev) | 2143 | static int __init musb_probe(struct platform_device *pdev) |
2204 | { | 2144 | { |
2205 | struct device *dev = &pdev->dev; | 2145 | struct device *dev = &pdev->dev; |
2206 | int irq = platform_get_irq(pdev, 0); | 2146 | int irq = platform_get_irq_byname(pdev, "mc"); |
2207 | int status; | 2147 | int status; |
2208 | struct resource *iomem; | 2148 | struct resource *iomem; |
2209 | void __iomem *base; | 2149 | void __iomem *base; |
2210 | 2150 | ||
2211 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2151 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2212 | if (!iomem || irq == 0) | 2152 | if (!iomem || irq <= 0) |
2213 | return -ENODEV; | 2153 | return -ENODEV; |
2214 | 2154 | ||
2215 | base = ioremap(iomem->start, resource_size(iomem)); | 2155 | base = ioremap(iomem->start, resource_size(iomem)); |
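Note: the probe now looks up the controller interrupt by name ("mc") rather than by index and rejects irq <= 0. As an illustrative sketch only (resource values are hypothetical), a glue layer creating the musb-hdrc child device would pass that interrupt as a named IRQ resource so platform_get_irq_byname() can find it:

#include <linux/platform_device.h>

static struct resource example_musb_resources[] = {
	{
		.start	= 92,		/* hypothetical interrupt line */
		.end	= 92,
		.name	= "mc",		/* matched by platform_get_irq_byname(pdev, "mc") */
		.flags	= IORESOURCE_IRQ,
	},
};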
@@ -2239,16 +2179,11 @@ static int __exit musb_remove(struct platform_device *pdev) | |||
2239 | * - Peripheral mode: peripheral is deactivated (or never-activated) | 2179 | * - Peripheral mode: peripheral is deactivated (or never-activated) |
2240 | * - OTG mode: both roles are deactivated (or never-activated) | 2180 | * - OTG mode: both roles are deactivated (or never-activated) |
2241 | */ | 2181 | */ |
2182 | pm_runtime_get_sync(musb->controller); | ||
2242 | musb_exit_debugfs(musb); | 2183 | musb_exit_debugfs(musb); |
2243 | musb_shutdown(pdev); | 2184 | musb_shutdown(pdev); |
2244 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
2245 | if (musb->board_mode == MUSB_HOST) | ||
2246 | usb_remove_hcd(musb_to_hcd(musb)); | ||
2247 | #endif | ||
2248 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
2249 | musb_platform_exit(musb); | ||
2250 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
2251 | 2185 | ||
2186 | pm_runtime_put(musb->controller); | ||
2252 | musb_free(musb); | 2187 | musb_free(musb); |
2253 | iounmap(ctrl_base); | 2188 | iounmap(ctrl_base); |
2254 | device_init_wakeup(&pdev->dev, 0); | 2189 | device_init_wakeup(&pdev->dev, 0); |
@@ -2260,148 +2195,140 @@ static int __exit musb_remove(struct platform_device *pdev) | |||
2260 | 2195 | ||
2261 | #ifdef CONFIG_PM | 2196 | #ifdef CONFIG_PM |
2262 | 2197 | ||
2263 | static struct musb_context_registers musb_context; | 2198 | static void musb_save_context(struct musb *musb) |
2264 | |||
2265 | void musb_save_context(struct musb *musb) | ||
2266 | { | 2199 | { |
2267 | int i; | 2200 | int i; |
2268 | void __iomem *musb_base = musb->mregs; | 2201 | void __iomem *musb_base = musb->mregs; |
2202 | void __iomem *epio; | ||
2269 | 2203 | ||
2270 | if (is_host_enabled(musb)) { | 2204 | if (is_host_enabled(musb)) { |
2271 | musb_context.frame = musb_readw(musb_base, MUSB_FRAME); | 2205 | musb->context.frame = musb_readw(musb_base, MUSB_FRAME); |
2272 | musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE); | 2206 | musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE); |
2273 | musb_context.busctl = musb_read_ulpi_buscontrol(musb->mregs); | 2207 | musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs); |
2274 | } | 2208 | } |
2275 | musb_context.power = musb_readb(musb_base, MUSB_POWER); | 2209 | musb->context.power = musb_readb(musb_base, MUSB_POWER); |
2276 | musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE); | 2210 | musb->context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE); |
2277 | musb_context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE); | 2211 | musb->context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE); |
2278 | musb_context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE); | 2212 | musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE); |
2279 | musb_context.index = musb_readb(musb_base, MUSB_INDEX); | 2213 | musb->context.index = musb_readb(musb_base, MUSB_INDEX); |
2280 | musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL); | 2214 | musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL); |
2281 | 2215 | ||
2282 | for (i = 0; i < MUSB_C_NUM_EPS; ++i) { | 2216 | for (i = 0; i < musb->config->num_eps; ++i) { |
2283 | musb_writeb(musb_base, MUSB_INDEX, i); | 2217 | epio = musb->endpoints[i].regs; |
2284 | musb_context.index_regs[i].txmaxp = | 2218 | musb->context.index_regs[i].txmaxp = |
2285 | musb_readw(musb_base, 0x10 + MUSB_TXMAXP); | 2219 | musb_readw(epio, MUSB_TXMAXP); |
2286 | musb_context.index_regs[i].txcsr = | 2220 | musb->context.index_regs[i].txcsr = |
2287 | musb_readw(musb_base, 0x10 + MUSB_TXCSR); | 2221 | musb_readw(epio, MUSB_TXCSR); |
2288 | musb_context.index_regs[i].rxmaxp = | 2222 | musb->context.index_regs[i].rxmaxp = |
2289 | musb_readw(musb_base, 0x10 + MUSB_RXMAXP); | 2223 | musb_readw(epio, MUSB_RXMAXP); |
2290 | musb_context.index_regs[i].rxcsr = | 2224 | musb->context.index_regs[i].rxcsr = |
2291 | musb_readw(musb_base, 0x10 + MUSB_RXCSR); | 2225 | musb_readw(epio, MUSB_RXCSR); |
2292 | 2226 | ||
2293 | if (musb->dyn_fifo) { | 2227 | if (musb->dyn_fifo) { |
2294 | musb_context.index_regs[i].txfifoadd = | 2228 | musb->context.index_regs[i].txfifoadd = |
2295 | musb_read_txfifoadd(musb_base); | 2229 | musb_read_txfifoadd(musb_base); |
2296 | musb_context.index_regs[i].rxfifoadd = | 2230 | musb->context.index_regs[i].rxfifoadd = |
2297 | musb_read_rxfifoadd(musb_base); | 2231 | musb_read_rxfifoadd(musb_base); |
2298 | musb_context.index_regs[i].txfifosz = | 2232 | musb->context.index_regs[i].txfifosz = |
2299 | musb_read_txfifosz(musb_base); | 2233 | musb_read_txfifosz(musb_base); |
2300 | musb_context.index_regs[i].rxfifosz = | 2234 | musb->context.index_regs[i].rxfifosz = |
2301 | musb_read_rxfifosz(musb_base); | 2235 | musb_read_rxfifosz(musb_base); |
2302 | } | 2236 | } |
2303 | if (is_host_enabled(musb)) { | 2237 | if (is_host_enabled(musb)) { |
2304 | musb_context.index_regs[i].txtype = | 2238 | musb->context.index_regs[i].txtype = |
2305 | musb_readb(musb_base, 0x10 + MUSB_TXTYPE); | 2239 | musb_readb(epio, MUSB_TXTYPE); |
2306 | musb_context.index_regs[i].txinterval = | 2240 | musb->context.index_regs[i].txinterval = |
2307 | musb_readb(musb_base, 0x10 + MUSB_TXINTERVAL); | 2241 | musb_readb(epio, MUSB_TXINTERVAL); |
2308 | musb_context.index_regs[i].rxtype = | 2242 | musb->context.index_regs[i].rxtype = |
2309 | musb_readb(musb_base, 0x10 + MUSB_RXTYPE); | 2243 | musb_readb(epio, MUSB_RXTYPE); |
2310 | musb_context.index_regs[i].rxinterval = | 2244 | musb->context.index_regs[i].rxinterval = |
2311 | musb_readb(musb_base, 0x10 + MUSB_RXINTERVAL); | 2245 | musb_readb(epio, MUSB_RXINTERVAL); |
2312 | 2246 | ||
2313 | musb_context.index_regs[i].txfunaddr = | 2247 | musb->context.index_regs[i].txfunaddr = |
2314 | musb_read_txfunaddr(musb_base, i); | 2248 | musb_read_txfunaddr(musb_base, i); |
2315 | musb_context.index_regs[i].txhubaddr = | 2249 | musb->context.index_regs[i].txhubaddr = |
2316 | musb_read_txhubaddr(musb_base, i); | 2250 | musb_read_txhubaddr(musb_base, i); |
2317 | musb_context.index_regs[i].txhubport = | 2251 | musb->context.index_regs[i].txhubport = |
2318 | musb_read_txhubport(musb_base, i); | 2252 | musb_read_txhubport(musb_base, i); |
2319 | 2253 | ||
2320 | musb_context.index_regs[i].rxfunaddr = | 2254 | musb->context.index_regs[i].rxfunaddr = |
2321 | musb_read_rxfunaddr(musb_base, i); | 2255 | musb_read_rxfunaddr(musb_base, i); |
2322 | musb_context.index_regs[i].rxhubaddr = | 2256 | musb->context.index_regs[i].rxhubaddr = |
2323 | musb_read_rxhubaddr(musb_base, i); | 2257 | musb_read_rxhubaddr(musb_base, i); |
2324 | musb_context.index_regs[i].rxhubport = | 2258 | musb->context.index_regs[i].rxhubport = |
2325 | musb_read_rxhubport(musb_base, i); | 2259 | musb_read_rxhubport(musb_base, i); |
2326 | } | 2260 | } |
2327 | } | 2261 | } |
2328 | |||
2329 | musb_writeb(musb_base, MUSB_INDEX, musb_context.index); | ||
2330 | |||
2331 | musb_platform_save_context(musb, &musb_context); | ||
2332 | } | 2262 | } |
2333 | 2263 | ||
2334 | void musb_restore_context(struct musb *musb) | 2264 | static void musb_restore_context(struct musb *musb) |
2335 | { | 2265 | { |
2336 | int i; | 2266 | int i; |
2337 | void __iomem *musb_base = musb->mregs; | 2267 | void __iomem *musb_base = musb->mregs; |
2338 | void __iomem *ep_target_regs; | 2268 | void __iomem *ep_target_regs; |
2339 | 2269 | void __iomem *epio; | |
2340 | musb_platform_restore_context(musb, &musb_context); | ||
2341 | 2270 | ||
2342 | if (is_host_enabled(musb)) { | 2271 | if (is_host_enabled(musb)) { |
2343 | musb_writew(musb_base, MUSB_FRAME, musb_context.frame); | 2272 | musb_writew(musb_base, MUSB_FRAME, musb->context.frame); |
2344 | musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode); | 2273 | musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode); |
2345 | musb_write_ulpi_buscontrol(musb->mregs, musb_context.busctl); | 2274 | musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl); |
2346 | } | 2275 | } |
2347 | musb_writeb(musb_base, MUSB_POWER, musb_context.power); | 2276 | musb_writeb(musb_base, MUSB_POWER, musb->context.power); |
2348 | musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe); | 2277 | musb_writew(musb_base, MUSB_INTRTXE, musb->context.intrtxe); |
2349 | musb_writew(musb_base, MUSB_INTRRXE, musb_context.intrrxe); | 2278 | musb_writew(musb_base, MUSB_INTRRXE, musb->context.intrrxe); |
2350 | musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe); | 2279 | musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); |
2351 | musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl); | 2280 | musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); |
2352 | 2281 | ||
2353 | for (i = 0; i < MUSB_C_NUM_EPS; ++i) { | 2282 | for (i = 0; i < musb->config->num_eps; ++i) { |
2354 | musb_writeb(musb_base, MUSB_INDEX, i); | 2283 | epio = musb->endpoints[i].regs; |
2355 | musb_writew(musb_base, 0x10 + MUSB_TXMAXP, | 2284 | musb_writew(epio, MUSB_TXMAXP, |
2356 | musb_context.index_regs[i].txmaxp); | 2285 | musb->context.index_regs[i].txmaxp); |
2357 | musb_writew(musb_base, 0x10 + MUSB_TXCSR, | 2286 | musb_writew(epio, MUSB_TXCSR, |
2358 | musb_context.index_regs[i].txcsr); | 2287 | musb->context.index_regs[i].txcsr); |
2359 | musb_writew(musb_base, 0x10 + MUSB_RXMAXP, | 2288 | musb_writew(epio, MUSB_RXMAXP, |
2360 | musb_context.index_regs[i].rxmaxp); | 2289 | musb->context.index_regs[i].rxmaxp); |
2361 | musb_writew(musb_base, 0x10 + MUSB_RXCSR, | 2290 | musb_writew(epio, MUSB_RXCSR, |
2362 | musb_context.index_regs[i].rxcsr); | 2291 | musb->context.index_regs[i].rxcsr); |
2363 | 2292 | ||
2364 | if (musb->dyn_fifo) { | 2293 | if (musb->dyn_fifo) { |
2365 | musb_write_txfifosz(musb_base, | 2294 | musb_write_txfifosz(musb_base, |
2366 | musb_context.index_regs[i].txfifosz); | 2295 | musb->context.index_regs[i].txfifosz); |
2367 | musb_write_rxfifosz(musb_base, | 2296 | musb_write_rxfifosz(musb_base, |
2368 | musb_context.index_regs[i].rxfifosz); | 2297 | musb->context.index_regs[i].rxfifosz); |
2369 | musb_write_txfifoadd(musb_base, | 2298 | musb_write_txfifoadd(musb_base, |
2370 | musb_context.index_regs[i].txfifoadd); | 2299 | musb->context.index_regs[i].txfifoadd); |
2371 | musb_write_rxfifoadd(musb_base, | 2300 | musb_write_rxfifoadd(musb_base, |
2372 | musb_context.index_regs[i].rxfifoadd); | 2301 | musb->context.index_regs[i].rxfifoadd); |
2373 | } | 2302 | } |
2374 | 2303 | ||
2375 | if (is_host_enabled(musb)) { | 2304 | if (is_host_enabled(musb)) { |
2376 | musb_writeb(musb_base, 0x10 + MUSB_TXTYPE, | 2305 | musb_writeb(epio, MUSB_TXTYPE, |
2377 | musb_context.index_regs[i].txtype); | 2306 | musb->context.index_regs[i].txtype); |
2378 | musb_writeb(musb_base, 0x10 + MUSB_TXINTERVAL, | 2307 | musb_writeb(epio, MUSB_TXINTERVAL, |
2379 | musb_context.index_regs[i].txinterval); | 2308 | musb->context.index_regs[i].txinterval); |
2380 | musb_writeb(musb_base, 0x10 + MUSB_RXTYPE, | 2309 | musb_writeb(epio, MUSB_RXTYPE, |
2381 | musb_context.index_regs[i].rxtype); | 2310 | musb->context.index_regs[i].rxtype); |
2382 | musb_writeb(musb_base, 0x10 + MUSB_RXINTERVAL, | 2311 | musb_writeb(epio, MUSB_RXINTERVAL, |
2383 | 2312 | ||
2384 | musb_context.index_regs[i].rxinterval); | 2313 | musb->context.index_regs[i].rxinterval); |
2385 | musb_write_txfunaddr(musb_base, i, | 2314 | musb_write_txfunaddr(musb_base, i, |
2386 | musb_context.index_regs[i].txfunaddr); | 2315 | musb->context.index_regs[i].txfunaddr); |
2387 | musb_write_txhubaddr(musb_base, i, | 2316 | musb_write_txhubaddr(musb_base, i, |
2388 | musb_context.index_regs[i].txhubaddr); | 2317 | musb->context.index_regs[i].txhubaddr); |
2389 | musb_write_txhubport(musb_base, i, | 2318 | musb_write_txhubport(musb_base, i, |
2390 | musb_context.index_regs[i].txhubport); | 2319 | musb->context.index_regs[i].txhubport); |
2391 | 2320 | ||
2392 | ep_target_regs = | 2321 | ep_target_regs = |
2393 | musb_read_target_reg_base(i, musb_base); | 2322 | musb_read_target_reg_base(i, musb_base); |
2394 | 2323 | ||
2395 | musb_write_rxfunaddr(ep_target_regs, | 2324 | musb_write_rxfunaddr(ep_target_regs, |
2396 | musb_context.index_regs[i].rxfunaddr); | 2325 | musb->context.index_regs[i].rxfunaddr); |
2397 | musb_write_rxhubaddr(ep_target_regs, | 2326 | musb_write_rxhubaddr(ep_target_regs, |
2398 | musb_context.index_regs[i].rxhubaddr); | 2327 | musb->context.index_regs[i].rxhubaddr); |
2399 | musb_write_rxhubport(ep_target_regs, | 2328 | musb_write_rxhubport(ep_target_regs, |
2400 | musb_context.index_regs[i].rxhubport); | 2329 | musb->context.index_regs[i].rxhubport); |
2401 | } | 2330 | } |
2402 | } | 2331 | } |
2403 | |||
2404 | musb_writeb(musb_base, MUSB_INDEX, musb_context.index); | ||
2405 | } | 2332 | } |
2406 | 2333 | ||
2407 | static int musb_suspend(struct device *dev) | 2334 | static int musb_suspend(struct device *dev) |
@@ -2410,9 +2337,6 @@ static int musb_suspend(struct device *dev) | |||
2410 | unsigned long flags; | 2337 | unsigned long flags; |
2411 | struct musb *musb = dev_to_musb(&pdev->dev); | 2338 | struct musb *musb = dev_to_musb(&pdev->dev); |
2412 | 2339 | ||
2413 | if (!musb->clock) | ||
2414 | return 0; | ||
2415 | |||
2416 | spin_lock_irqsave(&musb->lock, flags); | 2340 | spin_lock_irqsave(&musb->lock, flags); |
2417 | 2341 | ||
2418 | if (is_peripheral_active(musb)) { | 2342 | if (is_peripheral_active(musb)) { |
@@ -2427,10 +2351,6 @@ static int musb_suspend(struct device *dev) | |||
2427 | 2351 | ||
2428 | musb_save_context(musb); | 2352 | musb_save_context(musb); |
2429 | 2353 | ||
2430 | if (musb->set_clock) | ||
2431 | musb->set_clock(musb->clock, 0); | ||
2432 | else | ||
2433 | clk_disable(musb->clock); | ||
2434 | spin_unlock_irqrestore(&musb->lock, flags); | 2354 | spin_unlock_irqrestore(&musb->lock, flags); |
2435 | return 0; | 2355 | return 0; |
2436 | } | 2356 | } |
@@ -2440,14 +2360,6 @@ static int musb_resume_noirq(struct device *dev) | |||
2440 | struct platform_device *pdev = to_platform_device(dev); | 2360 | struct platform_device *pdev = to_platform_device(dev); |
2441 | struct musb *musb = dev_to_musb(&pdev->dev); | 2361 | struct musb *musb = dev_to_musb(&pdev->dev); |
2442 | 2362 | ||
2443 | if (!musb->clock) | ||
2444 | return 0; | ||
2445 | |||
2446 | if (musb->set_clock) | ||
2447 | musb->set_clock(musb->clock, 1); | ||
2448 | else | ||
2449 | clk_enable(musb->clock); | ||
2450 | |||
2451 | musb_restore_context(musb); | 2363 | musb_restore_context(musb); |
2452 | 2364 | ||
2453 | /* for static cmos like DaVinci, register values were preserved | 2365 | /* for static cmos like DaVinci, register values were preserved |
@@ -2457,9 +2369,41 @@ static int musb_resume_noirq(struct device *dev) | |||
2457 | return 0; | 2369 | return 0; |
2458 | } | 2370 | } |
2459 | 2371 | ||
2372 | static int musb_runtime_suspend(struct device *dev) | ||
2373 | { | ||
2374 | struct musb *musb = dev_to_musb(dev); | ||
2375 | |||
2376 | musb_save_context(musb); | ||
2377 | |||
2378 | return 0; | ||
2379 | } | ||
2380 | |||
2381 | static int musb_runtime_resume(struct device *dev) | ||
2382 | { | ||
2383 | struct musb *musb = dev_to_musb(dev); | ||
2384 | static int first = 1; | ||
2385 | |||
2386 | /* | ||
2387 | * When pm_runtime_get_sync called for the first time in driver | ||
2388 | * init, some of the structure is still not initialized which is | ||
2389 | * used in restore function. But clock needs to be | ||
2390 | * enabled before any register access, so | ||
2391 | * pm_runtime_get_sync has to be called. | ||
2392 | * Also context restore without save does not make | ||
2393 | * any sense | ||
2394 | */ | ||
2395 | if (!first) | ||
2396 | musb_restore_context(musb); | ||
2397 | first = 0; | ||
2398 | |||
2399 | return 0; | ||
2400 | } | ||
2401 | |||
2460 | static const struct dev_pm_ops musb_dev_pm_ops = { | 2402 | static const struct dev_pm_ops musb_dev_pm_ops = { |
2461 | .suspend = musb_suspend, | 2403 | .suspend = musb_suspend, |
2462 | .resume_noirq = musb_resume_noirq, | 2404 | .resume_noirq = musb_resume_noirq, |
2405 | .runtime_suspend = musb_runtime_suspend, | ||
2406 | .runtime_resume = musb_runtime_resume, | ||
2463 | }; | 2407 | }; |
2464 | 2408 | ||
2465 | #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops) | 2409 | #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops) |
@@ -2496,6 +2440,8 @@ static int __init musb_init(void) | |||
2496 | "musb-dma" | 2440 | "musb-dma" |
2497 | #elif defined(CONFIG_USB_TUSB_OMAP_DMA) | 2441 | #elif defined(CONFIG_USB_TUSB_OMAP_DMA) |
2498 | "tusb-omap-dma" | 2442 | "tusb-omap-dma" |
2443 | #elif defined(CONFIG_USB_UX500_DMA) | ||
2444 | "ux500-dma" | ||
2499 | #else | 2445 | #else |
2500 | "?dma?" | 2446 | "?dma?" |
2501 | #endif | 2447 | #endif |
@@ -2507,8 +2453,8 @@ static int __init musb_init(void) | |||
2507 | #elif defined(CONFIG_USB_MUSB_HDRC_HCD) | 2453 | #elif defined(CONFIG_USB_MUSB_HDRC_HCD) |
2508 | "host" | 2454 | "host" |
2509 | #endif | 2455 | #endif |
2510 | ", debug=%d\n", | 2456 | , |
2511 | musb_driver_name, musb_debug); | 2457 | musb_driver_name); |
2512 | return platform_driver_probe(&musb_driver, musb_probe); | 2458 | return platform_driver_probe(&musb_driver, musb_probe); |
2513 | } | 2459 | } |
2514 | 2460 | ||
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 91d67794e350..0e053b587960 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
@@ -212,8 +212,8 @@ enum musb_g_ep0_state { | |||
212 | * directly with the "flat" model, or after setting up an index register. | 212 | * directly with the "flat" model, or after setting up an index register. |
213 | */ | 213 | */ |
214 | 214 | ||
215 | #if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \ | 215 | #if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \ |
216 | || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN) \ | 216 | || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \ |
217 | || defined(CONFIG_ARCH_OMAP4) | 217 | || defined(CONFIG_ARCH_OMAP4) |
218 | /* REVISIT indexed access seemed to | 218 | /* REVISIT indexed access seemed to |
219 | * misbehave (on DaVinci) for at least peripheral IN ... | 219 | * misbehave (on DaVinci) for at least peripheral IN ... |
@@ -222,7 +222,7 @@ enum musb_g_ep0_state { | |||
222 | #endif | 222 | #endif |
223 | 223 | ||
224 | /* TUSB mapping: "flat" plus ep0 special cases */ | 224 | /* TUSB mapping: "flat" plus ep0 special cases */ |
225 | #if defined(CONFIG_USB_TUSB6010) | 225 | #if defined(CONFIG_USB_MUSB_TUSB6010) |
226 | #define musb_ep_select(_mbase, _epnum) \ | 226 | #define musb_ep_select(_mbase, _epnum) \ |
227 | musb_writeb((_mbase), MUSB_INDEX, (_epnum)) | 227 | musb_writeb((_mbase), MUSB_INDEX, (_epnum)) |
228 | #define MUSB_EP_OFFSET MUSB_TUSB_OFFSET | 228 | #define MUSB_EP_OFFSET MUSB_TUSB_OFFSET |
@@ -253,6 +253,34 @@ enum musb_g_ep0_state { | |||
253 | 253 | ||
254 | /******************************** TYPES *************************************/ | 254 | /******************************** TYPES *************************************/ |
255 | 255 | ||
256 | /** | ||
257 | * struct musb_platform_ops - Operations passed to musb_core by HW glue layer | ||
258 | * @init: turns on clocks, sets up platform-specific registers, etc | ||
259 | * @exit: undoes @init | ||
260 | * @set_mode: forcefully changes operating mode | ||
261 | * @try_idle: tries to idle the IP | ||
262 | * @vbus_status: returns vbus status if possible | ||
263 | * @set_vbus: forces vbus status | ||
264 | * @channel_program: pre check for standard dma channel_program func | ||
265 | */ | ||
266 | struct musb_platform_ops { | ||
267 | int (*init)(struct musb *musb); | ||
268 | int (*exit)(struct musb *musb); | ||
269 | |||
270 | void (*enable)(struct musb *musb); | ||
271 | void (*disable)(struct musb *musb); | ||
272 | |||
273 | int (*set_mode)(struct musb *musb, u8 mode); | ||
274 | void (*try_idle)(struct musb *musb, unsigned long timeout); | ||
275 | |||
276 | int (*vbus_status)(struct musb *musb); | ||
277 | void (*set_vbus)(struct musb *musb, int on); | ||
278 | |||
279 | int (*adjust_channel_params)(struct dma_channel *channel, | ||
280 | u16 packet_sz, u8 *mode, | ||
281 | dma_addr_t *dma_addr, u32 *len); | ||
282 | }; | ||
283 | |||
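Note: the kernel-doc above describes the hook table a hardware glue layer now hands to musb_core (via plat->platform_ops, copied into musb->ops in musb_init_controller earlier in this patch). A hedged sketch only, with hypothetical myglue_* names; the per-SoC glue files touched elsewhere in this series are the real implementations:

static int myglue_musb_init(struct musb *musb)
{
	/* enable clocks here, then grab the transceiver as noted above */
	musb->xceiv = otg_get_transceiver();
	if (!musb->xceiv)
		return -ENODEV;
	return 0;
}

static int myglue_musb_exit(struct musb *musb)
{
	otg_put_transceiver(musb->xceiv);
	return 0;
}

static const struct musb_platform_ops myglue_ops = {
	.init	= myglue_musb_init,
	.exit	= myglue_musb_exit,
	/* .set_vbus, .try_idle, etc. are optional; the inline wrappers
	 * added further down check for NULL before calling them. */
};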
256 | /* | 284 | /* |
257 | * struct musb_hw_ep - endpoint hardware (bidirectional) | 285 | * struct musb_hw_ep - endpoint hardware (bidirectional) |
258 | * | 286 | * |
@@ -263,7 +291,7 @@ struct musb_hw_ep { | |||
263 | void __iomem *fifo; | 291 | void __iomem *fifo; |
264 | void __iomem *regs; | 292 | void __iomem *regs; |
265 | 293 | ||
266 | #ifdef CONFIG_USB_TUSB6010 | 294 | #ifdef CONFIG_USB_MUSB_TUSB6010 |
267 | void __iomem *conf; | 295 | void __iomem *conf; |
268 | #endif | 296 | #endif |
269 | 297 | ||
@@ -280,7 +308,7 @@ struct musb_hw_ep { | |||
280 | struct dma_channel *tx_channel; | 308 | struct dma_channel *tx_channel; |
281 | struct dma_channel *rx_channel; | 309 | struct dma_channel *rx_channel; |
282 | 310 | ||
283 | #ifdef CONFIG_USB_TUSB6010 | 311 | #ifdef CONFIG_USB_MUSB_TUSB6010 |
284 | /* TUSB has "asynchronous" and "synchronous" dma modes */ | 312 | /* TUSB has "asynchronous" and "synchronous" dma modes */ |
285 | dma_addr_t fifo_async; | 313 | dma_addr_t fifo_async; |
286 | dma_addr_t fifo_sync; | 314 | dma_addr_t fifo_sync; |
@@ -305,7 +333,7 @@ struct musb_hw_ep { | |||
305 | #endif | 333 | #endif |
306 | }; | 334 | }; |
307 | 335 | ||
308 | static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep) | 336 | static inline struct musb_request *next_in_request(struct musb_hw_ep *hw_ep) |
309 | { | 337 | { |
310 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | 338 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC |
311 | return next_request(&hw_ep->ep_in); | 339 | return next_request(&hw_ep->ep_in); |
@@ -314,7 +342,7 @@ static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep) | |||
314 | #endif | 342 | #endif |
315 | } | 343 | } |
316 | 344 | ||
317 | static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep) | 345 | static inline struct musb_request *next_out_request(struct musb_hw_ep *hw_ep) |
318 | { | 346 | { |
319 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | 347 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC |
320 | return next_request(&hw_ep->ep_out); | 348 | return next_request(&hw_ep->ep_out); |
@@ -323,13 +351,39 @@ static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep) | |||
323 | #endif | 351 | #endif |
324 | } | 352 | } |
325 | 353 | ||
354 | struct musb_csr_regs { | ||
355 | /* FIFO registers */ | ||
356 | u16 txmaxp, txcsr, rxmaxp, rxcsr; | ||
357 | u16 rxfifoadd, txfifoadd; | ||
358 | u8 txtype, txinterval, rxtype, rxinterval; | ||
359 | u8 rxfifosz, txfifosz; | ||
360 | u8 txfunaddr, txhubaddr, txhubport; | ||
361 | u8 rxfunaddr, rxhubaddr, rxhubport; | ||
362 | }; | ||
363 | |||
364 | struct musb_context_registers { | ||
365 | |||
366 | u8 power; | ||
367 | u16 intrtxe, intrrxe; | ||
368 | u8 intrusbe; | ||
369 | u16 frame; | ||
370 | u8 index, testmode; | ||
371 | |||
372 | u8 devctl, busctl, misc; | ||
373 | |||
374 | struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; | ||
375 | }; | ||
376 | |||
326 | /* | 377 | /* |
327 | * struct musb - Driver instance data. | 378 | * struct musb - Driver instance data. |
328 | */ | 379 | */ |
329 | struct musb { | 380 | struct musb { |
330 | /* device lock */ | 381 | /* device lock */ |
331 | spinlock_t lock; | 382 | spinlock_t lock; |
332 | struct clk *clock; | 383 | |
384 | const struct musb_platform_ops *ops; | ||
385 | struct musb_context_registers context; | ||
386 | |||
333 | irqreturn_t (*isr)(int, void *); | 387 | irqreturn_t (*isr)(int, void *); |
334 | struct work_struct irq_work; | 388 | struct work_struct irq_work; |
335 | u16 hwvers; | 389 | u16 hwvers; |
@@ -358,11 +412,7 @@ struct musb { | |||
358 | 412 | ||
359 | struct timer_list otg_timer; | 413 | struct timer_list otg_timer; |
360 | #endif | 414 | #endif |
361 | 415 | struct notifier_block nb; | |
362 | /* called with IRQs blocked; ON/nonzero implies starting a session, | ||
363 | * and waiting at least a_wait_vrise_tmout. | ||
364 | */ | ||
365 | void (*board_set_vbus)(struct musb *, int is_on); | ||
366 | 416 | ||
367 | struct dma_controller *dma_controller; | 417 | struct dma_controller *dma_controller; |
368 | 418 | ||
@@ -370,7 +420,7 @@ struct musb { | |||
370 | void __iomem *ctrl_base; | 420 | void __iomem *ctrl_base; |
371 | void __iomem *mregs; | 421 | void __iomem *mregs; |
372 | 422 | ||
373 | #ifdef CONFIG_USB_TUSB6010 | 423 | #ifdef CONFIG_USB_MUSB_TUSB6010 |
374 | dma_addr_t async; | 424 | dma_addr_t async; |
375 | dma_addr_t sync; | 425 | dma_addr_t sync; |
376 | void __iomem *sync_va; | 426 | void __iomem *sync_va; |
@@ -397,8 +447,6 @@ struct musb { | |||
397 | u8 board_mode; /* enum musb_mode */ | 447 | u8 board_mode; /* enum musb_mode */ |
398 | int (*board_set_power)(int state); | 448 | int (*board_set_power)(int state); |
399 | 449 | ||
400 | int (*set_clock)(struct clk *clk, int is_active); | ||
401 | |||
402 | u8 min_power; /* vbus for periph, in mA/2 */ | 450 | u8 min_power; /* vbus for periph, in mA/2 */ |
403 | 451 | ||
404 | bool is_host; | 452 | bool is_host; |
@@ -450,6 +498,19 @@ struct musb { | |||
450 | struct usb_gadget_driver *gadget_driver; /* its driver */ | 498 | struct usb_gadget_driver *gadget_driver; /* its driver */ |
451 | #endif | 499 | #endif |
452 | 500 | ||
501 | /* | ||
502 | * FIXME: Remove this flag. | ||
503 | * | ||
504 | * This is only added to allow Blackfin to work | ||
505 | * with current driver. For some unknown reason | ||
506 | * Blackfin doesn't work with double buffering | ||
507 | * and that's enabled by default. | ||
508 | * | ||
509 | * We added this flag to forcefully disable double | ||
510 | * buffering until we get it working. | ||
511 | */ | ||
512 | unsigned double_buffer_not_ok:1 __deprecated; | ||
513 | |||
453 | struct musb_hdrc_config *config; | 514 | struct musb_hdrc_config *config; |
454 | 515 | ||
455 | #ifdef MUSB_CONFIG_PROC_FS | 516 | #ifdef MUSB_CONFIG_PROC_FS |
@@ -457,52 +518,6 @@ struct musb { | |||
457 | #endif | 518 | #endif |
458 | }; | 519 | }; |
459 | 520 | ||
460 | #ifdef CONFIG_PM | ||
461 | struct musb_csr_regs { | ||
462 | /* FIFO registers */ | ||
463 | u16 txmaxp, txcsr, rxmaxp, rxcsr; | ||
464 | u16 rxfifoadd, txfifoadd; | ||
465 | u8 txtype, txinterval, rxtype, rxinterval; | ||
466 | u8 rxfifosz, txfifosz; | ||
467 | u8 txfunaddr, txhubaddr, txhubport; | ||
468 | u8 rxfunaddr, rxhubaddr, rxhubport; | ||
469 | }; | ||
470 | |||
471 | struct musb_context_registers { | ||
472 | |||
473 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ | ||
474 | defined(CONFIG_ARCH_OMAP4) | ||
475 | u32 otg_sysconfig, otg_forcestandby; | ||
476 | #endif | ||
477 | u8 power; | ||
478 | u16 intrtxe, intrrxe; | ||
479 | u8 intrusbe; | ||
480 | u16 frame; | ||
481 | u8 index, testmode; | ||
482 | |||
483 | u8 devctl, busctl, misc; | ||
484 | |||
485 | struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; | ||
486 | }; | ||
487 | |||
488 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ | ||
489 | defined(CONFIG_ARCH_OMAP4) | ||
490 | extern void musb_platform_save_context(struct musb *musb, | ||
491 | struct musb_context_registers *musb_context); | ||
492 | extern void musb_platform_restore_context(struct musb *musb, | ||
493 | struct musb_context_registers *musb_context); | ||
494 | #else | ||
495 | #define musb_platform_save_context(m, x) do {} while (0) | ||
496 | #define musb_platform_restore_context(m, x) do {} while (0) | ||
497 | #endif | ||
498 | |||
499 | #endif | ||
500 | |||
501 | static inline void musb_set_vbus(struct musb *musb, int is_on) | ||
502 | { | ||
503 | musb->board_set_vbus(musb, is_on); | ||
504 | } | ||
505 | |||
506 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | 521 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC |
507 | static inline struct musb *gadget_to_musb(struct usb_gadget *g) | 522 | static inline struct musb *gadget_to_musb(struct usb_gadget *g) |
508 | { | 523 | { |
@@ -591,28 +606,63 @@ extern void musb_load_testpacket(struct musb *); | |||
591 | 606 | ||
592 | extern irqreturn_t musb_interrupt(struct musb *); | 607 | extern irqreturn_t musb_interrupt(struct musb *); |
593 | 608 | ||
594 | extern void musb_platform_enable(struct musb *musb); | ||
595 | extern void musb_platform_disable(struct musb *musb); | ||
596 | |||
597 | extern void musb_hnp_stop(struct musb *musb); | 609 | extern void musb_hnp_stop(struct musb *musb); |
598 | 610 | ||
599 | extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode); | 611 | static inline void musb_platform_set_vbus(struct musb *musb, int is_on) |
612 | { | ||
613 | if (musb->ops->set_vbus) | ||
614 | musb->ops->set_vbus(musb, is_on); | ||
615 | } | ||
600 | 616 | ||
601 | #if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \ | 617 | static inline void musb_platform_enable(struct musb *musb) |
602 | defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ | 618 | { |
603 | defined(CONFIG_ARCH_OMAP4) | 619 | if (musb->ops->enable) |
604 | extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout); | 620 | musb->ops->enable(musb); |
605 | #else | 621 | } |
606 | #define musb_platform_try_idle(x, y) do {} while (0) | ||
607 | #endif | ||
608 | 622 | ||
609 | #if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) | 623 | static inline void musb_platform_disable(struct musb *musb) |
610 | extern int musb_platform_get_vbus_status(struct musb *musb); | 624 | { |
611 | #else | 625 | if (musb->ops->disable) |
612 | #define musb_platform_get_vbus_status(x) 0 | 626 | musb->ops->disable(musb); |
613 | #endif | 627 | } |
628 | |||
629 | static inline int musb_platform_set_mode(struct musb *musb, u8 mode) | ||
630 | { | ||
631 | if (!musb->ops->set_mode) | ||
632 | return 0; | ||
633 | |||
634 | return musb->ops->set_mode(musb, mode); | ||
635 | } | ||
636 | |||
637 | static inline void musb_platform_try_idle(struct musb *musb, | ||
638 | unsigned long timeout) | ||
639 | { | ||
640 | if (musb->ops->try_idle) | ||
641 | musb->ops->try_idle(musb, timeout); | ||
642 | } | ||
614 | 643 | ||
615 | extern int __init musb_platform_init(struct musb *musb, void *board_data); | 644 | static inline int musb_platform_get_vbus_status(struct musb *musb) |
616 | extern int musb_platform_exit(struct musb *musb); | 645 | { |
646 | if (!musb->ops->vbus_status) | ||
647 | return 0; | ||
648 | |||
649 | return musb->ops->vbus_status(musb); | ||
650 | } | ||
651 | |||
652 | static inline int musb_platform_init(struct musb *musb) | ||
653 | { | ||
654 | if (!musb->ops->init) | ||
655 | return -EINVAL; | ||
656 | |||
657 | return musb->ops->init(musb); | ||
658 | } | ||
659 | |||
660 | static inline int musb_platform_exit(struct musb *musb) | ||
661 | { | ||
662 | if (!musb->ops->exit) | ||
663 | return -EINVAL; | ||
664 | |||
665 | return musb->ops->exit(musb); | ||
666 | } | ||
617 | 667 | ||
618 | #endif /* __MUSB_CORE_H__ */ | 668 | #endif /* __MUSB_CORE_H__ */ |
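The musb_core.h hunk above replaces the old extern musb_platform_*() prototypes (and the #ifdef'd PM-context helpers) with thin static inlines that dispatch through a per-glue callback table reached via musb->ops. As a rough sketch of how a platform glue layer would plug into this, the table below fills in those hooks; the struct tag "musb_platform_ops" and the my_glue_* names are assumptions for illustration — only the callback slots dereferenced by the wrappers above come from the diff itself.

	/*
	 * Illustrative glue-layer callback table for the new musb->ops hooks.
	 * The struct tag is assumed; only slots that the inline wrappers
	 * above actually dereference are shown.
	 */
	static int my_glue_init(struct musb *musb)
	{
		/* claim clocks, program the PHY, request the IRQ, ... */
		return 0;
	}

	static int my_glue_exit(struct musb *musb)
	{
		/* undo whatever my_glue_init() set up */
		return 0;
	}

	static const struct musb_platform_ops my_glue_ops = {
		.init = my_glue_init,	/* wrapper returns -EINVAL if NULL */
		.exit = my_glue_exit,	/* wrapper returns -EINVAL if NULL */
		/*
		 * enable/disable/set_mode/try_idle/vbus_status/set_vbus may
		 * stay NULL: the inline wrappers above check the pointer and
		 * quietly do nothing (or return 0) when a hook is absent.
		 */
	};

Note the asymmetry the wrappers encode: a missing optional hook degrades to a no-op, while a missing init or exit is treated as a hard error.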
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h index d73afdbde3ee..742eada5002e 100644 --- a/drivers/usb/musb/musb_debug.h +++ b/drivers/usb/musb/musb_debug.h | |||
@@ -42,23 +42,6 @@ | |||
42 | #define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args) | 42 | #define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args) |
43 | #define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args) | 43 | #define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args) |
44 | 44 | ||
45 | #define xprintk(level, facility, format, args...) do { \ | ||
46 | if (_dbg_level(level)) { \ | ||
47 | printk(facility "%s %d: " format , \ | ||
48 | __func__, __LINE__ , ## args); \ | ||
49 | } } while (0) | ||
50 | |||
51 | extern unsigned musb_debug; | ||
52 | |||
53 | static inline int _dbg_level(unsigned l) | ||
54 | { | ||
55 | return musb_debug >= l; | ||
56 | } | ||
57 | |||
58 | #define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args) | ||
59 | |||
60 | extern const char *otg_state_string(struct musb *); | ||
61 | |||
62 | #ifdef CONFIG_DEBUG_FS | 45 | #ifdef CONFIG_DEBUG_FS |
63 | extern int musb_init_debugfs(struct musb *musb); | 46 | extern int musb_init_debugfs(struct musb *musb); |
64 | extern void musb_exit_debugfs(struct musb *musb); | 47 | extern void musb_exit_debugfs(struct musb *musb); |
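The musb_debug.h hunk above retires the driver-private xprintk()/DBG() macros and the musb_debug verbosity knob; as the musb_gadget.c hunks further below show, the call sites move to the generic dev_dbg()/dev_vdbg() helpers keyed off the controller's struct device. For comparison, one conversion copied from those hunks:

	/* before: private verbosity level gated by the musb_debug module knob */
	DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	/* after: standard dynamic-debug path tied to the controller device */
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);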
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index 9e8639d4e862..b0176e4569e0 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
37 | #include <linux/init.h> | 37 | #include <linux/init.h> |
38 | #include <linux/list.h> | 38 | #include <linux/list.h> |
39 | #include <linux/kobject.h> | ||
40 | #include <linux/platform_device.h> | 39 | #include <linux/platform_device.h> |
41 | #include <linux/io.h> | 40 | #include <linux/io.h> |
42 | #include <linux/debugfs.h> | 41 | #include <linux/debugfs.h> |
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h index 916065ba9e70..3a97c4e2d4f5 100644 --- a/drivers/usb/musb/musb_dma.h +++ b/drivers/usb/musb/musb_dma.h | |||
@@ -169,6 +169,9 @@ struct dma_controller { | |||
169 | dma_addr_t dma_addr, | 169 | dma_addr_t dma_addr, |
170 | u32 length); | 170 | u32 length); |
171 | int (*channel_abort)(struct dma_channel *); | 171 | int (*channel_abort)(struct dma_channel *); |
172 | int (*is_compatible)(struct dma_channel *channel, | ||
173 | u16 maxpacket, | ||
174 | void *buf, u32 length); | ||
172 | }; | 175 | }; |
173 | 176 | ||
174 | /* called after channel_program(), may indicate a fault */ | 177 | /* called after channel_program(), may indicate a fault */ |
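The musb_dma.h hunk above adds an optional is_compatible() callback to struct dma_controller so a DMA engine can veto requests it cannot handle. Below is a hedged sketch of what an implementation might look like; the function name and the particular constraints (alignment, minimum length) are made up for illustration and are not part of this patch.

	/*
	 * Illustrative only: a DMA engine that cannot cope with unaligned or
	 * sub-packet buffers could reject them here, forcing the gadget core
	 * to fall back to PIO for those requests.
	 */
	static int example_dma_is_compatible(struct dma_channel *channel,
					     u16 maxpacket, void *buf, u32 length)
	{
		if ((unsigned long)buf & 0x3)
			return 0;	/* reject: not 32-bit aligned, use PIO */
		if (length < maxpacket)
			return 0;	/* reject: short transfer, use PIO */
		return 1;		/* accept: safe to DMA-map */
	}

In the musb_gadget.c diff that follows, the new map_dma_buffer() helper treats a zero return as a veto and leaves the request unmapped, so is_buffer_mapped() stays false and the transfer takes the PIO path instead.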
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index d065e23f123e..6aeb363e63e7 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -92,6 +92,84 @@ | |||
92 | 92 | ||
93 | /* ----------------------------------------------------------------------- */ | 93 | /* ----------------------------------------------------------------------- */ |
94 | 94 | ||
95 | #define is_buffer_mapped(req) (is_dma_capable() && \ | ||
96 | (req->map_state != UN_MAPPED)) | ||
97 | |||
98 | /* Maps the buffer to dma */ | ||
99 | |||
100 | static inline void map_dma_buffer(struct musb_request *request, | ||
101 | struct musb *musb, struct musb_ep *musb_ep) | ||
102 | { | ||
103 | int compatible = true; | ||
104 | struct dma_controller *dma = musb->dma_controller; | ||
105 | |||
106 | request->map_state = UN_MAPPED; | ||
107 | |||
108 | if (!is_dma_capable() || !musb_ep->dma) | ||
109 | return; | ||
110 | |||
111 | /* Check if DMA engine can handle this request. | ||
112 | * DMA code must reject the USB request explicitly. | ||
113 | * Default behaviour is to map the request. | ||
114 | */ | ||
115 | if (dma->is_compatible) | ||
116 | compatible = dma->is_compatible(musb_ep->dma, | ||
117 | musb_ep->packet_sz, request->request.buf, | ||
118 | request->request.length); | ||
119 | if (!compatible) | ||
120 | return; | ||
121 | |||
122 | if (request->request.dma == DMA_ADDR_INVALID) { | ||
123 | request->request.dma = dma_map_single( | ||
124 | musb->controller, | ||
125 | request->request.buf, | ||
126 | request->request.length, | ||
127 | request->tx | ||
128 | ? DMA_TO_DEVICE | ||
129 | : DMA_FROM_DEVICE); | ||
130 | request->map_state = MUSB_MAPPED; | ||
131 | } else { | ||
132 | dma_sync_single_for_device(musb->controller, | ||
133 | request->request.dma, | ||
134 | request->request.length, | ||
135 | request->tx | ||
136 | ? DMA_TO_DEVICE | ||
137 | : DMA_FROM_DEVICE); | ||
138 | request->map_state = PRE_MAPPED; | ||
139 | } | ||
140 | } | ||
141 | |||
142 | /* Unmap the buffer from dma and maps it back to cpu */ | ||
143 | static inline void unmap_dma_buffer(struct musb_request *request, | ||
144 | struct musb *musb) | ||
145 | { | ||
146 | if (!is_buffer_mapped(request)) | ||
147 | return; | ||
148 | |||
149 | if (request->request.dma == DMA_ADDR_INVALID) { | ||
150 | dev_vdbg(musb->controller, | ||
151 | "not unmapping a never mapped buffer\n"); | ||
152 | return; | ||
153 | } | ||
154 | if (request->map_state == MUSB_MAPPED) { | ||
155 | dma_unmap_single(musb->controller, | ||
156 | request->request.dma, | ||
157 | request->request.length, | ||
158 | request->tx | ||
159 | ? DMA_TO_DEVICE | ||
160 | : DMA_FROM_DEVICE); | ||
161 | request->request.dma = DMA_ADDR_INVALID; | ||
162 | } else { /* PRE_MAPPED */ | ||
163 | dma_sync_single_for_cpu(musb->controller, | ||
164 | request->request.dma, | ||
165 | request->request.length, | ||
166 | request->tx | ||
167 | ? DMA_TO_DEVICE | ||
168 | : DMA_FROM_DEVICE); | ||
169 | } | ||
170 | request->map_state = UN_MAPPED; | ||
171 | } | ||
172 | |||
95 | /* | 173 | /* |
96 | * Immediately complete a request. | 174 | * Immediately complete a request. |
97 | * | 175 | * |
@@ -112,37 +190,20 @@ __acquires(ep->musb->lock) | |||
112 | 190 | ||
113 | req = to_musb_request(request); | 191 | req = to_musb_request(request); |
114 | 192 | ||
115 | list_del(&request->list); | 193 | list_del(&req->list); |
116 | if (req->request.status == -EINPROGRESS) | 194 | if (req->request.status == -EINPROGRESS) |
117 | req->request.status = status; | 195 | req->request.status = status; |
118 | musb = req->musb; | 196 | musb = req->musb; |
119 | 197 | ||
120 | ep->busy = 1; | 198 | ep->busy = 1; |
121 | spin_unlock(&musb->lock); | 199 | spin_unlock(&musb->lock); |
122 | if (is_dma_capable()) { | 200 | unmap_dma_buffer(req, musb); |
123 | if (req->mapped) { | ||
124 | dma_unmap_single(musb->controller, | ||
125 | req->request.dma, | ||
126 | req->request.length, | ||
127 | req->tx | ||
128 | ? DMA_TO_DEVICE | ||
129 | : DMA_FROM_DEVICE); | ||
130 | req->request.dma = DMA_ADDR_INVALID; | ||
131 | req->mapped = 0; | ||
132 | } else if (req->request.dma != DMA_ADDR_INVALID) | ||
133 | dma_sync_single_for_cpu(musb->controller, | ||
134 | req->request.dma, | ||
135 | req->request.length, | ||
136 | req->tx | ||
137 | ? DMA_TO_DEVICE | ||
138 | : DMA_FROM_DEVICE); | ||
139 | } | ||
140 | if (request->status == 0) | 201 | if (request->status == 0) |
141 | DBG(5, "%s done request %p, %d/%d\n", | 202 | dev_dbg(musb->controller, "%s done request %p, %d/%d\n", |
142 | ep->end_point.name, request, | 203 | ep->end_point.name, request, |
143 | req->request.actual, req->request.length); | 204 | req->request.actual, req->request.length); |
144 | else | 205 | else |
145 | DBG(2, "%s request %p, %d/%d fault %d\n", | 206 | dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n", |
146 | ep->end_point.name, request, | 207 | ep->end_point.name, request, |
147 | req->request.actual, req->request.length, | 208 | req->request.actual, req->request.length, |
148 | request->status); | 209 | request->status); |
@@ -159,6 +220,7 @@ __acquires(ep->musb->lock) | |||
159 | */ | 220 | */ |
160 | static void nuke(struct musb_ep *ep, const int status) | 221 | static void nuke(struct musb_ep *ep, const int status) |
161 | { | 222 | { |
223 | struct musb *musb = ep->musb; | ||
162 | struct musb_request *req = NULL; | 224 | struct musb_request *req = NULL; |
163 | void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; | 225 | void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; |
164 | 226 | ||
@@ -186,14 +248,14 @@ static void nuke(struct musb_ep *ep, const int status) | |||
186 | } | 248 | } |
187 | 249 | ||
188 | value = c->channel_abort(ep->dma); | 250 | value = c->channel_abort(ep->dma); |
189 | DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value); | 251 | dev_dbg(musb->controller, "%s: abort DMA --> %d\n", |
252 | ep->name, value); | ||
190 | c->channel_release(ep->dma); | 253 | c->channel_release(ep->dma); |
191 | ep->dma = NULL; | 254 | ep->dma = NULL; |
192 | } | 255 | } |
193 | 256 | ||
194 | while (!list_empty(&(ep->req_list))) { | 257 | while (!list_empty(&ep->req_list)) { |
195 | req = container_of(ep->req_list.next, struct musb_request, | 258 | req = list_first_entry(&ep->req_list, struct musb_request, list); |
196 | request.list); | ||
197 | musb_g_giveback(ep, &req->request, status); | 259 | musb_g_giveback(ep, &req->request, status); |
198 | } | 260 | } |
199 | } | 261 | } |
@@ -270,7 +332,7 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
270 | 332 | ||
271 | /* we shouldn't get here while DMA is active ... but we do ... */ | 333 | /* we shouldn't get here while DMA is active ... but we do ... */ |
272 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { | 334 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { |
273 | DBG(4, "dma pending...\n"); | 335 | dev_dbg(musb->controller, "dma pending...\n"); |
274 | return; | 336 | return; |
275 | } | 337 | } |
276 | 338 | ||
@@ -282,23 +344,23 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
282 | (int)(request->length - request->actual)); | 344 | (int)(request->length - request->actual)); |
283 | 345 | ||
284 | if (csr & MUSB_TXCSR_TXPKTRDY) { | 346 | if (csr & MUSB_TXCSR_TXPKTRDY) { |
285 | DBG(5, "%s old packet still ready , txcsr %03x\n", | 347 | dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n", |
286 | musb_ep->end_point.name, csr); | 348 | musb_ep->end_point.name, csr); |
287 | return; | 349 | return; |
288 | } | 350 | } |
289 | 351 | ||
290 | if (csr & MUSB_TXCSR_P_SENDSTALL) { | 352 | if (csr & MUSB_TXCSR_P_SENDSTALL) { |
291 | DBG(5, "%s stalling, txcsr %03x\n", | 353 | dev_dbg(musb->controller, "%s stalling, txcsr %03x\n", |
292 | musb_ep->end_point.name, csr); | 354 | musb_ep->end_point.name, csr); |
293 | return; | 355 | return; |
294 | } | 356 | } |
295 | 357 | ||
296 | DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", | 358 | dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", |
297 | epnum, musb_ep->packet_sz, fifo_count, | 359 | epnum, musb_ep->packet_sz, fifo_count, |
298 | csr); | 360 | csr); |
299 | 361 | ||
300 | #ifndef CONFIG_MUSB_PIO_ONLY | 362 | #ifndef CONFIG_MUSB_PIO_ONLY |
301 | if (is_dma_capable() && musb_ep->dma) { | 363 | if (is_buffer_mapped(req)) { |
302 | struct dma_controller *c = musb->dma_controller; | 364 | struct dma_controller *c = musb->dma_controller; |
303 | size_t request_size; | 365 | size_t request_size; |
304 | 366 | ||
@@ -310,7 +372,7 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
310 | 372 | ||
311 | /* MUSB_TXCSR_P_ISO is still set correctly */ | 373 | /* MUSB_TXCSR_P_ISO is still set correctly */ |
312 | 374 | ||
313 | #ifdef CONFIG_USB_INVENTRA_DMA | 375 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) |
314 | { | 376 | { |
315 | if (request_size < musb_ep->packet_sz) | 377 | if (request_size < musb_ep->packet_sz) |
316 | musb_ep->dma->desired_mode = 0; | 378 | musb_ep->dma->desired_mode = 0; |
@@ -337,13 +399,15 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
337 | csr |= (MUSB_TXCSR_DMAENAB | | 399 | csr |= (MUSB_TXCSR_DMAENAB | |
338 | MUSB_TXCSR_MODE); | 400 | MUSB_TXCSR_MODE); |
339 | /* against programming guide */ | 401 | /* against programming guide */ |
340 | } else | 402 | } else { |
341 | csr |= (MUSB_TXCSR_AUTOSET | 403 | csr |= (MUSB_TXCSR_DMAENAB |
342 | | MUSB_TXCSR_DMAENAB | ||
343 | | MUSB_TXCSR_DMAMODE | 404 | | MUSB_TXCSR_DMAMODE |
344 | | MUSB_TXCSR_MODE); | 405 | | MUSB_TXCSR_MODE); |
345 | 406 | if (!musb_ep->hb_mult) | |
407 | csr |= MUSB_TXCSR_AUTOSET; | ||
408 | } | ||
346 | csr &= ~MUSB_TXCSR_P_UNDERRUN; | 409 | csr &= ~MUSB_TXCSR_P_UNDERRUN; |
410 | |||
347 | musb_writew(epio, MUSB_TXCSR, csr); | 411 | musb_writew(epio, MUSB_TXCSR, csr); |
348 | } | 412 | } |
349 | } | 413 | } |
@@ -393,6 +457,12 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
393 | #endif | 457 | #endif |
394 | 458 | ||
395 | if (!use_dma) { | 459 | if (!use_dma) { |
460 | /* | ||
461 | * Unmap the dma buffer back to cpu if dma channel | ||
462 | * programming fails | ||
463 | */ | ||
464 | unmap_dma_buffer(req, musb); | ||
465 | |||
396 | musb_write_fifo(musb_ep->hw_ep, fifo_count, | 466 | musb_write_fifo(musb_ep->hw_ep, fifo_count, |
397 | (u8 *) (request->buf + request->actual)); | 467 | (u8 *) (request->buf + request->actual)); |
398 | request->actual += fifo_count; | 468 | request->actual += fifo_count; |
@@ -402,7 +472,7 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
402 | } | 472 | } |
403 | 473 | ||
404 | /* host may already have the data when this message shows... */ | 474 | /* host may already have the data when this message shows... */ |
405 | DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", | 475 | dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", |
406 | musb_ep->end_point.name, use_dma ? "dma" : "pio", | 476 | musb_ep->end_point.name, use_dma ? "dma" : "pio", |
407 | request->actual, request->length, | 477 | request->actual, request->length, |
408 | musb_readw(epio, MUSB_TXCSR), | 478 | musb_readw(epio, MUSB_TXCSR), |
@@ -417,6 +487,7 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
417 | void musb_g_tx(struct musb *musb, u8 epnum) | 487 | void musb_g_tx(struct musb *musb, u8 epnum) |
418 | { | 488 | { |
419 | u16 csr; | 489 | u16 csr; |
490 | struct musb_request *req; | ||
420 | struct usb_request *request; | 491 | struct usb_request *request; |
421 | u8 __iomem *mbase = musb->mregs; | 492 | u8 __iomem *mbase = musb->mregs; |
422 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; | 493 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; |
@@ -424,10 +495,11 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
424 | struct dma_channel *dma; | 495 | struct dma_channel *dma; |
425 | 496 | ||
426 | musb_ep_select(mbase, epnum); | 497 | musb_ep_select(mbase, epnum); |
427 | request = next_request(musb_ep); | 498 | req = next_request(musb_ep); |
499 | request = &req->request; | ||
428 | 500 | ||
429 | csr = musb_readw(epio, MUSB_TXCSR); | 501 | csr = musb_readw(epio, MUSB_TXCSR); |
430 | DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); | 502 | dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); |
431 | 503 | ||
432 | dma = is_dma_capable() ? musb_ep->dma : NULL; | 504 | dma = is_dma_capable() ? musb_ep->dma : NULL; |
433 | 505 | ||
@@ -447,7 +519,8 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
447 | csr |= MUSB_TXCSR_P_WZC_BITS; | 519 | csr |= MUSB_TXCSR_P_WZC_BITS; |
448 | csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); | 520 | csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); |
449 | musb_writew(epio, MUSB_TXCSR, csr); | 521 | musb_writew(epio, MUSB_TXCSR, csr); |
450 | DBG(20, "underrun on ep%d, req %p\n", epnum, request); | 522 | dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", |
523 | epnum, request); | ||
451 | } | 524 | } |
452 | 525 | ||
453 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 526 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
@@ -455,7 +528,7 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
455 | * SHOULD NOT HAPPEN... has with CPPI though, after | 528 | * SHOULD NOT HAPPEN... has with CPPI though, after |
456 | * changing SENDSTALL (and other cases); harmless? | 529 | * changing SENDSTALL (and other cases); harmless? |
457 | */ | 530 | */ |
458 | DBG(5, "%s dma still busy?\n", musb_ep->end_point.name); | 531 | dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name); |
459 | return; | 532 | return; |
460 | } | 533 | } |
461 | 534 | ||
@@ -466,53 +539,52 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
466 | is_dma = 1; | 539 | is_dma = 1; |
467 | csr |= MUSB_TXCSR_P_WZC_BITS; | 540 | csr |= MUSB_TXCSR_P_WZC_BITS; |
468 | csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | | 541 | csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | |
469 | MUSB_TXCSR_TXPKTRDY); | 542 | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); |
470 | musb_writew(epio, MUSB_TXCSR, csr); | 543 | musb_writew(epio, MUSB_TXCSR, csr); |
471 | /* Ensure writebuffer is empty. */ | 544 | /* Ensure writebuffer is empty. */ |
472 | csr = musb_readw(epio, MUSB_TXCSR); | 545 | csr = musb_readw(epio, MUSB_TXCSR); |
473 | request->actual += musb_ep->dma->actual_len; | 546 | request->actual += musb_ep->dma->actual_len; |
474 | DBG(4, "TXCSR%d %04x, DMA off, len %zu, req %p\n", | 547 | dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n", |
475 | epnum, csr, musb_ep->dma->actual_len, request); | 548 | epnum, csr, musb_ep->dma->actual_len, request); |
476 | } | 549 | } |
477 | 550 | ||
478 | if (is_dma || request->actual == request->length) { | 551 | /* |
552 | * First, maybe a terminating short packet. Some DMA | ||
553 | * engines might handle this by themselves. | ||
554 | */ | ||
555 | if ((request->zero && request->length | ||
556 | && (request->length % musb_ep->packet_sz == 0) | ||
557 | && (request->actual == request->length)) | ||
558 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) | ||
559 | || (is_dma && (!dma->desired_mode || | ||
560 | (request->actual & | ||
561 | (musb_ep->packet_sz - 1)))) | ||
562 | #endif | ||
563 | ) { | ||
479 | /* | 564 | /* |
480 | * First, maybe a terminating short packet. Some DMA | 565 | * On DMA completion, FIFO may not be |
481 | * engines might handle this by themselves. | 566 | * available yet... |
482 | */ | 567 | */ |
483 | if ((request->zero && request->length | 568 | if (csr & MUSB_TXCSR_TXPKTRDY) |
484 | && request->length % musb_ep->packet_sz == 0) | 569 | return; |
485 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
486 | || (is_dma && (!dma->desired_mode || | ||
487 | (request->actual & | ||
488 | (musb_ep->packet_sz - 1)))) | ||
489 | #endif | ||
490 | ) { | ||
491 | /* | ||
492 | * On DMA completion, FIFO may not be | ||
493 | * available yet... | ||
494 | */ | ||
495 | if (csr & MUSB_TXCSR_TXPKTRDY) | ||
496 | return; | ||
497 | 570 | ||
498 | DBG(4, "sending zero pkt\n"); | 571 | dev_dbg(musb->controller, "sending zero pkt\n"); |
499 | musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE | 572 | musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE |
500 | | MUSB_TXCSR_TXPKTRDY); | 573 | | MUSB_TXCSR_TXPKTRDY); |
501 | request->zero = 0; | 574 | request->zero = 0; |
502 | } | 575 | } |
503 | 576 | ||
504 | if (request->actual == request->length) { | 577 | if (request->actual == request->length) { |
505 | musb_g_giveback(musb_ep, request, 0); | 578 | musb_g_giveback(musb_ep, request, 0); |
506 | request = musb_ep->desc ? next_request(musb_ep) : NULL; | 579 | req = musb_ep->desc ? next_request(musb_ep) : NULL; |
507 | if (!request) { | 580 | if (!req) { |
508 | DBG(4, "%s idle now\n", | 581 | dev_dbg(musb->controller, "%s idle now\n", |
509 | musb_ep->end_point.name); | 582 | musb_ep->end_point.name); |
510 | return; | 583 | return; |
511 | } | ||
512 | } | 584 | } |
513 | } | 585 | } |
514 | 586 | ||
515 | txstate(musb, to_musb_request(request)); | 587 | txstate(musb, req); |
516 | } | 588 | } |
517 | } | 589 | } |
518 | 590 | ||
@@ -572,17 +644,17 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
572 | 644 | ||
573 | /* We shouldn't get here while DMA is active, but we do... */ | 645 | /* We shouldn't get here while DMA is active, but we do... */ |
574 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { | 646 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { |
575 | DBG(4, "DMA pending...\n"); | 647 | dev_dbg(musb->controller, "DMA pending...\n"); |
576 | return; | 648 | return; |
577 | } | 649 | } |
578 | 650 | ||
579 | if (csr & MUSB_RXCSR_P_SENDSTALL) { | 651 | if (csr & MUSB_RXCSR_P_SENDSTALL) { |
580 | DBG(5, "%s stalling, RXCSR %04x\n", | 652 | dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n", |
581 | musb_ep->end_point.name, csr); | 653 | musb_ep->end_point.name, csr); |
582 | return; | 654 | return; |
583 | } | 655 | } |
584 | 656 | ||
585 | if (is_cppi_enabled() && musb_ep->dma) { | 657 | if (is_cppi_enabled() && is_buffer_mapped(req)) { |
586 | struct dma_controller *c = musb->dma_controller; | 658 | struct dma_controller *c = musb->dma_controller; |
587 | struct dma_channel *channel = musb_ep->dma; | 659 | struct dma_channel *channel = musb_ep->dma; |
588 | 660 | ||
@@ -613,7 +685,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
613 | len = musb_readw(epio, MUSB_RXCOUNT); | 685 | len = musb_readw(epio, MUSB_RXCOUNT); |
614 | if (request->actual < request->length) { | 686 | if (request->actual < request->length) { |
615 | #ifdef CONFIG_USB_INVENTRA_DMA | 687 | #ifdef CONFIG_USB_INVENTRA_DMA |
616 | if (is_dma_capable() && musb_ep->dma) { | 688 | if (is_buffer_mapped(req)) { |
617 | struct dma_controller *c; | 689 | struct dma_controller *c; |
618 | struct dma_channel *channel; | 690 | struct dma_channel *channel; |
619 | int use_dma = 0; | 691 | int use_dma = 0; |
@@ -643,8 +715,8 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
643 | */ | 715 | */ |
644 | 716 | ||
645 | csr |= MUSB_RXCSR_DMAENAB; | 717 | csr |= MUSB_RXCSR_DMAENAB; |
646 | csr |= MUSB_RXCSR_AUTOCLEAR; | ||
647 | #ifdef USE_MODE1 | 718 | #ifdef USE_MODE1 |
719 | csr |= MUSB_RXCSR_AUTOCLEAR; | ||
648 | /* csr |= MUSB_RXCSR_DMAMODE; */ | 720 | /* csr |= MUSB_RXCSR_DMAMODE; */ |
649 | 721 | ||
650 | /* this special sequence (enabling and then | 722 | /* this special sequence (enabling and then |
@@ -653,6 +725,10 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
653 | */ | 725 | */ |
654 | musb_writew(epio, MUSB_RXCSR, | 726 | musb_writew(epio, MUSB_RXCSR, |
655 | csr | MUSB_RXCSR_DMAMODE); | 727 | csr | MUSB_RXCSR_DMAMODE); |
728 | #else | ||
729 | if (!musb_ep->hb_mult && | ||
730 | musb_ep->hw_ep->rx_double_buffered) | ||
731 | csr |= MUSB_RXCSR_AUTOCLEAR; | ||
656 | #endif | 732 | #endif |
657 | musb_writew(epio, MUSB_RXCSR, csr); | 733 | musb_writew(epio, MUSB_RXCSR, csr); |
658 | 734 | ||
@@ -682,10 +758,57 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
682 | if (use_dma) | 758 | if (use_dma) |
683 | return; | 759 | return; |
684 | } | 760 | } |
761 | #elif defined(CONFIG_USB_UX500_DMA) | ||
762 | if ((is_buffer_mapped(req)) && | ||
763 | (request->actual < request->length)) { | ||
764 | |||
765 | struct dma_controller *c; | ||
766 | struct dma_channel *channel; | ||
767 | int transfer_size = 0; | ||
768 | |||
769 | c = musb->dma_controller; | ||
770 | channel = musb_ep->dma; | ||
771 | |||
772 | /* In case first packet is short */ | ||
773 | if (len < musb_ep->packet_sz) | ||
774 | transfer_size = len; | ||
775 | else if (request->short_not_ok) | ||
776 | transfer_size = min(request->length - | ||
777 | request->actual, | ||
778 | channel->max_len); | ||
779 | else | ||
780 | transfer_size = min(request->length - | ||
781 | request->actual, | ||
782 | (unsigned)len); | ||
783 | |||
784 | csr &= ~MUSB_RXCSR_DMAMODE; | ||
785 | csr |= (MUSB_RXCSR_DMAENAB | | ||
786 | MUSB_RXCSR_AUTOCLEAR); | ||
787 | |||
788 | musb_writew(epio, MUSB_RXCSR, csr); | ||
789 | |||
790 | if (transfer_size <= musb_ep->packet_sz) { | ||
791 | musb_ep->dma->desired_mode = 0; | ||
792 | } else { | ||
793 | musb_ep->dma->desired_mode = 1; | ||
794 | /* Mode must be set after DMAENAB */ | ||
795 | csr |= MUSB_RXCSR_DMAMODE; | ||
796 | musb_writew(epio, MUSB_RXCSR, csr); | ||
797 | } | ||
798 | |||
799 | if (c->channel_program(channel, | ||
800 | musb_ep->packet_sz, | ||
801 | channel->desired_mode, | ||
802 | request->dma | ||
803 | + request->actual, | ||
804 | transfer_size)) | ||
805 | |||
806 | return; | ||
807 | } | ||
685 | #endif /* Mentor's DMA */ | 808 | #endif /* Mentor's DMA */ |
686 | 809 | ||
687 | fifo_count = request->length - request->actual; | 810 | fifo_count = request->length - request->actual; |
688 | DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", | 811 | dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", |
689 | musb_ep->end_point.name, | 812 | musb_ep->end_point.name, |
690 | len, fifo_count, | 813 | len, fifo_count, |
691 | musb_ep->packet_sz); | 814 | musb_ep->packet_sz); |
@@ -693,7 +816,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
693 | fifo_count = min_t(unsigned, len, fifo_count); | 816 | fifo_count = min_t(unsigned, len, fifo_count); |
694 | 817 | ||
695 | #ifdef CONFIG_USB_TUSB_OMAP_DMA | 818 | #ifdef CONFIG_USB_TUSB_OMAP_DMA |
696 | if (tusb_dma_omap() && musb_ep->dma) { | 819 | if (tusb_dma_omap() && is_buffer_mapped(req)) { |
697 | struct dma_controller *c = musb->dma_controller; | 820 | struct dma_controller *c = musb->dma_controller; |
698 | struct dma_channel *channel = musb_ep->dma; | 821 | struct dma_channel *channel = musb_ep->dma; |
699 | u32 dma_addr = request->dma + request->actual; | 822 | u32 dma_addr = request->dma + request->actual; |
@@ -708,6 +831,21 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
708 | return; | 831 | return; |
709 | } | 832 | } |
710 | #endif | 833 | #endif |
834 | /* | ||
835 | * Unmap the dma buffer back to cpu if dma channel | ||
836 | * programming fails. This buffer is mapped if the | ||
837 | * channel allocation is successful | ||
838 | */ | ||
839 | if (is_buffer_mapped(req)) { | ||
840 | unmap_dma_buffer(req, musb); | ||
841 | |||
842 | /* | ||
843 | * Clear DMAENAB and AUTOCLEAR for the | ||
844 | * PIO mode transfer | ||
845 | */ | ||
846 | csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); | ||
847 | musb_writew(epio, MUSB_RXCSR, csr); | ||
848 | } | ||
711 | 849 | ||
712 | musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) | 850 | musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) |
713 | (request->buf + request->actual)); | 851 | (request->buf + request->actual)); |
@@ -735,6 +873,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
735 | void musb_g_rx(struct musb *musb, u8 epnum) | 873 | void musb_g_rx(struct musb *musb, u8 epnum) |
736 | { | 874 | { |
737 | u16 csr; | 875 | u16 csr; |
876 | struct musb_request *req; | ||
738 | struct usb_request *request; | 877 | struct usb_request *request; |
739 | void __iomem *mbase = musb->mregs; | 878 | void __iomem *mbase = musb->mregs; |
740 | struct musb_ep *musb_ep; | 879 | struct musb_ep *musb_ep; |
@@ -749,14 +888,16 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
749 | 888 | ||
750 | musb_ep_select(mbase, epnum); | 889 | musb_ep_select(mbase, epnum); |
751 | 890 | ||
752 | request = next_request(musb_ep); | 891 | req = next_request(musb_ep); |
753 | if (!request) | 892 | if (!req) |
754 | return; | 893 | return; |
755 | 894 | ||
895 | request = &req->request; | ||
896 | |||
756 | csr = musb_readw(epio, MUSB_RXCSR); | 897 | csr = musb_readw(epio, MUSB_RXCSR); |
757 | dma = is_dma_capable() ? musb_ep->dma : NULL; | 898 | dma = is_dma_capable() ? musb_ep->dma : NULL; |
758 | 899 | ||
759 | DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, | 900 | dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, |
760 | csr, dma ? " (dma)" : "", request); | 901 | csr, dma ? " (dma)" : "", request); |
761 | 902 | ||
762 | if (csr & MUSB_RXCSR_P_SENTSTALL) { | 903 | if (csr & MUSB_RXCSR_P_SENTSTALL) { |
@@ -771,19 +912,18 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
771 | csr &= ~MUSB_RXCSR_P_OVERRUN; | 912 | csr &= ~MUSB_RXCSR_P_OVERRUN; |
772 | musb_writew(epio, MUSB_RXCSR, csr); | 913 | musb_writew(epio, MUSB_RXCSR, csr); |
773 | 914 | ||
774 | DBG(3, "%s iso overrun on %p\n", musb_ep->name, request); | 915 | dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request); |
775 | if (request && request->status == -EINPROGRESS) | 916 | if (request->status == -EINPROGRESS) |
776 | request->status = -EOVERFLOW; | 917 | request->status = -EOVERFLOW; |
777 | } | 918 | } |
778 | if (csr & MUSB_RXCSR_INCOMPRX) { | 919 | if (csr & MUSB_RXCSR_INCOMPRX) { |
779 | /* REVISIT not necessarily an error */ | 920 | /* REVISIT not necessarily an error */ |
780 | DBG(4, "%s, incomprx\n", musb_ep->end_point.name); | 921 | dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name); |
781 | } | 922 | } |
782 | 923 | ||
783 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 924 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
784 | /* "should not happen"; likely RXPKTRDY pending for DMA */ | 925 | /* "should not happen"; likely RXPKTRDY pending for DMA */ |
785 | DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1, | 926 | dev_dbg(musb->controller, "%s busy, csr %04x\n", |
786 | "%s busy, csr %04x\n", | ||
787 | musb_ep->end_point.name, csr); | 927 | musb_ep->end_point.name, csr); |
788 | return; | 928 | return; |
789 | } | 929 | } |
@@ -797,14 +937,15 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
797 | 937 | ||
798 | request->actual += musb_ep->dma->actual_len; | 938 | request->actual += musb_ep->dma->actual_len; |
799 | 939 | ||
800 | DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", | 940 | dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", |
801 | epnum, csr, | 941 | epnum, csr, |
802 | musb_readw(epio, MUSB_RXCSR), | 942 | musb_readw(epio, MUSB_RXCSR), |
803 | musb_ep->dma->actual_len, request); | 943 | musb_ep->dma->actual_len, request); |
804 | 944 | ||
805 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) | 945 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ |
946 | defined(CONFIG_USB_UX500_DMA) | ||
806 | /* Autoclear doesn't clear RxPktRdy for short packets */ | 947 | /* Autoclear doesn't clear RxPktRdy for short packets */ |
807 | if ((dma->desired_mode == 0) | 948 | if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) |
808 | || (dma->actual_len | 949 | || (dma->actual_len |
809 | & (musb_ep->packet_sz - 1))) { | 950 | & (musb_ep->packet_sz - 1))) { |
810 | /* ack the read! */ | 951 | /* ack the read! */ |
@@ -815,24 +956,29 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
815 | /* incomplete, and not short? wait for next IN packet */ | 956 | /* incomplete, and not short? wait for next IN packet */ |
816 | if ((request->actual < request->length) | 957 | if ((request->actual < request->length) |
817 | && (musb_ep->dma->actual_len | 958 | && (musb_ep->dma->actual_len |
818 | == musb_ep->packet_sz)) | 959 | == musb_ep->packet_sz)) { |
960 | /* In double buffer case, continue to unload fifo if | ||
961 | * there is Rx packet in FIFO. | ||
962 | **/ | ||
963 | csr = musb_readw(epio, MUSB_RXCSR); | ||
964 | if ((csr & MUSB_RXCSR_RXPKTRDY) && | ||
965 | hw_ep->rx_double_buffered) | ||
966 | goto exit; | ||
819 | return; | 967 | return; |
968 | } | ||
820 | #endif | 969 | #endif |
821 | musb_g_giveback(musb_ep, request, 0); | 970 | musb_g_giveback(musb_ep, request, 0); |
822 | 971 | ||
823 | request = next_request(musb_ep); | 972 | req = next_request(musb_ep); |
824 | if (!request) | 973 | if (!req) |
825 | return; | 974 | return; |
826 | } | 975 | } |
827 | 976 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ | |
828 | /* analyze request if the ep is hot */ | 977 | defined(CONFIG_USB_UX500_DMA) |
829 | if (request) | 978 | exit: |
830 | rxstate(musb, to_musb_request(request)); | 979 | #endif |
831 | else | 980 | /* Analyze request */ |
832 | DBG(3, "packet waiting for %s%s request\n", | 981 | rxstate(musb, req); |
833 | musb_ep->desc ? "" : "inactive ", | ||
834 | musb_ep->end_point.name); | ||
835 | return; | ||
836 | } | 982 | } |
837 | 983 | ||
838 | /* ------------------------------------------------------------ */ | 984 | /* ------------------------------------------------------------ */ |
@@ -875,9 +1021,25 @@ static int musb_gadget_enable(struct usb_ep *ep, | |||
875 | 1021 | ||
876 | /* REVISIT this rules out high bandwidth periodic transfers */ | 1022 | /* REVISIT this rules out high bandwidth periodic transfers */ |
877 | tmp = le16_to_cpu(desc->wMaxPacketSize); | 1023 | tmp = le16_to_cpu(desc->wMaxPacketSize); |
878 | if (tmp & ~0x07ff) | 1024 | if (tmp & ~0x07ff) { |
879 | goto fail; | 1025 | int ok; |
880 | musb_ep->packet_sz = tmp; | 1026 | |
1027 | if (usb_endpoint_dir_in(desc)) | ||
1028 | ok = musb->hb_iso_tx; | ||
1029 | else | ||
1030 | ok = musb->hb_iso_rx; | ||
1031 | |||
1032 | if (!ok) { | ||
1033 | dev_dbg(musb->controller, "no support for high bandwidth ISO\n"); | ||
1034 | goto fail; | ||
1035 | } | ||
1036 | musb_ep->hb_mult = (tmp >> 11) & 3; | ||
1037 | } else { | ||
1038 | musb_ep->hb_mult = 0; | ||
1039 | } | ||
1040 | |||
1041 | musb_ep->packet_sz = tmp & 0x7ff; | ||
1042 | tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); | ||
881 | 1043 | ||
882 | /* enable the interrupts for the endpoint, set the endpoint | 1044 | /* enable the interrupts for the endpoint, set the endpoint |
883 | * packet size (or fail), set the mode, clear the fifo | 1045 | * packet size (or fail), set the mode, clear the fifo |
@@ -890,8 +1052,11 @@ static int musb_gadget_enable(struct usb_ep *ep, | |||
890 | musb_ep->is_in = 1; | 1052 | musb_ep->is_in = 1; |
891 | if (!musb_ep->is_in) | 1053 | if (!musb_ep->is_in) |
892 | goto fail; | 1054 | goto fail; |
893 | if (tmp > hw_ep->max_packet_sz_tx) | 1055 | |
1056 | if (tmp > hw_ep->max_packet_sz_tx) { | ||
1057 | dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); | ||
894 | goto fail; | 1058 | goto fail; |
1059 | } | ||
895 | 1060 | ||
896 | int_txe |= (1 << epnum); | 1061 | int_txe |= (1 << epnum); |
897 | musb_writew(mbase, MUSB_INTRTXE, int_txe); | 1062 | musb_writew(mbase, MUSB_INTRTXE, int_txe); |
@@ -900,13 +1065,13 @@ static int musb_gadget_enable(struct usb_ep *ep, | |||
900 | * likewise high bandwidth periodic tx | 1065 | * likewise high bandwidth periodic tx |
901 | */ | 1066 | */ |
902 | /* Set TXMAXP with the FIFO size of the endpoint | 1067 | /* Set TXMAXP with the FIFO size of the endpoint |
903 | * to disable double buffering mode. Currently, It seems that double | 1068 | * to disable double buffering mode. |
904 | * buffering has problem if musb RTL revision number < 2.0. | ||
905 | */ | 1069 | */ |
906 | if (musb->hwvers < MUSB_HWVERS_2000) | 1070 | if (musb->double_buffer_not_ok) |
907 | musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); | 1071 | musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); |
908 | else | 1072 | else |
909 | musb_writew(regs, MUSB_TXMAXP, tmp); | 1073 | musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz |
1074 | | (musb_ep->hb_mult << 11)); | ||
910 | 1075 | ||
911 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; | 1076 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; |
912 | if (musb_readw(regs, MUSB_TXCSR) | 1077 | if (musb_readw(regs, MUSB_TXCSR) |
@@ -927,8 +1092,11 @@ static int musb_gadget_enable(struct usb_ep *ep, | |||
927 | musb_ep->is_in = 0; | 1092 | musb_ep->is_in = 0; |
928 | if (musb_ep->is_in) | 1093 | if (musb_ep->is_in) |
929 | goto fail; | 1094 | goto fail; |
930 | if (tmp > hw_ep->max_packet_sz_rx) | 1095 | |
1096 | if (tmp > hw_ep->max_packet_sz_rx) { | ||
1097 | dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); | ||
931 | goto fail; | 1098 | goto fail; |
1099 | } | ||
932 | 1100 | ||
933 | int_rxe |= (1 << epnum); | 1101 | int_rxe |= (1 << epnum); |
934 | musb_writew(mbase, MUSB_INTRRXE, int_rxe); | 1102 | musb_writew(mbase, MUSB_INTRRXE, int_rxe); |
@@ -939,10 +1107,11 @@ static int musb_gadget_enable(struct usb_ep *ep, | |||
939 | /* Set RXMAXP with the FIFO size of the endpoint | 1107 | /* Set RXMAXP with the FIFO size of the endpoint |
940 | * to disable double buffering mode. | 1108 | * to disable double buffering mode. |
941 | */ | 1109 | */ |
942 | if (musb->hwvers < MUSB_HWVERS_2000) | 1110 | if (musb->double_buffer_not_ok) |
943 | musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx); | 1111 | musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); |
944 | else | 1112 | else |
945 | musb_writew(regs, MUSB_RXMAXP, tmp); | 1113 | musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz |
1114 | | (musb_ep->hb_mult << 11)); | ||
946 | 1115 | ||
947 | /* force shared fifo to OUT-only mode */ | 1116 | /* force shared fifo to OUT-only mode */ |
948 | if (hw_ep->is_shared_fifo) { | 1117 | if (hw_ep->is_shared_fifo) { |
@@ -1038,7 +1207,7 @@ static int musb_gadget_disable(struct usb_ep *ep) | |||
1038 | 1207 | ||
1039 | spin_unlock_irqrestore(&(musb->lock), flags); | 1208 | spin_unlock_irqrestore(&(musb->lock), flags); |
1040 | 1209 | ||
1041 | DBG(2, "%s\n", musb_ep->end_point.name); | 1210 | dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name); |
1042 | 1211 | ||
1043 | return status; | 1212 | return status; |
1044 | } | 1213 | } |
@@ -1050,16 +1219,19 @@ static int musb_gadget_disable(struct usb_ep *ep) | |||
1050 | struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) | 1219 | struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) |
1051 | { | 1220 | { |
1052 | struct musb_ep *musb_ep = to_musb_ep(ep); | 1221 | struct musb_ep *musb_ep = to_musb_ep(ep); |
1222 | struct musb *musb = musb_ep->musb; | ||
1053 | struct musb_request *request = NULL; | 1223 | struct musb_request *request = NULL; |
1054 | 1224 | ||
1055 | request = kzalloc(sizeof *request, gfp_flags); | 1225 | request = kzalloc(sizeof *request, gfp_flags); |
1056 | if (request) { | 1226 | if (!request) { |
1057 | INIT_LIST_HEAD(&request->request.list); | 1227 | dev_dbg(musb->controller, "not enough memory\n"); |
1058 | request->request.dma = DMA_ADDR_INVALID; | 1228 | return NULL; |
1059 | request->epnum = musb_ep->current_epnum; | ||
1060 | request->ep = musb_ep; | ||
1061 | } | 1229 | } |
1062 | 1230 | ||
1231 | request->request.dma = DMA_ADDR_INVALID; | ||
1232 | request->epnum = musb_ep->current_epnum; | ||
1233 | request->ep = musb_ep; | ||
1234 | |||
1063 | return &request->request; | 1235 | return &request->request; |
1064 | } | 1236 | } |
1065 | 1237 | ||
@@ -1086,7 +1258,7 @@ struct free_record { | |||
1086 | */ | 1258 | */ |
1087 | void musb_ep_restart(struct musb *musb, struct musb_request *req) | 1259 | void musb_ep_restart(struct musb *musb, struct musb_request *req) |
1088 | { | 1260 | { |
1089 | DBG(3, "<== %s request %p len %u on hw_ep%d\n", | 1261 | dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n", |
1090 | req->tx ? "TX/IN" : "RX/OUT", | 1262 | req->tx ? "TX/IN" : "RX/OUT", |
1091 | &req->request, req->request.length, req->epnum); | 1263 | &req->request, req->request.length, req->epnum); |
1092 | 1264 | ||
@@ -1120,7 +1292,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | |||
1120 | if (request->ep != musb_ep) | 1292 | if (request->ep != musb_ep) |
1121 | return -EINVAL; | 1293 | return -EINVAL; |
1122 | 1294 | ||
1123 | DBG(4, "<== to %s request=%p\n", ep->name, req); | 1295 | dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req); |
1124 | 1296 | ||
1125 | /* request is mine now... */ | 1297 | /* request is mine now... */ |
1126 | request->request.actual = 0; | 1298 | request->request.actual = 0; |
@@ -1128,45 +1300,23 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | |||
1128 | request->epnum = musb_ep->current_epnum; | 1300 | request->epnum = musb_ep->current_epnum; |
1129 | request->tx = musb_ep->is_in; | 1301 | request->tx = musb_ep->is_in; |
1130 | 1302 | ||
1131 | if (is_dma_capable() && musb_ep->dma) { | 1303 | map_dma_buffer(request, musb, musb_ep); |
1132 | if (request->request.dma == DMA_ADDR_INVALID) { | ||
1133 | request->request.dma = dma_map_single( | ||
1134 | musb->controller, | ||
1135 | request->request.buf, | ||
1136 | request->request.length, | ||
1137 | request->tx | ||
1138 | ? DMA_TO_DEVICE | ||
1139 | : DMA_FROM_DEVICE); | ||
1140 | request->mapped = 1; | ||
1141 | } else { | ||
1142 | dma_sync_single_for_device(musb->controller, | ||
1143 | request->request.dma, | ||
1144 | request->request.length, | ||
1145 | request->tx | ||
1146 | ? DMA_TO_DEVICE | ||
1147 | : DMA_FROM_DEVICE); | ||
1148 | request->mapped = 0; | ||
1149 | } | ||
1150 | } else if (!req->buf) { | ||
1151 | return -ENODATA; | ||
1152 | } else | ||
1153 | request->mapped = 0; | ||
1154 | 1304 | ||
1155 | spin_lock_irqsave(&musb->lock, lockflags); | 1305 | spin_lock_irqsave(&musb->lock, lockflags); |
1156 | 1306 | ||
1157 | /* don't queue if the ep is down */ | 1307 | /* don't queue if the ep is down */ |
1158 | if (!musb_ep->desc) { | 1308 | if (!musb_ep->desc) { |
1159 | DBG(4, "req %p queued to %s while ep %s\n", | 1309 | dev_dbg(musb->controller, "req %p queued to %s while ep %s\n", |
1160 | req, ep->name, "disabled"); | 1310 | req, ep->name, "disabled"); |
1161 | status = -ESHUTDOWN; | 1311 | status = -ESHUTDOWN; |
1162 | goto cleanup; | 1312 | goto cleanup; |
1163 | } | 1313 | } |
1164 | 1314 | ||
1165 | /* add request to the list */ | 1315 | /* add request to the list */ |
1166 | list_add_tail(&(request->request.list), &(musb_ep->req_list)); | 1316 | list_add_tail(&request->list, &musb_ep->req_list); |
1167 | 1317 | ||
1168 | /* it this is the head of the queue, start i/o ... */ | 1318 | /* it this is the head of the queue, start i/o ... */ |
1169 | if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next) | 1319 | if (!musb_ep->busy && &request->list == musb_ep->req_list.next) |
1170 | musb_ep_restart(musb, request); | 1320 | musb_ep_restart(musb, request); |
1171 | 1321 | ||
1172 | cleanup: | 1322 | cleanup: |
@@ -1177,7 +1327,8 @@ cleanup: | |||
1177 | static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) | 1327 | static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) |
1178 | { | 1328 | { |
1179 | struct musb_ep *musb_ep = to_musb_ep(ep); | 1329 | struct musb_ep *musb_ep = to_musb_ep(ep); |
1180 | struct usb_request *r; | 1330 | struct musb_request *req = to_musb_request(request); |
1331 | struct musb_request *r; | ||
1181 | unsigned long flags; | 1332 | unsigned long flags; |
1182 | int status = 0; | 1333 | int status = 0; |
1183 | struct musb *musb = musb_ep->musb; | 1334 | struct musb *musb = musb_ep->musb; |
@@ -1188,17 +1339,17 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) | |||
1188 | spin_lock_irqsave(&musb->lock, flags); | 1339 | spin_lock_irqsave(&musb->lock, flags); |
1189 | 1340 | ||
1190 | list_for_each_entry(r, &musb_ep->req_list, list) { | 1341 | list_for_each_entry(r, &musb_ep->req_list, list) { |
1191 | if (r == request) | 1342 | if (r == req) |
1192 | break; | 1343 | break; |
1193 | } | 1344 | } |
1194 | if (r != request) { | 1345 | if (r != req) { |
1195 | DBG(3, "request %p not queued to %s\n", request, ep->name); | 1346 | dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name); |
1196 | status = -EINVAL; | 1347 | status = -EINVAL; |
1197 | goto done; | 1348 | goto done; |
1198 | } | 1349 | } |
1199 | 1350 | ||
1200 | /* if the hardware doesn't have the request, easy ... */ | 1351 | /* if the hardware doesn't have the request, easy ... */ |
1201 | if (musb_ep->req_list.next != &request->list || musb_ep->busy) | 1352 | if (musb_ep->req_list.next != &req->list || musb_ep->busy) |
1202 | musb_g_giveback(musb_ep, request, -ECONNRESET); | 1353 | musb_g_giveback(musb_ep, request, -ECONNRESET); |
1203 | 1354 | ||
1204 | /* ... else abort the dma transfer ... */ | 1355 | /* ... else abort the dma transfer ... */ |
@@ -1255,10 +1406,10 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value) | |||
1255 | 1406 | ||
1256 | musb_ep_select(mbase, epnum); | 1407 | musb_ep_select(mbase, epnum); |
1257 | 1408 | ||
1258 | request = to_musb_request(next_request(musb_ep)); | 1409 | request = next_request(musb_ep); |
1259 | if (value) { | 1410 | if (value) { |
1260 | if (request) { | 1411 | if (request) { |
1261 | DBG(3, "request in progress, cannot halt %s\n", | 1412 | dev_dbg(musb->controller, "request in progress, cannot halt %s\n", |
1262 | ep->name); | 1413 | ep->name); |
1263 | status = -EAGAIN; | 1414 | status = -EAGAIN; |
1264 | goto done; | 1415 | goto done; |
@@ -1267,7 +1418,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value) | |||
1267 | if (musb_ep->is_in) { | 1418 | if (musb_ep->is_in) { |
1268 | csr = musb_readw(epio, MUSB_TXCSR); | 1419 | csr = musb_readw(epio, MUSB_TXCSR); |
1269 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { | 1420 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { |
1270 | DBG(3, "FIFO busy, cannot halt %s\n", ep->name); | 1421 | dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name); |
1271 | status = -EAGAIN; | 1422 | status = -EAGAIN; |
1272 | goto done; | 1423 | goto done; |
1273 | } | 1424 | } |
@@ -1276,7 +1427,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value) | |||
1276 | musb_ep->wedged = 0; | 1427 | musb_ep->wedged = 0; |
1277 | 1428 | ||
1278 | /* set/clear the stall and toggle bits */ | 1429 | /* set/clear the stall and toggle bits */ |
1279 | DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear"); | 1430 | dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear"); |
1280 | if (musb_ep->is_in) { | 1431 | if (musb_ep->is_in) { |
1281 | csr = musb_readw(epio, MUSB_TXCSR); | 1432 | csr = musb_readw(epio, MUSB_TXCSR); |
1282 | csr |= MUSB_TXCSR_P_WZC_BITS | 1433 | csr |= MUSB_TXCSR_P_WZC_BITS |
@@ -1303,7 +1454,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value) | |||
1303 | 1454 | ||
1304 | /* maybe start the first request in the queue */ | 1455 | /* maybe start the first request in the queue */ |
1305 | if (!musb_ep->busy && !value && request) { | 1456 | if (!musb_ep->busy && !value && request) { |
1306 | DBG(3, "restarting the request\n"); | 1457 | dev_dbg(musb->controller, "restarting the request\n"); |
1307 | musb_ep_restart(musb, request); | 1458 | musb_ep_restart(musb, request); |
1308 | } | 1459 | } |
1309 | 1460 | ||
@@ -1373,6 +1524,12 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep) | |||
1373 | csr = musb_readw(epio, MUSB_TXCSR); | 1524 | csr = musb_readw(epio, MUSB_TXCSR); |
1374 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { | 1525 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { |
1375 | csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; | 1526 | csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; |
1527 | /* | ||
1528 | * Setting both TXPKTRDY and FLUSHFIFO makes controller | ||
1529 | * to interrupt current FIFO loading, but not flushing | ||
1530 | * the already loaded ones. | ||
1531 | */ | ||
1532 | csr &= ~MUSB_TXCSR_TXPKTRDY; | ||
1376 | musb_writew(epio, MUSB_TXCSR, csr); | 1533 | musb_writew(epio, MUSB_TXCSR, csr); |
1377 | /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ | 1534 | /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ |
1378 | musb_writew(epio, MUSB_TXCSR, csr); | 1535 | musb_writew(epio, MUSB_TXCSR, csr); |
@@ -1434,7 +1591,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget) | |||
1434 | case OTG_STATE_B_IDLE: | 1591 | case OTG_STATE_B_IDLE: |
1435 | /* Start SRP ... OTG not required. */ | 1592 | /* Start SRP ... OTG not required. */ |
1436 | devctl = musb_readb(mregs, MUSB_DEVCTL); | 1593 | devctl = musb_readb(mregs, MUSB_DEVCTL); |
1437 | DBG(2, "Sending SRP: devctl: %02x\n", devctl); | 1594 | dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl); |
1438 | devctl |= MUSB_DEVCTL_SESSION; | 1595 | devctl |= MUSB_DEVCTL_SESSION; |
1439 | musb_writeb(mregs, MUSB_DEVCTL, devctl); | 1596 | musb_writeb(mregs, MUSB_DEVCTL, devctl); |
1440 | devctl = musb_readb(mregs, MUSB_DEVCTL); | 1597 | devctl = musb_readb(mregs, MUSB_DEVCTL); |
@@ -1451,6 +1608,10 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget) | |||
1451 | break; | 1608 | break; |
1452 | } | 1609 | } |
1453 | 1610 | ||
1611 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1612 | otg_start_srp(musb->xceiv); | ||
1613 | spin_lock_irqsave(&musb->lock, flags); | ||
1614 | |||
1454 | /* Block idling for at least 1s */ | 1615 | /* Block idling for at least 1s */ |
1455 | musb_platform_try_idle(musb, | 1616 | musb_platform_try_idle(musb, |
1456 | jiffies + msecs_to_jiffies(1 * HZ)); | 1617 | jiffies + msecs_to_jiffies(1 * HZ)); |
@@ -1458,7 +1619,8 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget) | |||
1458 | status = 0; | 1619 | status = 0; |
1459 | goto done; | 1620 | goto done; |
1460 | default: | 1621 | default: |
1461 | DBG(2, "Unhandled wake: %s\n", otg_state_string(musb)); | 1622 | dev_dbg(musb->controller, "Unhandled wake: %s\n", |
1623 | otg_state_string(musb->xceiv->state)); | ||
1462 | goto done; | 1624 | goto done; |
1463 | } | 1625 | } |
1464 | 1626 | ||
@@ -1467,7 +1629,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget) | |||
1467 | power = musb_readb(mregs, MUSB_POWER); | 1629 | power = musb_readb(mregs, MUSB_POWER); |
1468 | power |= MUSB_POWER_RESUME; | 1630 | power |= MUSB_POWER_RESUME; |
1469 | musb_writeb(mregs, MUSB_POWER, power); | 1631 | musb_writeb(mregs, MUSB_POWER, power); |
1470 | DBG(2, "issue wakeup\n"); | 1632 | dev_dbg(musb->controller, "issue wakeup\n"); |
1471 | 1633 | ||
1472 | /* FIXME do this next chunk in a timer callback, no udelay */ | 1634 | /* FIXME do this next chunk in a timer callback, no udelay */ |
1473 | mdelay(2); | 1635 | mdelay(2); |
@@ -1501,7 +1663,7 @@ static void musb_pullup(struct musb *musb, int is_on) | |||
1501 | 1663 | ||
1502 | /* FIXME if on, HdrcStart; if off, HdrcStop */ | 1664 | /* FIXME if on, HdrcStart; if off, HdrcStop */ |
1503 | 1665 | ||
1504 | DBG(3, "gadget %s D+ pullup %s\n", | 1666 | dev_dbg(musb->controller, "gadget %s D+ pullup %s\n", |
1505 | musb->gadget_driver->function, is_on ? "on" : "off"); | 1667 | musb->gadget_driver->function, is_on ? "on" : "off"); |
1506 | musb_writeb(musb->mregs, MUSB_POWER, power); | 1668 | musb_writeb(musb->mregs, MUSB_POWER, power); |
1507 | } | 1669 | } |
@@ -1509,7 +1671,7 @@ static void musb_pullup(struct musb *musb, int is_on) | |||
1509 | #if 0 | 1671 | #if 0 |
1510 | static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) | 1672 | static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) |
1511 | { | 1673 | { |
1512 | DBG(2, "<= %s =>\n", __func__); | 1674 | dev_dbg(musb->controller, "<= %s =>\n", __func__); |
1513 | 1675 | ||
1514 | /* | 1676 | /* |
1515 | * FIXME iff driver's softconnect flag is set (as it is during probe, | 1677 | * FIXME iff driver's softconnect flag is set (as it is during probe, |
@@ -1617,7 +1779,7 @@ static inline void __init musb_g_init_endpoints(struct musb *musb) | |||
1617 | struct musb_hw_ep *hw_ep; | 1779 | struct musb_hw_ep *hw_ep; |
1618 | unsigned count = 0; | 1780 | unsigned count = 0; |
1619 | 1781 | ||
1620 | /* intialize endpoint list just once */ | 1782 | /* initialize endpoint list just once */ |
1621 | INIT_LIST_HEAD(&(musb->g.ep_list)); | 1783 | INIT_LIST_HEAD(&(musb->g.ep_list)); |
1622 | 1784 | ||
1623 | for (epnum = 0, hw_ep = musb->endpoints; | 1785 | for (epnum = 0, hw_ep = musb->endpoints; |
@@ -1676,8 +1838,10 @@ int __init musb_gadget_setup(struct musb *musb) | |||
1676 | musb_platform_try_idle(musb, 0); | 1838 | musb_platform_try_idle(musb, 0); |
1677 | 1839 | ||
1678 | status = device_register(&musb->g.dev); | 1840 | status = device_register(&musb->g.dev); |
1679 | if (status != 0) | 1841 | if (status != 0) { |
1842 | put_device(&musb->g.dev); | ||
1680 | the_gadget = NULL; | 1843 | the_gadget = NULL; |
1844 | } | ||
1681 | return status; | 1845 | return status; |
1682 | } | 1846 | } |
1683 | 1847 | ||
@@ -1696,97 +1860,115 @@ void musb_gadget_cleanup(struct musb *musb) | |||
1696 | * | 1860 | * |
1697 | * -EINVAL something went wrong (not driver) | 1861 | * -EINVAL something went wrong (not driver) |
1698 | * -EBUSY another gadget is already using the controller | 1862 | * -EBUSY another gadget is already using the controller |
1699 | * -ENOMEM no memeory to perform the operation | 1863 | * -ENOMEM no memory to perform the operation |
1700 | * | 1864 | * |
1701 | * @param driver the gadget driver | 1865 | * @param driver the gadget driver |
1866 | * @param bind the driver's bind function | ||
1702 | * @return <0 if error, 0 if everything is fine | 1867 | * @return <0 if error, 0 if everything is fine |
1703 | */ | 1868 | */ |
1704 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | 1869 | int usb_gadget_probe_driver(struct usb_gadget_driver *driver, |
1870 | int (*bind)(struct usb_gadget *)) | ||
1705 | { | 1871 | { |
1706 | int retval; | 1872 | struct musb *musb = the_gadget; |
1707 | unsigned long flags; | 1873 | unsigned long flags; |
1708 | struct musb *musb = the_gadget; | 1874 | int retval = -EINVAL; |
1709 | 1875 | ||
1710 | if (!driver | 1876 | if (!driver |
1711 | || driver->speed != USB_SPEED_HIGH | 1877 | || driver->speed != USB_SPEED_HIGH |
1712 | || !driver->bind | 1878 | || !bind || !driver->setup) |
1713 | || !driver->setup) | 1879 | goto err0; |
1714 | return -EINVAL; | ||
1715 | 1880 | ||
1716 | /* driver must be initialized to support peripheral mode */ | 1881 | /* driver must be initialized to support peripheral mode */ |
1717 | if (!musb) { | 1882 | if (!musb) { |
1718 | DBG(1, "%s, no dev??\n", __func__); | 1883 | dev_dbg(musb->controller, "no dev??\n"); |
1719 | return -ENODEV; | 1884 | retval = -ENODEV; |
1885 | goto err0; | ||
1720 | } | 1886 | } |
1721 | 1887 | ||
1722 | DBG(3, "registering driver %s\n", driver->function); | 1888 | pm_runtime_get_sync(musb->controller); |
1723 | spin_lock_irqsave(&musb->lock, flags); | 1889 | |
1890 | dev_dbg(musb->controller, "registering driver %s\n", driver->function); | ||
1724 | 1891 | ||
1725 | if (musb->gadget_driver) { | 1892 | if (musb->gadget_driver) { |
1726 | DBG(1, "%s is already bound to %s\n", | 1893 | dev_dbg(musb->controller, "%s is already bound to %s\n", |
1727 | musb_driver_name, | 1894 | musb_driver_name, |
1728 | musb->gadget_driver->driver.name); | 1895 | musb->gadget_driver->driver.name); |
1729 | retval = -EBUSY; | 1896 | retval = -EBUSY; |
1730 | } else { | 1897 | goto err0; |
1731 | musb->gadget_driver = driver; | ||
1732 | musb->g.dev.driver = &driver->driver; | ||
1733 | driver->driver.bus = NULL; | ||
1734 | musb->softconnect = 1; | ||
1735 | retval = 0; | ||
1736 | } | 1898 | } |
1737 | 1899 | ||
1900 | spin_lock_irqsave(&musb->lock, flags); | ||
1901 | musb->gadget_driver = driver; | ||
1902 | musb->g.dev.driver = &driver->driver; | ||
1903 | driver->driver.bus = NULL; | ||
1904 | musb->softconnect = 1; | ||
1738 | spin_unlock_irqrestore(&musb->lock, flags); | 1905 | spin_unlock_irqrestore(&musb->lock, flags); |
1739 | 1906 | ||
1740 | if (retval == 0) { | 1907 | retval = bind(&musb->g); |
1741 | retval = driver->bind(&musb->g); | 1908 | if (retval) { |
1742 | if (retval != 0) { | 1909 | dev_dbg(musb->controller, "bind to driver %s failed --> %d\n", |
1743 | DBG(3, "bind to driver %s failed --> %d\n", | 1910 | driver->driver.name, retval); |
1744 | driver->driver.name, retval); | 1911 | goto err1; |
1745 | musb->gadget_driver = NULL; | 1912 | } |
1746 | musb->g.dev.driver = NULL; | ||
1747 | } | ||
1748 | 1913 | ||
1749 | spin_lock_irqsave(&musb->lock, flags); | 1914 | spin_lock_irqsave(&musb->lock, flags); |
1750 | 1915 | ||
1751 | otg_set_peripheral(musb->xceiv, &musb->g); | 1916 | otg_set_peripheral(musb->xceiv, &musb->g); |
1752 | musb->xceiv->state = OTG_STATE_B_IDLE; | 1917 | musb->xceiv->state = OTG_STATE_B_IDLE; |
1753 | musb->is_active = 1; | 1918 | musb->is_active = 1; |
1754 | 1919 | ||
1755 | /* FIXME this ignores the softconnect flag. Drivers are | 1920 | /* |
1756 | * allowed hold the peripheral inactive until for example | 1921 | * FIXME this ignores the softconnect flag. Drivers are |
1757 | * userspace hooks up printer hardware or DSP codecs, so | 1922 | * allowed hold the peripheral inactive until for example |
1758 | * hosts only see fully functional devices. | 1923 | * userspace hooks up printer hardware or DSP codecs, so |
1759 | */ | 1924 | * hosts only see fully functional devices. |
1925 | */ | ||
1760 | 1926 | ||
1761 | if (!is_otg_enabled(musb)) | 1927 | if (!is_otg_enabled(musb)) |
1762 | musb_start(musb); | 1928 | musb_start(musb); |
1763 | 1929 | ||
1764 | otg_set_peripheral(musb->xceiv, &musb->g); | 1930 | otg_set_peripheral(musb->xceiv, &musb->g); |
1765 | 1931 | ||
1766 | spin_unlock_irqrestore(&musb->lock, flags); | 1932 | spin_unlock_irqrestore(&musb->lock, flags); |
1767 | 1933 | ||
1768 | if (is_otg_enabled(musb)) { | 1934 | if (is_otg_enabled(musb)) { |
1769 | DBG(3, "OTG startup...\n"); | 1935 | struct usb_hcd *hcd = musb_to_hcd(musb); |
1770 | 1936 | ||
1771 | /* REVISIT: funcall to other code, which also | 1937 | dev_dbg(musb->controller, "OTG startup...\n"); |
1772 | * handles power budgeting ... this way also | 1938 | |
1773 | * ensures HdrcStart is indirectly called. | 1939 | /* REVISIT: funcall to other code, which also |
1774 | */ | 1940 | * handles power budgeting ... this way also |
1775 | retval = usb_add_hcd(musb_to_hcd(musb), -1, 0); | 1941 | * ensures HdrcStart is indirectly called. |
1776 | if (retval < 0) { | 1942 | */ |
1777 | DBG(1, "add_hcd failed, %d\n", retval); | 1943 | retval = usb_add_hcd(musb_to_hcd(musb), -1, 0); |
1778 | spin_lock_irqsave(&musb->lock, flags); | 1944 | if (retval < 0) { |
1779 | otg_set_peripheral(musb->xceiv, NULL); | 1945 | dev_dbg(musb->controller, "add_hcd failed, %d\n", retval); |
1780 | musb->gadget_driver = NULL; | 1946 | goto err2; |
1781 | musb->g.dev.driver = NULL; | ||
1782 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1783 | } | ||
1784 | } | 1947 | } |
1948 | |||
1949 | if ((musb->xceiv->last_event == USB_EVENT_ID) | ||
1950 | && musb->xceiv->set_vbus) | ||
1951 | otg_set_vbus(musb->xceiv, 1); | ||
1952 | |||
1953 | hcd->self.uses_pio_for_control = 1; | ||
1785 | } | 1954 | } |
1955 | if (musb->xceiv->last_event == USB_EVENT_NONE) | ||
1956 | pm_runtime_put(musb->controller); | ||
1957 | |||
1958 | return 0; | ||
1786 | 1959 | ||
1960 | err2: | ||
1961 | if (!is_otg_enabled(musb)) | ||
1962 | musb_stop(musb); | ||
1963 | |||
1964 | err1: | ||
1965 | musb->gadget_driver = NULL; | ||
1966 | musb->g.dev.driver = NULL; | ||
1967 | |||
1968 | err0: | ||
1787 | return retval; | 1969 | return retval; |
1788 | } | 1970 | } |
1789 | EXPORT_SYMBOL(usb_gadget_register_driver); | 1971 | EXPORT_SYMBOL(usb_gadget_probe_driver); |
1790 | 1972 | ||
1791 | static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) | 1973 | static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) |
1792 | { | 1974 | { |
@@ -1838,14 +2020,20 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) | |||
1838 | */ | 2020 | */ |
1839 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | 2021 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) |
1840 | { | 2022 | { |
1841 | unsigned long flags; | ||
1842 | int retval = 0; | ||
1843 | struct musb *musb = the_gadget; | 2023 | struct musb *musb = the_gadget; |
2024 | unsigned long flags; | ||
1844 | 2025 | ||
1845 | if (!driver || !driver->unbind || !musb) | 2026 | if (!driver || !driver->unbind || !musb) |
1846 | return -EINVAL; | 2027 | return -EINVAL; |
1847 | 2028 | ||
1848 | /* REVISIT always use otg_set_peripheral() here too; | 2029 | if (!musb->gadget_driver) |
2030 | return -EINVAL; | ||
2031 | |||
2032 | if (musb->xceiv->last_event == USB_EVENT_NONE) | ||
2033 | pm_runtime_get_sync(musb->controller); | ||
2034 | |||
2035 | /* | ||
2036 | * REVISIT always use otg_set_peripheral() here too; | ||
1849 | * this needs to shut down the OTG engine. | 2037 | * this needs to shut down the OTG engine. |
1850 | */ | 2038 | */ |
1851 | 2039 | ||
@@ -1855,29 +2043,26 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | |||
1855 | musb_hnp_stop(musb); | 2043 | musb_hnp_stop(musb); |
1856 | #endif | 2044 | #endif |
1857 | 2045 | ||
1858 | if (musb->gadget_driver == driver) { | 2046 | (void) musb_gadget_vbus_draw(&musb->g, 0); |
1859 | 2047 | ||
1860 | (void) musb_gadget_vbus_draw(&musb->g, 0); | 2048 | musb->xceiv->state = OTG_STATE_UNDEFINED; |
2049 | stop_activity(musb, driver); | ||
2050 | otg_set_peripheral(musb->xceiv, NULL); | ||
1861 | 2051 | ||
1862 | musb->xceiv->state = OTG_STATE_UNDEFINED; | 2052 | dev_dbg(musb->controller, "unregistering driver %s\n", driver->function); |
1863 | stop_activity(musb, driver); | ||
1864 | otg_set_peripheral(musb->xceiv, NULL); | ||
1865 | 2053 | ||
1866 | DBG(3, "unregistering driver %s\n", driver->function); | 2054 | spin_unlock_irqrestore(&musb->lock, flags); |
1867 | spin_unlock_irqrestore(&musb->lock, flags); | 2055 | driver->unbind(&musb->g); |
1868 | driver->unbind(&musb->g); | 2056 | spin_lock_irqsave(&musb->lock, flags); |
1869 | spin_lock_irqsave(&musb->lock, flags); | ||
1870 | 2057 | ||
1871 | musb->gadget_driver = NULL; | 2058 | musb->gadget_driver = NULL; |
1872 | musb->g.dev.driver = NULL; | 2059 | musb->g.dev.driver = NULL; |
1873 | 2060 | ||
1874 | musb->is_active = 0; | 2061 | musb->is_active = 0; |
1875 | musb_platform_try_idle(musb, 0); | 2062 | musb_platform_try_idle(musb, 0); |
1876 | } else | ||
1877 | retval = -EINVAL; | ||
1878 | spin_unlock_irqrestore(&musb->lock, flags); | 2063 | spin_unlock_irqrestore(&musb->lock, flags); |
1879 | 2064 | ||
1880 | if (is_otg_enabled(musb) && retval == 0) { | 2065 | if (is_otg_enabled(musb)) { |
1881 | usb_remove_hcd(musb_to_hcd(musb)); | 2066 | usb_remove_hcd(musb_to_hcd(musb)); |
1882 | /* FIXME we need to be able to register another | 2067 | /* FIXME we need to be able to register another |
1883 | * gadget driver here and have everything work; | 2068 | * gadget driver here and have everything work; |
@@ -1885,7 +2070,12 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | |||
1885 | */ | 2070 | */ |
1886 | } | 2071 | } |
1887 | 2072 | ||
1888 | return retval; | 2073 | if (!is_otg_enabled(musb)) |
2074 | musb_stop(musb); | ||
2075 | |||
2076 | pm_runtime_put(musb->controller); | ||
2077 | |||
2078 | return 0; | ||
1889 | } | 2079 | } |
1890 | EXPORT_SYMBOL(usb_gadget_unregister_driver); | 2080 | EXPORT_SYMBOL(usb_gadget_unregister_driver); |
1891 | 2081 | ||
@@ -1911,7 +2101,7 @@ void musb_g_resume(struct musb *musb) | |||
1911 | break; | 2101 | break; |
1912 | default: | 2102 | default: |
1913 | WARNING("unhandled RESUME transition (%s)\n", | 2103 | WARNING("unhandled RESUME transition (%s)\n", |
1914 | otg_state_string(musb)); | 2104 | otg_state_string(musb->xceiv->state)); |
1915 | } | 2105 | } |
1916 | } | 2106 | } |
1917 | 2107 | ||
@@ -1921,7 +2111,7 @@ void musb_g_suspend(struct musb *musb) | |||
1921 | u8 devctl; | 2111 | u8 devctl; |
1922 | 2112 | ||
1923 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | 2113 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); |
1924 | DBG(3, "devctl %02x\n", devctl); | 2114 | dev_dbg(musb->controller, "devctl %02x\n", devctl); |
1925 | 2115 | ||
1926 | switch (musb->xceiv->state) { | 2116 | switch (musb->xceiv->state) { |
1927 | case OTG_STATE_B_IDLE: | 2117 | case OTG_STATE_B_IDLE: |
@@ -1941,7 +2131,7 @@ void musb_g_suspend(struct musb *musb) | |||
1941 | * A_PERIPHERAL may need care too | 2131 | * A_PERIPHERAL may need care too |
1942 | */ | 2132 | */ |
1943 | WARNING("unhandled SUSPEND transition (%s)\n", | 2133 | WARNING("unhandled SUSPEND transition (%s)\n", |
1944 | otg_state_string(musb)); | 2134 | otg_state_string(musb->xceiv->state)); |
1945 | } | 2135 | } |
1946 | } | 2136 | } |
1947 | 2137 | ||
@@ -1957,7 +2147,7 @@ void musb_g_disconnect(struct musb *musb) | |||
1957 | void __iomem *mregs = musb->mregs; | 2147 | void __iomem *mregs = musb->mregs; |
1958 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); | 2148 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); |
1959 | 2149 | ||
1960 | DBG(3, "devctl %02x\n", devctl); | 2150 | dev_dbg(musb->controller, "devctl %02x\n", devctl); |
1961 | 2151 | ||
1962 | /* clear HR */ | 2152 | /* clear HR */ |
1963 | musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); | 2153 | musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); |
@@ -1975,8 +2165,8 @@ void musb_g_disconnect(struct musb *musb) | |||
1975 | switch (musb->xceiv->state) { | 2165 | switch (musb->xceiv->state) { |
1976 | default: | 2166 | default: |
1977 | #ifdef CONFIG_USB_MUSB_OTG | 2167 | #ifdef CONFIG_USB_MUSB_OTG |
1978 | DBG(2, "Unhandled disconnect %s, setting a_idle\n", | 2168 | dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n", |
1979 | otg_state_string(musb)); | 2169 | otg_state_string(musb->xceiv->state)); |
1980 | musb->xceiv->state = OTG_STATE_A_IDLE; | 2170 | musb->xceiv->state = OTG_STATE_A_IDLE; |
1981 | MUSB_HST_MODE(musb); | 2171 | MUSB_HST_MODE(musb); |
1982 | break; | 2172 | break; |
@@ -2006,7 +2196,7 @@ __acquires(musb->lock) | |||
2006 | u8 devctl = musb_readb(mbase, MUSB_DEVCTL); | 2196 | u8 devctl = musb_readb(mbase, MUSB_DEVCTL); |
2007 | u8 power; | 2197 | u8 power; |
2008 | 2198 | ||
2009 | DBG(3, "<== %s addr=%x driver '%s'\n", | 2199 | dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n", |
2010 | (devctl & MUSB_DEVCTL_BDEVICE) | 2200 | (devctl & MUSB_DEVCTL_BDEVICE) |
2011 | ? "B-Device" : "A-Device", | 2201 | ? "B-Device" : "A-Device", |
2012 | musb_readb(mbase, MUSB_FADDR), | 2202 | musb_readb(mbase, MUSB_FADDR), |
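The musb_gadget.c hunks above move peripheral-side registration from usb_gadget_register_driver() to usb_gadget_probe_driver(), which takes the bind routine as a second argument instead of reading driver->bind, and add pm_runtime handling plus a proper error-unwind path. A minimal sketch of a gadget driver calling the new entry point is shown below; the my_gadget_* names and the stub setup/unbind bodies are illustrative assumptions, not part of this patch.

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* Stub control-request handler; a real driver decodes ctrl here. */
static int my_gadget_setup(struct usb_gadget *gadget,
			   const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;
}

static void my_gadget_unbind(struct usb_gadget *gadget)
{
	/* release whatever my_gadget_bind() allocated */
}

/* The bind callback is now passed to usb_gadget_probe_driver() directly,
 * rather than being a member of struct usb_gadget_driver.
 */
static int my_gadget_bind(struct usb_gadget *gadget)
{
	/* claim endpoints, allocate requests, etc. */
	return 0;
}

static struct usb_gadget_driver my_gadget_driver = {
	.function	= "my_gadget",
	.speed		= USB_SPEED_HIGH,	/* musb rejects anything slower */
	.setup		= my_gadget_setup,
	.unbind		= my_gadget_unbind,
	.driver		= {
		.name	= "my_gadget",
	},
};

static int __init my_gadget_init(void)
{
	return usb_gadget_probe_driver(&my_gadget_driver, my_gadget_bind);
}
module_init(my_gadget_init);

static void __exit my_gadget_exit(void)
{
	usb_gadget_unregister_driver(&my_gadget_driver);
}
module_exit(my_gadget_exit);

MODULE_LICENSE("GPL");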
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h index 572b1da7f2dc..66b7c5e0fb44 100644 --- a/drivers/usb/musb/musb_gadget.h +++ b/drivers/usb/musb/musb_gadget.h | |||
@@ -35,13 +35,22 @@ | |||
35 | #ifndef __MUSB_GADGET_H | 35 | #ifndef __MUSB_GADGET_H |
36 | #define __MUSB_GADGET_H | 36 | #define __MUSB_GADGET_H |
37 | 37 | ||
38 | #include <linux/list.h> | ||
39 | |||
40 | enum buffer_map_state { | ||
41 | UN_MAPPED = 0, | ||
42 | PRE_MAPPED, | ||
43 | MUSB_MAPPED | ||
44 | }; | ||
45 | |||
38 | struct musb_request { | 46 | struct musb_request { |
39 | struct usb_request request; | 47 | struct usb_request request; |
48 | struct list_head list; | ||
40 | struct musb_ep *ep; | 49 | struct musb_ep *ep; |
41 | struct musb *musb; | 50 | struct musb *musb; |
42 | u8 tx; /* endpoint direction */ | 51 | u8 tx; /* endpoint direction */ |
43 | u8 epnum; | 52 | u8 epnum; |
44 | u8 mapped; | 53 | enum buffer_map_state map_state; |
45 | }; | 54 | }; |
46 | 55 | ||
47 | static inline struct musb_request *to_musb_request(struct usb_request *req) | 56 | static inline struct musb_request *to_musb_request(struct usb_request *req) |
@@ -79,6 +88,8 @@ struct musb_ep { | |||
79 | 88 | ||
80 | /* true if lock must be dropped but req_list may not be advanced */ | 89 | /* true if lock must be dropped but req_list may not be advanced */ |
81 | u8 busy; | 90 | u8 busy; |
91 | |||
92 | u8 hb_mult; | ||
82 | }; | 93 | }; |
83 | 94 | ||
84 | static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) | 95 | static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) |
@@ -86,13 +97,13 @@ static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) | |||
86 | return ep ? container_of(ep, struct musb_ep, end_point) : NULL; | 97 | return ep ? container_of(ep, struct musb_ep, end_point) : NULL; |
87 | } | 98 | } |
88 | 99 | ||
89 | static inline struct usb_request *next_request(struct musb_ep *ep) | 100 | static inline struct musb_request *next_request(struct musb_ep *ep) |
90 | { | 101 | { |
91 | struct list_head *queue = &ep->req_list; | 102 | struct list_head *queue = &ep->req_list; |
92 | 103 | ||
93 | if (list_empty(queue)) | 104 | if (list_empty(queue)) |
94 | return NULL; | 105 | return NULL; |
95 | return container_of(queue->next, struct usb_request, list); | 106 | return container_of(queue->next, struct musb_request, list); |
96 | } | 107 | } |
97 | 108 | ||
98 | extern void musb_g_tx(struct musb *musb, u8 epnum); | 109 | extern void musb_g_tx(struct musb *musb, u8 epnum); |
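With the header changes above, gadget requests are linked through the new list member of struct musb_request, next_request() returns the wrapper type instead of the embedded struct usb_request, and the old u8 mapped flag becomes the three-state enum buffer_map_state. A hedged sketch of a caller written against the new types follows; the helper itself is hypothetical and only illustrates how the head of an endpoint queue is reached now.

#include <linux/kernel.h>
#include <linux/list.h>
#include "musb_core.h"
#include "musb_gadget.h"

/* Hypothetical helper: peek at the request at the head of an endpoint's
 * queue.  The usb_request fields are reached through ->request and the
 * DMA mapping state through ->map_state.
 */
static struct usb_request *peek_head_request(struct musb_ep *musb_ep)
{
	struct musb_request *req = next_request(musb_ep);

	if (!req)
		return NULL;

	if (req->map_state == UN_MAPPED)
		pr_debug("head request %p not yet DMA-mapped\n",
			 &req->request);

	return &req->request;
}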
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 6dd03f4c5f49..b2faff235507 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c | |||
@@ -209,7 +209,7 @@ static inline void musb_try_b_hnp_enable(struct musb *musb) | |||
209 | void __iomem *mbase = musb->mregs; | 209 | void __iomem *mbase = musb->mregs; |
210 | u8 devctl; | 210 | u8 devctl; |
211 | 211 | ||
212 | DBG(1, "HNP: Setting HR\n"); | 212 | dev_dbg(musb->controller, "HNP: Setting HR\n"); |
213 | devctl = musb_readb(mbase, MUSB_DEVCTL); | 213 | devctl = musb_readb(mbase, MUSB_DEVCTL); |
214 | musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR); | 214 | musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR); |
215 | } | 215 | } |
@@ -304,10 +304,9 @@ __acquires(musb->lock) | |||
304 | } | 304 | } |
305 | 305 | ||
306 | /* Maybe start the first request in the queue */ | 306 | /* Maybe start the first request in the queue */ |
307 | request = to_musb_request( | 307 | request = next_request(musb_ep); |
308 | next_request(musb_ep)); | ||
309 | if (!musb_ep->busy && request) { | 308 | if (!musb_ep->busy && request) { |
310 | DBG(3, "restarting the request\n"); | 309 | dev_dbg(musb->controller, "restarting the request\n"); |
311 | musb_ep_restart(musb, request); | 310 | musb_ep_restart(musb, request); |
312 | } | 311 | } |
313 | 312 | ||
@@ -491,10 +490,12 @@ stall: | |||
491 | static void ep0_rxstate(struct musb *musb) | 490 | static void ep0_rxstate(struct musb *musb) |
492 | { | 491 | { |
493 | void __iomem *regs = musb->control_ep->regs; | 492 | void __iomem *regs = musb->control_ep->regs; |
493 | struct musb_request *request; | ||
494 | struct usb_request *req; | 494 | struct usb_request *req; |
495 | u16 count, csr; | 495 | u16 count, csr; |
496 | 496 | ||
497 | req = next_ep0_request(musb); | 497 | request = next_ep0_request(musb); |
498 | req = &request->request; | ||
498 | 499 | ||
499 | /* read packet and ack; or stall because of gadget driver bug: | 500 | /* read packet and ack; or stall because of gadget driver bug: |
500 | * should have provided the rx buffer before setup() returned. | 501 | * should have provided the rx buffer before setup() returned. |
@@ -544,17 +545,20 @@ static void ep0_rxstate(struct musb *musb) | |||
544 | static void ep0_txstate(struct musb *musb) | 545 | static void ep0_txstate(struct musb *musb) |
545 | { | 546 | { |
546 | void __iomem *regs = musb->control_ep->regs; | 547 | void __iomem *regs = musb->control_ep->regs; |
547 | struct usb_request *request = next_ep0_request(musb); | 548 | struct musb_request *req = next_ep0_request(musb); |
549 | struct usb_request *request; | ||
548 | u16 csr = MUSB_CSR0_TXPKTRDY; | 550 | u16 csr = MUSB_CSR0_TXPKTRDY; |
549 | u8 *fifo_src; | 551 | u8 *fifo_src; |
550 | u8 fifo_count; | 552 | u8 fifo_count; |
551 | 553 | ||
552 | if (!request) { | 554 | if (!req) { |
553 | /* WARN_ON(1); */ | 555 | /* WARN_ON(1); */ |
554 | DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0)); | 556 | dev_dbg(musb->controller, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0)); |
555 | return; | 557 | return; |
556 | } | 558 | } |
557 | 559 | ||
560 | request = &req->request; | ||
561 | |||
558 | /* load the data */ | 562 | /* load the data */ |
559 | fifo_src = (u8 *) request->buf + request->actual; | 563 | fifo_src = (u8 *) request->buf + request->actual; |
560 | fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE, | 564 | fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE, |
@@ -598,7 +602,7 @@ static void ep0_txstate(struct musb *musb) | |||
598 | static void | 602 | static void |
599 | musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) | 603 | musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) |
600 | { | 604 | { |
601 | struct usb_request *r; | 605 | struct musb_request *r; |
602 | void __iomem *regs = musb->control_ep->regs; | 606 | void __iomem *regs = musb->control_ep->regs; |
603 | 607 | ||
604 | musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req); | 608 | musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req); |
@@ -606,7 +610,7 @@ musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) | |||
606 | /* NOTE: earlier 2.6 versions changed setup packets to host | 610 | /* NOTE: earlier 2.6 versions changed setup packets to host |
607 | * order, but now USB packets always stay in USB byte order. | 611 | * order, but now USB packets always stay in USB byte order. |
608 | */ | 612 | */ |
609 | DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n", | 613 | dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n", |
610 | req->bRequestType, | 614 | req->bRequestType, |
611 | req->bRequest, | 615 | req->bRequest, |
612 | le16_to_cpu(req->wValue), | 616 | le16_to_cpu(req->wValue), |
@@ -616,7 +620,7 @@ musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) | |||
616 | /* clean up any leftover transfers */ | 620 | /* clean up any leftover transfers */ |
617 | r = next_ep0_request(musb); | 621 | r = next_ep0_request(musb); |
618 | if (r) | 622 | if (r) |
619 | musb_g_ep0_giveback(musb, r); | 623 | musb_g_ep0_giveback(musb, &r->request); |
620 | 624 | ||
621 | /* For zero-data requests we want to delay the STATUS stage to | 625 | /* For zero-data requests we want to delay the STATUS stage to |
622 | * avoid SETUPEND errors. If we read data (OUT), delay accepting | 626 | * avoid SETUPEND errors. If we read data (OUT), delay accepting |
@@ -674,7 +678,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb) | |||
674 | csr = musb_readw(regs, MUSB_CSR0); | 678 | csr = musb_readw(regs, MUSB_CSR0); |
675 | len = musb_readb(regs, MUSB_COUNT0); | 679 | len = musb_readb(regs, MUSB_COUNT0); |
676 | 680 | ||
677 | DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n", | 681 | dev_dbg(musb->controller, "csr %04x, count %d, myaddr %d, ep0stage %s\n", |
678 | csr, len, | 682 | csr, len, |
679 | musb_readb(mbase, MUSB_FADDR), | 683 | musb_readb(mbase, MUSB_FADDR), |
680 | decode_ep0stage(musb->ep0_state)); | 684 | decode_ep0stage(musb->ep0_state)); |
@@ -745,7 +749,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb) | |||
745 | 749 | ||
746 | /* enter test mode if needed (exit by reset) */ | 750 | /* enter test mode if needed (exit by reset) */ |
747 | else if (musb->test_mode) { | 751 | else if (musb->test_mode) { |
748 | DBG(1, "entering TESTMODE\n"); | 752 | dev_dbg(musb->controller, "entering TESTMODE\n"); |
749 | 753 | ||
750 | if (MUSB_TEST_PACKET == musb->test_mode_nr) | 754 | if (MUSB_TEST_PACKET == musb->test_mode_nr) |
751 | musb_load_testpacket(musb); | 755 | musb_load_testpacket(musb); |
@@ -758,11 +762,11 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb) | |||
758 | case MUSB_EP0_STAGE_STATUSOUT: | 762 | case MUSB_EP0_STAGE_STATUSOUT: |
759 | /* end of sequence #1: write to host (TX state) */ | 763 | /* end of sequence #1: write to host (TX state) */ |
760 | { | 764 | { |
761 | struct usb_request *req; | 765 | struct musb_request *req; |
762 | 766 | ||
763 | req = next_ep0_request(musb); | 767 | req = next_ep0_request(musb); |
764 | if (req) | 768 | if (req) |
765 | musb_g_ep0_giveback(musb, req); | 769 | musb_g_ep0_giveback(musb, &req->request); |
766 | } | 770 | } |
767 | 771 | ||
768 | /* | 772 | /* |
@@ -857,7 +861,7 @@ setup: | |||
857 | break; | 861 | break; |
858 | } | 862 | } |
859 | 863 | ||
860 | DBG(3, "handled %d, csr %04x, ep0stage %s\n", | 864 | dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n", |
861 | handled, csr, | 865 | handled, csr, |
862 | decode_ep0stage(musb->ep0_state)); | 866 | decode_ep0stage(musb->ep0_state)); |
863 | 867 | ||
@@ -874,7 +878,7 @@ setup: | |||
874 | if (handled < 0) { | 878 | if (handled < 0) { |
875 | musb_ep_select(mbase, 0); | 879 | musb_ep_select(mbase, 0); |
876 | stall: | 880 | stall: |
877 | DBG(3, "stall (%d)\n", handled); | 881 | dev_dbg(musb->controller, "stall (%d)\n", handled); |
878 | musb->ackpend |= MUSB_CSR0_P_SENDSTALL; | 882 | musb->ackpend |= MUSB_CSR0_P_SENDSTALL; |
879 | musb->ep0_state = MUSB_EP0_STAGE_IDLE; | 883 | musb->ep0_state = MUSB_EP0_STAGE_IDLE; |
880 | finish: | 884 | finish: |
@@ -954,16 +958,16 @@ musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags) | |||
954 | status = 0; | 958 | status = 0; |
955 | break; | 959 | break; |
956 | default: | 960 | default: |
957 | DBG(1, "ep0 request queued in state %d\n", | 961 | dev_dbg(musb->controller, "ep0 request queued in state %d\n", |
958 | musb->ep0_state); | 962 | musb->ep0_state); |
959 | status = -EINVAL; | 963 | status = -EINVAL; |
960 | goto cleanup; | 964 | goto cleanup; |
961 | } | 965 | } |
962 | 966 | ||
963 | /* add request to the list */ | 967 | /* add request to the list */ |
964 | list_add_tail(&(req->request.list), &(ep->req_list)); | 968 | list_add_tail(&req->list, &ep->req_list); |
965 | 969 | ||
966 | DBG(3, "queue to %s (%s), length=%d\n", | 970 | dev_dbg(musb->controller, "queue to %s (%s), length=%d\n", |
967 | ep->name, ep->is_in ? "IN/TX" : "OUT/RX", | 971 | ep->name, ep->is_in ? "IN/TX" : "OUT/RX", |
968 | req->request.length); | 972 | req->request.length); |
969 | 973 | ||
@@ -1056,7 +1060,7 @@ static int musb_g_ep0_halt(struct usb_ep *e, int value) | |||
1056 | musb->ackpend = 0; | 1060 | musb->ackpend = 0; |
1057 | break; | 1061 | break; |
1058 | default: | 1062 | default: |
1059 | DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state); | 1063 | dev_dbg(musb->controller, "ep0 can't halt in state %d\n", musb->ep0_state); |
1060 | status = -EINVAL; | 1064 | status = -EINVAL; |
1061 | } | 1065 | } |
1062 | 1066 | ||
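Most of the churn in musb_gadget_ep0.c (and in musb_host.c below) is the conversion of the driver-private DBG(level, fmt, ...) macro to dev_dbg() against the controller's struct device, so messages are attributed to the device and can be enabled at run time via dynamic debug rather than a compile-time level. A small illustrative function showing the resulting call pattern; it is not part of the patch.

#include <linux/device.h>
#include "musb_core.h"

static void example_log_devctl(struct musb *musb)
{
	u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "devctl %02x\n", devctl);
}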
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 9e65c47cc98b..8b2473fa0f47 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/errno.h> | 41 | #include <linux/errno.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/list.h> | 43 | #include <linux/list.h> |
44 | #include <linux/dma-mapping.h> | ||
44 | 45 | ||
45 | #include "musb_core.h" | 46 | #include "musb_core.h" |
46 | #include "musb_host.h" | 47 | #include "musb_host.h" |
@@ -105,6 +106,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum, | |||
105 | */ | 106 | */ |
106 | static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | 107 | static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) |
107 | { | 108 | { |
109 | struct musb *musb = ep->musb; | ||
108 | void __iomem *epio = ep->regs; | 110 | void __iomem *epio = ep->regs; |
109 | u16 csr; | 111 | u16 csr; |
110 | u16 lastcsr = 0; | 112 | u16 lastcsr = 0; |
@@ -113,7 +115,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | |||
113 | csr = musb_readw(epio, MUSB_TXCSR); | 115 | csr = musb_readw(epio, MUSB_TXCSR); |
114 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { | 116 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { |
115 | if (csr != lastcsr) | 117 | if (csr != lastcsr) |
116 | DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr); | 118 | dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); |
117 | lastcsr = csr; | 119 | lastcsr = csr; |
118 | csr |= MUSB_TXCSR_FLUSHFIFO; | 120 | csr |= MUSB_TXCSR_FLUSHFIFO; |
119 | musb_writew(epio, MUSB_TXCSR, csr); | 121 | musb_writew(epio, MUSB_TXCSR, csr); |
@@ -239,7 +241,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | |||
239 | len = urb->transfer_buffer_length - urb->actual_length; | 241 | len = urb->transfer_buffer_length - urb->actual_length; |
240 | } | 242 | } |
241 | 243 | ||
242 | DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", | 244 | dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", |
243 | qh, urb, address, qh->epnum, | 245 | qh, urb, address, qh->epnum, |
244 | is_in ? "in" : "out", | 246 | is_in ? "in" : "out", |
245 | ({char *s; switch (qh->type) { | 247 | ({char *s; switch (qh->type) { |
@@ -262,7 +264,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | |||
262 | switch (qh->type) { | 264 | switch (qh->type) { |
263 | case USB_ENDPOINT_XFER_ISOC: | 265 | case USB_ENDPOINT_XFER_ISOC: |
264 | case USB_ENDPOINT_XFER_INT: | 266 | case USB_ENDPOINT_XFER_INT: |
265 | DBG(3, "check whether there's still time for periodic Tx\n"); | 267 | dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n"); |
266 | frame = musb_readw(mbase, MUSB_FRAME); | 268 | frame = musb_readw(mbase, MUSB_FRAME); |
267 | /* FIXME this doesn't implement that scheduling policy ... | 269 | /* FIXME this doesn't implement that scheduling policy ... |
268 | * or handle framecounter wrapping | 270 | * or handle framecounter wrapping |
@@ -277,7 +279,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | |||
277 | } else { | 279 | } else { |
278 | qh->frame = urb->start_frame; | 280 | qh->frame = urb->start_frame; |
279 | /* enable SOF interrupt so we can count down */ | 281 | /* enable SOF interrupt so we can count down */ |
280 | DBG(1, "SOF for %d\n", epnum); | 282 | dev_dbg(musb->controller, "SOF for %d\n", epnum); |
281 | #if 1 /* ifndef CONFIG_ARCH_DAVINCI */ | 283 | #if 1 /* ifndef CONFIG_ARCH_DAVINCI */ |
282 | musb_writeb(mbase, MUSB_INTRUSBE, 0xff); | 284 | musb_writeb(mbase, MUSB_INTRUSBE, 0xff); |
283 | #endif | 285 | #endif |
@@ -285,7 +287,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | |||
285 | break; | 287 | break; |
286 | default: | 288 | default: |
287 | start: | 289 | start: |
288 | DBG(4, "Start TX%d %s\n", epnum, | 290 | dev_dbg(musb->controller, "Start TX%d %s\n", epnum, |
289 | hw_ep->tx_channel ? "dma" : "pio"); | 291 | hw_ep->tx_channel ? "dma" : "pio"); |
290 | 292 | ||
291 | if (!hw_ep->tx_channel) | 293 | if (!hw_ep->tx_channel) |
@@ -300,21 +302,7 @@ static void musb_giveback(struct musb *musb, struct urb *urb, int status) | |||
300 | __releases(musb->lock) | 302 | __releases(musb->lock) |
301 | __acquires(musb->lock) | 303 | __acquires(musb->lock) |
302 | { | 304 | { |
303 | DBG(({ int level; switch (status) { | 305 | dev_dbg(musb->controller, |
304 | case 0: | ||
305 | level = 4; | ||
306 | break; | ||
307 | /* common/boring faults */ | ||
308 | case -EREMOTEIO: | ||
309 | case -ESHUTDOWN: | ||
310 | case -ECONNRESET: | ||
311 | case -EPIPE: | ||
312 | level = 3; | ||
313 | break; | ||
314 | default: | ||
315 | level = 2; | ||
316 | break; | ||
317 | }; level; }), | ||
318 | "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n", | 306 | "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n", |
319 | urb, urb->complete, status, | 307 | urb, urb->complete, status, |
320 | usb_pipedevice(urb->pipe), | 308 | usb_pipedevice(urb->pipe), |
@@ -425,7 +413,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, | |||
425 | } | 413 | } |
426 | 414 | ||
427 | if (qh != NULL && qh->is_ready) { | 415 | if (qh != NULL && qh->is_ready) { |
428 | DBG(4, "... next ep%d %cX urb %p\n", | 416 | dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", |
429 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); | 417 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); |
430 | musb_start_urb(musb, is_in, qh); | 418 | musb_start_urb(musb, is_in, qh); |
431 | } | 419 | } |
@@ -470,7 +458,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) | |||
470 | 458 | ||
471 | /* musb_ep_select(mbase, epnum); */ | 459 | /* musb_ep_select(mbase, epnum); */ |
472 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | 460 | rx_count = musb_readw(epio, MUSB_RXCOUNT); |
473 | DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, | 461 | dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, |
474 | urb->transfer_buffer, qh->offset, | 462 | urb->transfer_buffer, qh->offset, |
475 | urb->transfer_buffer_length); | 463 | urb->transfer_buffer_length); |
476 | 464 | ||
@@ -492,7 +480,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) | |||
492 | status = -EOVERFLOW; | 480 | status = -EOVERFLOW; |
493 | urb->error_count++; | 481 | urb->error_count++; |
494 | } | 482 | } |
495 | DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); | 483 | dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length); |
496 | do_flush = 1; | 484 | do_flush = 1; |
497 | } else | 485 | } else |
498 | length = rx_count; | 486 | length = rx_count; |
@@ -510,7 +498,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) | |||
510 | if (rx_count > length) { | 498 | if (rx_count > length) { |
511 | if (urb->status == -EINPROGRESS) | 499 | if (urb->status == -EINPROGRESS) |
512 | urb->status = -EOVERFLOW; | 500 | urb->status = -EOVERFLOW; |
513 | DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); | 501 | dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length); |
514 | do_flush = 1; | 502 | do_flush = 1; |
515 | } else | 503 | } else |
516 | length = rx_count; | 504 | length = rx_count; |
@@ -608,7 +596,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) | |||
608 | /* Set RXMAXP with the FIFO size of the endpoint | 596 | /* Set RXMAXP with the FIFO size of the endpoint |
609 | * to disable double buffer mode. | 597 | * to disable double buffer mode. |
610 | */ | 598 | */ |
611 | if (musb->hwvers < MUSB_HWVERS_2000) | 599 | if (musb->double_buffer_not_ok) |
612 | musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); | 600 | musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); |
613 | else | 601 | else |
614 | musb_writew(ep->regs, MUSB_RXMAXP, | 602 | musb_writew(ep->regs, MUSB_RXMAXP, |
@@ -696,7 +684,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum, | |||
696 | struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out); | 684 | struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out); |
697 | u16 packet_sz = qh->maxpacket; | 685 | u16 packet_sz = qh->maxpacket; |
698 | 686 | ||
699 | DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s " | 687 | dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s " |
700 | "h_addr%02x h_port%02x bytes %d\n", | 688 | "h_addr%02x h_port%02x bytes %d\n", |
701 | is_out ? "-->" : "<--", | 689 | is_out ? "-->" : "<--", |
702 | epnum, urb, urb->dev->speed, | 690 | epnum, urb, urb->dev->speed, |
@@ -783,14 +771,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum, | |||
783 | /* protocol/endpoint/interval/NAKlimit */ | 771 | /* protocol/endpoint/interval/NAKlimit */ |
784 | if (epnum) { | 772 | if (epnum) { |
785 | musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); | 773 | musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); |
786 | if (can_bulk_split(musb, qh->type)) | 774 | if (musb->double_buffer_not_ok) |
787 | musb_writew(epio, MUSB_TXMAXP, | 775 | musb_writew(epio, MUSB_TXMAXP, |
788 | packet_sz | 776 | hw_ep->max_packet_sz_tx); |
789 | | ((hw_ep->max_packet_sz_tx / | ||
790 | packet_sz) - 1) << 11); | ||
791 | else | 777 | else |
792 | musb_writew(epio, MUSB_TXMAXP, | 778 | musb_writew(epio, MUSB_TXMAXP, |
793 | packet_sz); | 779 | qh->maxpacket | |
780 | ((qh->hb_mult - 1) << 11)); | ||
794 | musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); | 781 | musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); |
795 | } else { | 782 | } else { |
796 | musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); | 783 | musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); |
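The hunk above reworks host-side TXMAXP programming: when the double_buffer_not_ok quirk is set the raw FIFO size is written, and otherwise the register carries the endpoint's maximum packet size in bits 10:0 with (hb_mult - 1) in bits 12:11 for high-bandwidth endpoints. A hypothetical helper, not in the patch, that computes the same value:

#include <linux/types.h>

static inline u16 musb_txmaxp_value(u16 maxpacket, u8 hb_mult)
{
	/* low bits: max packet size; bits 12:11: (hb_mult - 1) */
	return maxpacket | ((u16)(hb_mult - 1) << 11);
}

/* Example: a high-bandwidth ISO endpoint with 1024-byte packets and
 * three transactions per microframe gives
 * musb_txmaxp_value(1024, 3) == 0x400 | (2 << 11) == 0x1400.
 */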
@@ -850,37 +837,32 @@ static void musb_ep_program(struct musb *musb, u8 epnum, | |||
850 | /* kick things off */ | 837 | /* kick things off */ |
851 | 838 | ||
852 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { | 839 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { |
853 | /* candidate for DMA */ | 840 | /* Candidate for DMA */ |
854 | if (dma_channel) { | 841 | dma_channel->actual_len = 0L; |
855 | dma_channel->actual_len = 0L; | 842 | qh->segsize = len; |
856 | qh->segsize = len; | 843 | |
857 | 844 | /* AUTOREQ is in a DMA register */ | |
858 | /* AUTOREQ is in a DMA register */ | 845 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); |
859 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | 846 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); |
860 | csr = musb_readw(hw_ep->regs, | 847 | |
861 | MUSB_RXCSR); | 848 | /* |
862 | 849 | * Unless caller treats short RX transfers as | |
863 | /* unless caller treats short rx transfers as | 850 | * errors, we dare not queue multiple transfers. |
864 | * errors, we dare not queue multiple transfers. | 851 | */ |
865 | */ | 852 | dma_ok = dma_controller->channel_program(dma_channel, |
866 | dma_ok = dma_controller->channel_program( | 853 | packet_sz, !(urb->transfer_flags & |
867 | dma_channel, packet_sz, | 854 | URB_SHORT_NOT_OK), |
868 | !(urb->transfer_flags | 855 | urb->transfer_dma + offset, |
869 | & URB_SHORT_NOT_OK), | 856 | qh->segsize); |
870 | urb->transfer_dma + offset, | 857 | if (!dma_ok) { |
871 | qh->segsize); | 858 | dma_controller->channel_release(dma_channel); |
872 | if (!dma_ok) { | 859 | hw_ep->rx_channel = dma_channel = NULL; |
873 | dma_controller->channel_release( | 860 | } else |
874 | dma_channel); | 861 | csr |= MUSB_RXCSR_DMAENAB; |
875 | hw_ep->rx_channel = NULL; | ||
876 | dma_channel = NULL; | ||
877 | } else | ||
878 | csr |= MUSB_RXCSR_DMAENAB; | ||
879 | } | ||
880 | } | 862 | } |
881 | 863 | ||
882 | csr |= MUSB_RXCSR_H_REQPKT; | 864 | csr |= MUSB_RXCSR_H_REQPKT; |
883 | DBG(7, "RXCSR%d := %04x\n", epnum, csr); | 865 | dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr); |
884 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | 866 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); |
885 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | 867 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); |
886 | } | 868 | } |
@@ -923,15 +905,15 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) | |||
923 | request = (struct usb_ctrlrequest *) urb->setup_packet; | 905 | request = (struct usb_ctrlrequest *) urb->setup_packet; |
924 | 906 | ||
925 | if (!request->wLength) { | 907 | if (!request->wLength) { |
926 | DBG(4, "start no-DATA\n"); | 908 | dev_dbg(musb->controller, "start no-DATA\n"); |
927 | break; | 909 | break; |
928 | } else if (request->bRequestType & USB_DIR_IN) { | 910 | } else if (request->bRequestType & USB_DIR_IN) { |
929 | DBG(4, "start IN-DATA\n"); | 911 | dev_dbg(musb->controller, "start IN-DATA\n"); |
930 | musb->ep0_stage = MUSB_EP0_IN; | 912 | musb->ep0_stage = MUSB_EP0_IN; |
931 | more = true; | 913 | more = true; |
932 | break; | 914 | break; |
933 | } else { | 915 | } else { |
934 | DBG(4, "start OUT-DATA\n"); | 916 | dev_dbg(musb->controller, "start OUT-DATA\n"); |
935 | musb->ep0_stage = MUSB_EP0_OUT; | 917 | musb->ep0_stage = MUSB_EP0_OUT; |
936 | more = true; | 918 | more = true; |
937 | } | 919 | } |
@@ -943,7 +925,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) | |||
943 | if (fifo_count) { | 925 | if (fifo_count) { |
944 | fifo_dest = (u8 *) (urb->transfer_buffer | 926 | fifo_dest = (u8 *) (urb->transfer_buffer |
945 | + urb->actual_length); | 927 | + urb->actual_length); |
946 | DBG(3, "Sending %d byte%s to ep0 fifo %p\n", | 928 | dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n", |
947 | fifo_count, | 929 | fifo_count, |
948 | (fifo_count == 1) ? "" : "s", | 930 | (fifo_count == 1) ? "" : "s", |
949 | fifo_dest); | 931 | fifo_dest); |
@@ -988,7 +970,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb) | |||
988 | ? musb_readb(epio, MUSB_COUNT0) | 970 | ? musb_readb(epio, MUSB_COUNT0) |
989 | : 0; | 971 | : 0; |
990 | 972 | ||
991 | DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", | 973 | dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", |
992 | csr, qh, len, urb, musb->ep0_stage); | 974 | csr, qh, len, urb, musb->ep0_stage); |
993 | 975 | ||
994 | /* if we just did status stage, we are done */ | 976 | /* if we just did status stage, we are done */ |
@@ -999,15 +981,15 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb) | |||
999 | 981 | ||
1000 | /* prepare status */ | 982 | /* prepare status */ |
1001 | if (csr & MUSB_CSR0_H_RXSTALL) { | 983 | if (csr & MUSB_CSR0_H_RXSTALL) { |
1002 | DBG(6, "STALLING ENDPOINT\n"); | 984 | dev_dbg(musb->controller, "STALLING ENDPOINT\n"); |
1003 | status = -EPIPE; | 985 | status = -EPIPE; |
1004 | 986 | ||
1005 | } else if (csr & MUSB_CSR0_H_ERROR) { | 987 | } else if (csr & MUSB_CSR0_H_ERROR) { |
1006 | DBG(2, "no response, csr0 %04x\n", csr); | 988 | dev_dbg(musb->controller, "no response, csr0 %04x\n", csr); |
1007 | status = -EPROTO; | 989 | status = -EPROTO; |
1008 | 990 | ||
1009 | } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { | 991 | } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { |
1010 | DBG(2, "control NAK timeout\n"); | 992 | dev_dbg(musb->controller, "control NAK timeout\n"); |
1011 | 993 | ||
1012 | /* NOTE: this code path would be a good place to PAUSE a | 994 | /* NOTE: this code path would be a good place to PAUSE a |
1013 | * control transfer, if another one is queued, so that | 995 | * control transfer, if another one is queued, so that |
@@ -1022,7 +1004,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb) | |||
1022 | } | 1004 | } |
1023 | 1005 | ||
1024 | if (status) { | 1006 | if (status) { |
1025 | DBG(6, "aborting\n"); | 1007 | dev_dbg(musb->controller, "aborting\n"); |
1026 | retval = IRQ_HANDLED; | 1008 | retval = IRQ_HANDLED; |
1027 | if (urb) | 1009 | if (urb) |
1028 | urb->status = status; | 1010 | urb->status = status; |
@@ -1072,7 +1054,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb) | |||
1072 | /* flag status stage */ | 1054 | /* flag status stage */ |
1073 | musb->ep0_stage = MUSB_EP0_STATUS; | 1055 | musb->ep0_stage = MUSB_EP0_STATUS; |
1074 | 1056 | ||
1075 | DBG(5, "ep0 STATUS, csr %04x\n", csr); | 1057 | dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr); |
1076 | 1058 | ||
1077 | } | 1059 | } |
1078 | musb_writew(epio, MUSB_CSR0, csr); | 1060 | musb_writew(epio, MUSB_CSR0, csr); |
@@ -1119,37 +1101,38 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1119 | u32 status = 0; | 1101 | u32 status = 0; |
1120 | void __iomem *mbase = musb->mregs; | 1102 | void __iomem *mbase = musb->mregs; |
1121 | struct dma_channel *dma; | 1103 | struct dma_channel *dma; |
1104 | bool transfer_pending = false; | ||
1122 | 1105 | ||
1123 | musb_ep_select(mbase, epnum); | 1106 | musb_ep_select(mbase, epnum); |
1124 | tx_csr = musb_readw(epio, MUSB_TXCSR); | 1107 | tx_csr = musb_readw(epio, MUSB_TXCSR); |
1125 | 1108 | ||
1126 | /* with CPPI, DMA sometimes triggers "extra" irqs */ | 1109 | /* with CPPI, DMA sometimes triggers "extra" irqs */ |
1127 | if (!urb) { | 1110 | if (!urb) { |
1128 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | 1111 | dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr); |
1129 | return; | 1112 | return; |
1130 | } | 1113 | } |
1131 | 1114 | ||
1132 | pipe = urb->pipe; | 1115 | pipe = urb->pipe; |
1133 | dma = is_dma_capable() ? hw_ep->tx_channel : NULL; | 1116 | dma = is_dma_capable() ? hw_ep->tx_channel : NULL; |
1134 | DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, | 1117 | dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, |
1135 | dma ? ", dma" : ""); | 1118 | dma ? ", dma" : ""); |
1136 | 1119 | ||
1137 | /* check for errors */ | 1120 | /* check for errors */ |
1138 | if (tx_csr & MUSB_TXCSR_H_RXSTALL) { | 1121 | if (tx_csr & MUSB_TXCSR_H_RXSTALL) { |
1139 | /* dma was disabled, fifo flushed */ | 1122 | /* dma was disabled, fifo flushed */ |
1140 | DBG(3, "TX end %d stall\n", epnum); | 1123 | dev_dbg(musb->controller, "TX end %d stall\n", epnum); |
1141 | 1124 | ||
1142 | /* stall; record URB status */ | 1125 | /* stall; record URB status */ |
1143 | status = -EPIPE; | 1126 | status = -EPIPE; |
1144 | 1127 | ||
1145 | } else if (tx_csr & MUSB_TXCSR_H_ERROR) { | 1128 | } else if (tx_csr & MUSB_TXCSR_H_ERROR) { |
1146 | /* (NON-ISO) dma was disabled, fifo flushed */ | 1129 | /* (NON-ISO) dma was disabled, fifo flushed */ |
1147 | DBG(3, "TX 3strikes on ep=%d\n", epnum); | 1130 | dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum); |
1148 | 1131 | ||
1149 | status = -ETIMEDOUT; | 1132 | status = -ETIMEDOUT; |
1150 | 1133 | ||
1151 | } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { | 1134 | } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { |
1152 | DBG(6, "TX end=%d device not responding\n", epnum); | 1135 | dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum); |
1153 | 1136 | ||
1154 | /* NOTE: this code path would be a good place to PAUSE a | 1137 | /* NOTE: this code path would be a good place to PAUSE a |
1155 | * transfer, if there's some other (nonperiodic) tx urb | 1138 | * transfer, if there's some other (nonperiodic) tx urb |
@@ -1194,7 +1177,7 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1194 | 1177 | ||
1195 | /* second cppi case */ | 1178 | /* second cppi case */ |
1196 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 1179 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
1197 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | 1180 | dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr); |
1198 | return; | 1181 | return; |
1199 | } | 1182 | } |
1200 | 1183 | ||
@@ -1253,7 +1236,7 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1253 | * FIFO mode too... | 1236 | * FIFO mode too... |
1254 | */ | 1237 | */ |
1255 | if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) { | 1238 | if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) { |
1256 | DBG(2, "DMA complete but packet still in FIFO, " | 1239 | dev_dbg(musb->controller, "DMA complete but packet still in FIFO, " |
1257 | "CSR %04x\n", tx_csr); | 1240 | "CSR %04x\n", tx_csr); |
1258 | return; | 1241 | return; |
1259 | } | 1242 | } |
@@ -1279,7 +1262,7 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1279 | offset = d->offset; | 1262 | offset = d->offset; |
1280 | length = d->length; | 1263 | length = d->length; |
1281 | } | 1264 | } |
1282 | } else if (dma) { | 1265 | } else if (dma && urb->transfer_buffer_length == qh->offset) { |
1283 | done = true; | 1266 | done = true; |
1284 | } else { | 1267 | } else { |
1285 | /* see if we need to send more data, or ZLP */ | 1268 | /* see if we need to send more data, or ZLP */ |
@@ -1292,6 +1275,7 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1292 | if (!done) { | 1275 | if (!done) { |
1293 | offset = qh->offset; | 1276 | offset = qh->offset; |
1294 | length = urb->transfer_buffer_length - offset; | 1277 | length = urb->transfer_buffer_length - offset; |
1278 | transfer_pending = true; | ||
1295 | } | 1279 | } |
1296 | } | 1280 | } |
1297 | } | 1281 | } |
@@ -1311,7 +1295,7 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1311 | urb->actual_length = qh->offset; | 1295 | urb->actual_length = qh->offset; |
1312 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); | 1296 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); |
1313 | return; | 1297 | return; |
1314 | } else if (usb_pipeisoc(pipe) && dma) { | 1298 | } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) { |
1315 | if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, | 1299 | if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, |
1316 | offset, length)) { | 1300 | offset, length)) { |
1317 | if (is_cppi_enabled() || tusb_dma_omap()) | 1301 | if (is_cppi_enabled() || tusb_dma_omap()) |
@@ -1319,7 +1303,7 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1319 | return; | 1303 | return; |
1320 | } | 1304 | } |
1321 | } else if (tx_csr & MUSB_TXCSR_DMAENAB) { | 1305 | } else if (tx_csr & MUSB_TXCSR_DMAENAB) { |
1322 | DBG(1, "not complete, but DMA enabled?\n"); | 1306 | dev_dbg(musb->controller, "not complete, but DMA enabled?\n"); |
1323 | return; | 1307 | return; |
1324 | } | 1308 | } |
1325 | 1309 | ||
@@ -1332,6 +1316,8 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1332 | */ | 1316 | */ |
1333 | if (length > qh->maxpacket) | 1317 | if (length > qh->maxpacket) |
1334 | length = qh->maxpacket; | 1318 | length = qh->maxpacket; |
1319 | /* Unmap the buffer so that CPU can use it */ | ||
1320 | usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); | ||
1335 | musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); | 1321 | musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); |
1336 | qh->segsize = length; | 1322 | qh->segsize = length; |
1337 | 1323 | ||
@@ -1458,7 +1444,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
1458 | * usbtest #11 (unlinks) triggers it regularly, sometimes | 1444 | * usbtest #11 (unlinks) triggers it regularly, sometimes |
1459 | * with fifo full. (Only with DMA??) | 1445 | * with fifo full. (Only with DMA??) |
1460 | */ | 1446 | */ |
1461 | DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, | 1447 | dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, |
1462 | musb_readw(epio, MUSB_RXCOUNT)); | 1448 | musb_readw(epio, MUSB_RXCOUNT)); |
1463 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | 1449 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); |
1464 | return; | 1450 | return; |
@@ -1466,20 +1452,20 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
1466 | 1452 | ||
1467 | pipe = urb->pipe; | 1453 | pipe = urb->pipe; |
1468 | 1454 | ||
1469 | DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", | 1455 | dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", |
1470 | epnum, rx_csr, urb->actual_length, | 1456 | epnum, rx_csr, urb->actual_length, |
1471 | dma ? dma->actual_len : 0); | 1457 | dma ? dma->actual_len : 0); |
1472 | 1458 | ||
1473 | /* check for errors, concurrent stall & unlink is not really | 1459 | /* check for errors, concurrent stall & unlink is not really |
1474 | * handled yet! */ | 1460 | * handled yet! */ |
1475 | if (rx_csr & MUSB_RXCSR_H_RXSTALL) { | 1461 | if (rx_csr & MUSB_RXCSR_H_RXSTALL) { |
1476 | DBG(3, "RX end %d STALL\n", epnum); | 1462 | dev_dbg(musb->controller, "RX end %d STALL\n", epnum); |
1477 | 1463 | ||
1478 | /* stall; record URB status */ | 1464 | /* stall; record URB status */ |
1479 | status = -EPIPE; | 1465 | status = -EPIPE; |
1480 | 1466 | ||
1481 | } else if (rx_csr & MUSB_RXCSR_H_ERROR) { | 1467 | } else if (rx_csr & MUSB_RXCSR_H_ERROR) { |
1482 | DBG(3, "end %d RX proto error\n", epnum); | 1468 | dev_dbg(musb->controller, "end %d RX proto error\n", epnum); |
1483 | 1469 | ||
1484 | status = -EPROTO; | 1470 | status = -EPROTO; |
1485 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | 1471 | musb_writeb(epio, MUSB_RXINTERVAL, 0); |
@@ -1487,7 +1473,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
1487 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { | 1473 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { |
1488 | 1474 | ||
1489 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { | 1475 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { |
1490 | DBG(6, "RX end %d NAK timeout\n", epnum); | 1476 | dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum); |
1491 | 1477 | ||
1492 | /* NOTE: NAKing is *NOT* an error, so we want to | 1478 | /* NOTE: NAKing is *NOT* an error, so we want to |
1493 | * continue. Except ... if there's a request for | 1479 | * continue. Except ... if there's a request for |
@@ -1510,12 +1496,12 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
1510 | 1496 | ||
1511 | goto finish; | 1497 | goto finish; |
1512 | } else { | 1498 | } else { |
1513 | DBG(4, "RX end %d ISO data error\n", epnum); | 1499 | dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum); |
1514 | /* packet error reported later */ | 1500 | /* packet error reported later */ |
1515 | iso_err = true; | 1501 | iso_err = true; |
1516 | } | 1502 | } |
1517 | } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { | 1503 | } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { |
1518 | DBG(3, "end %d high bandwidth incomplete ISO packet RX\n", | 1504 | dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n", |
1519 | epnum); | 1505 | epnum); |
1520 | status = -EPROTO; | 1506 | status = -EPROTO; |
1521 | } | 1507 | } |
@@ -1561,7 +1547,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
1561 | done = true; | 1547 | done = true; |
1562 | } | 1548 | } |
1563 | 1549 | ||
1564 | DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, | 1550 | dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, |
1565 | xfer_len, dma ? ", dma" : ""); | 1551 | xfer_len, dma ? ", dma" : ""); |
1566 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; | 1552 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; |
1567 | 1553 | ||
@@ -1589,7 +1575,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
1589 | /* even if there was an error, we did the dma | 1575 | /* even if there was an error, we did the dma |
1590 | * for iso_frame_desc->length | 1576 | * for iso_frame_desc->length |
1591 | */ | 1577 | */ |
1592 | if (d->status != EILSEQ && d->status != -EOVERFLOW) | 1578 | if (d->status != -EILSEQ && d->status != -EOVERFLOW) |
1593 | d->status = 0; | 1579 | d->status = 0; |
1594 | 1580 | ||
1595 | if (++qh->iso_idx >= urb->number_of_packets) | 1581 | if (++qh->iso_idx >= urb->number_of_packets) |
@@ -1611,7 +1597,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
1611 | MUSB_RXCSR_H_WZC_BITS | val); | 1597 | MUSB_RXCSR_H_WZC_BITS | val); |
1612 | } | 1598 | } |
1613 | 1599 | ||
1614 | DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, | 1600 | dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, |
1615 | done ? "off" : "reset", | 1601 | done ? "off" : "reset", |
1616 | musb_readw(epio, MUSB_RXCSR), | 1602 | musb_readw(epio, MUSB_RXCSR), |
1617 | musb_readw(epio, MUSB_RXCOUNT)); | 1603 | musb_readw(epio, MUSB_RXCOUNT)); |
@@ -1644,7 +1630,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
1644 | 1630 | ||
1645 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | 1631 | rx_count = musb_readw(epio, MUSB_RXCOUNT); |
1646 | 1632 | ||
1647 | DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n", | 1633 | dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n", |
1648 | epnum, rx_count, | 1634 | epnum, rx_count, |
1649 | urb->transfer_dma | 1635 | urb->transfer_dma |
1650 | + urb->actual_length, | 1636 | + urb->actual_length, |
@@ -1668,7 +1654,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
1668 | d_status = -EOVERFLOW; | 1654 | d_status = -EOVERFLOW; |
1669 | urb->error_count++; | 1655 | urb->error_count++; |
1670 | } | 1656 | } |
1671 | DBG(2, "** OVERFLOW %d into %d\n",\ | 1657 | dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\ |
1672 | rx_count, d->length); | 1658 | rx_count, d->length); |
1673 | 1659 | ||
1674 | length = d->length; | 1660 | length = d->length; |
@@ -1752,9 +1738,11 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
1752 | #endif /* Mentor DMA */ | 1738 | #endif /* Mentor DMA */ |
1753 | 1739 | ||
1754 | if (!dma) { | 1740 | if (!dma) { |
1741 | /* Unmap the buffer so that CPU can use it */ | ||
1742 | usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); | ||
1755 | done = musb_host_packet_rx(musb, urb, | 1743 | done = musb_host_packet_rx(musb, urb, |
1756 | epnum, iso_err); | 1744 | epnum, iso_err); |
1757 | DBG(6, "read %spacket\n", done ? "last " : ""); | 1745 | dev_dbg(musb->controller, "read %spacket\n", done ? "last " : ""); |
1758 | } | 1746 | } |
1759 | } | 1747 | } |
1760 | 1748 | ||
@@ -1875,7 +1863,7 @@ static int musb_schedule( | |||
1875 | idle = 1; | 1863 | idle = 1; |
1876 | qh->mux = 0; | 1864 | qh->mux = 0; |
1877 | hw_ep = musb->endpoints + best_end; | 1865 | hw_ep = musb->endpoints + best_end; |
1878 | DBG(4, "qh %p periodic slot %d\n", qh, best_end); | 1866 | dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end); |
1879 | success: | 1867 | success: |
1880 | if (head) { | 1868 | if (head) { |
1881 | idle = list_empty(head); | 1869 | idle = list_empty(head); |
@@ -2081,6 +2069,7 @@ done: | |||
2081 | static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) | 2069 | static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) |
2082 | { | 2070 | { |
2083 | struct musb_hw_ep *ep = qh->hw_ep; | 2071 | struct musb_hw_ep *ep = qh->hw_ep; |
2072 | struct musb *musb = ep->musb; | ||
2084 | void __iomem *epio = ep->regs; | 2073 | void __iomem *epio = ep->regs; |
2085 | unsigned hw_end = ep->epnum; | 2074 | unsigned hw_end = ep->epnum; |
2086 | void __iomem *regs = ep->musb->mregs; | 2075 | void __iomem *regs = ep->musb->mregs; |
@@ -2096,7 +2085,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) | |||
2096 | dma = is_in ? ep->rx_channel : ep->tx_channel; | 2085 | dma = is_in ? ep->rx_channel : ep->tx_channel; |
2097 | if (dma) { | 2086 | if (dma) { |
2098 | status = ep->musb->dma_controller->channel_abort(dma); | 2087 | status = ep->musb->dma_controller->channel_abort(dma); |
2099 | DBG(status ? 1 : 3, | 2088 | dev_dbg(musb->controller, |
2100 | "abort %cX%d DMA for urb %p --> %d\n", | 2089 | "abort %cX%d DMA for urb %p --> %d\n", |
2101 | is_in ? 'R' : 'T', ep->epnum, | 2090 | is_in ? 'R' : 'T', ep->epnum, |
2102 | urb, status); | 2091 | urb, status); |
@@ -2143,7 +2132,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
2143 | int is_in = usb_pipein(urb->pipe); | 2132 | int is_in = usb_pipein(urb->pipe); |
2144 | int ret; | 2133 | int ret; |
2145 | 2134 | ||
2146 | DBG(4, "urb=%p, dev%d ep%d%s\n", urb, | 2135 | dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb, |
2147 | usb_pipedevice(urb->pipe), | 2136 | usb_pipedevice(urb->pipe), |
2148 | usb_pipeendpoint(urb->pipe), | 2137 | usb_pipeendpoint(urb->pipe), |
2149 | is_in ? "in" : "out"); | 2138 | is_in ? "in" : "out"); |
@@ -2298,7 +2287,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd) | |||
2298 | 2287 | ||
2299 | if (musb->is_active) { | 2288 | if (musb->is_active) { |
2300 | WARNING("trying to suspend as %s while active\n", | 2289 | WARNING("trying to suspend as %s while active\n", |
2301 | otg_state_string(musb)); | 2290 | otg_state_string(musb->xceiv->state)); |
2302 | return -EBUSY; | 2291 | return -EBUSY; |
2303 | } else | 2292 | } else |
2304 | return 0; | 2293 | return 0; |
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index b06e9ef00cfc..03c6ccdbb3be 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -74,7 +74,7 @@ static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data) | |||
74 | { __raw_writel(data, addr + offset); } | 74 | { __raw_writel(data, addr + offset); } |
75 | 75 | ||
76 | 76 | ||
77 | #ifdef CONFIG_USB_TUSB6010 | 77 | #ifdef CONFIG_USB_MUSB_TUSB6010 |
78 | 78 | ||
79 | /* | 79 | /* |
80 | * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum. | 80 | * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum. |
@@ -114,7 +114,7 @@ static inline u8 musb_readb(const void __iomem *addr, unsigned offset) | |||
114 | static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) | 114 | static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) |
115 | { __raw_writeb(data, addr + offset); } | 115 | { __raw_writeb(data, addr + offset); } |
116 | 116 | ||
117 | #endif /* CONFIG_USB_TUSB6010 */ | 117 | #endif /* CONFIG_USB_MUSB_TUSB6010 */ |
118 | 118 | ||
119 | #else | 119 | #else |
120 | 120 | ||
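The ifdef rename above follows the Kconfig symbols introduced by the new platform-glue choice; the guarded block exists because the TUSB6010 bus cannot do 8-bit accesses. A rough sketch of the idea behind those helpers, assuming 8-bit reads are emulated through an aligned 16-bit access (illustration only, not the file's actual code):

    /* emulate an 8-bit read on a 16-bit-only bus */
    static inline u8 tusb_emulated_readb(const void __iomem *addr, unsigned offset)
    {
            u16 w = __raw_readw(addr + (offset & ~1));      /* aligned 16-bit read */
            return (offset & 1) ? (w >> 8) : (w & 0xff);    /* select the byte     */
    }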
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 244267527a60..82410703dcd3 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -234,7 +234,7 @@ | |||
234 | #define MUSB_TESTMODE 0x0F /* 8 bit */ | 234 | #define MUSB_TESTMODE 0x0F /* 8 bit */ |
235 | 235 | ||
236 | /* Get offset for a given FIFO from musb->mregs */ | 236 | /* Get offset for a given FIFO from musb->mregs */ |
237 | #ifdef CONFIG_USB_TUSB6010 | 237 | #ifdef CONFIG_USB_MUSB_TUSB6010 |
238 | #define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) | 238 | #define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) |
239 | #else | 239 | #else |
240 | #define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) | 240 | #define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) |
@@ -295,7 +295,7 @@ | |||
295 | #define MUSB_FLAT_OFFSET(_epnum, _offset) \ | 295 | #define MUSB_FLAT_OFFSET(_epnum, _offset) \ |
296 | (0x100 + (0x10*(_epnum)) + (_offset)) | 296 | (0x100 + (0x10*(_epnum)) + (_offset)) |
297 | 297 | ||
298 | #ifdef CONFIG_USB_TUSB6010 | 298 | #ifdef CONFIG_USB_MUSB_TUSB6010 |
299 | /* TUSB6010 EP0 configuration register is special */ | 299 | /* TUSB6010 EP0 configuration register is special */ |
300 | #define MUSB_TUSB_OFFSET(_epnum, _offset) \ | 300 | #define MUSB_TUSB_OFFSET(_epnum, _offset) \ |
301 | (0x10 + _offset) | 301 | (0x10 + _offset) |
@@ -633,8 +633,9 @@ static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum) | |||
633 | return 0; | 633 | return 0; |
634 | } | 634 | } |
635 | 635 | ||
636 | static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum) | 636 | static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum) |
637 | { | 637 | { |
638 | return 0; | ||
638 | } | 639 | } |
639 | 640 | ||
640 | #endif /* CONFIG_BLACKFIN */ | 641 | #endif /* CONFIG_BLACKFIN */ |
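Besides the CONFIG_USB_MUSB_TUSB6010 rename, this hunk fixes the Blackfin stub musb_read_txhubport() to return the u8 its callers expect instead of being declared void with no value. For reference, the two FIFO layouts selected by the macro above work out to (plain arithmetic from the definitions shown):

    /* endpoint 1 FIFO offset under each layout                 */
    /* TUSB6010:  0x200 + (1 * 0x20) = 0x220  (32-byte stride)  */
    /* otherwise: 0x20  + (1 * 4)    = 0x24   (4-byte stride)   */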
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 43233c397b6e..2d80a5758838 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -74,7 +74,7 @@ static void musb_port_suspend(struct musb *musb, bool do_suspend) | |||
74 | break; | 74 | break; |
75 | } | 75 | } |
76 | 76 | ||
77 | DBG(3, "Root port suspended, power %02x\n", power); | 77 | dev_dbg(musb->controller, "Root port suspended, power %02x\n", power); |
78 | 78 | ||
79 | musb->port1_status |= USB_PORT_STAT_SUSPEND; | 79 | musb->port1_status |= USB_PORT_STAT_SUSPEND; |
80 | switch (musb->xceiv->state) { | 80 | switch (musb->xceiv->state) { |
@@ -97,15 +97,15 @@ static void musb_port_suspend(struct musb *musb, bool do_suspend) | |||
97 | break; | 97 | break; |
98 | #endif | 98 | #endif |
99 | default: | 99 | default: |
100 | DBG(1, "bogus rh suspend? %s\n", | 100 | dev_dbg(musb->controller, "bogus rh suspend? %s\n", |
101 | otg_state_string(musb)); | 101 | otg_state_string(musb->xceiv->state)); |
102 | } | 102 | } |
103 | } else if (power & MUSB_POWER_SUSPENDM) { | 103 | } else if (power & MUSB_POWER_SUSPENDM) { |
104 | power &= ~MUSB_POWER_SUSPENDM; | 104 | power &= ~MUSB_POWER_SUSPENDM; |
105 | power |= MUSB_POWER_RESUME; | 105 | power |= MUSB_POWER_RESUME; |
106 | musb_writeb(mbase, MUSB_POWER, power); | 106 | musb_writeb(mbase, MUSB_POWER, power); |
107 | 107 | ||
108 | DBG(3, "Root port resuming, power %02x\n", power); | 108 | dev_dbg(musb->controller, "Root port resuming, power %02x\n", power); |
109 | 109 | ||
110 | /* later, GetPortStatus will stop RESUME signaling */ | 110 | /* later, GetPortStatus will stop RESUME signaling */ |
111 | musb->port1_status |= MUSB_PORT_STAT_RESUME; | 111 | musb->port1_status |= MUSB_PORT_STAT_RESUME; |
@@ -120,7 +120,7 @@ static void musb_port_reset(struct musb *musb, bool do_reset) | |||
120 | 120 | ||
121 | #ifdef CONFIG_USB_MUSB_OTG | 121 | #ifdef CONFIG_USB_MUSB_OTG |
122 | if (musb->xceiv->state == OTG_STATE_B_IDLE) { | 122 | if (musb->xceiv->state == OTG_STATE_B_IDLE) { |
123 | DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n"); | 123 | dev_dbg(musb->controller, "HNP: Returning from HNP; no hub reset from b_idle\n"); |
124 | musb->port1_status &= ~USB_PORT_STAT_RESET; | 124 | musb->port1_status &= ~USB_PORT_STAT_RESET; |
125 | return; | 125 | return; |
126 | } | 126 | } |
@@ -159,7 +159,7 @@ static void musb_port_reset(struct musb *musb, bool do_reset) | |||
159 | musb->port1_status &= ~USB_PORT_STAT_ENABLE; | 159 | musb->port1_status &= ~USB_PORT_STAT_ENABLE; |
160 | musb->rh_timer = jiffies + msecs_to_jiffies(50); | 160 | musb->rh_timer = jiffies + msecs_to_jiffies(50); |
161 | } else { | 161 | } else { |
162 | DBG(4, "root port reset stopped\n"); | 162 | dev_dbg(musb->controller, "root port reset stopped\n"); |
163 | musb_writeb(mbase, MUSB_POWER, | 163 | musb_writeb(mbase, MUSB_POWER, |
164 | power & ~MUSB_POWER_RESET); | 164 | power & ~MUSB_POWER_RESET); |
165 | 165 | ||
@@ -167,7 +167,7 @@ static void musb_port_reset(struct musb *musb, bool do_reset) | |||
167 | 167 | ||
168 | power = musb_readb(mbase, MUSB_POWER); | 168 | power = musb_readb(mbase, MUSB_POWER); |
169 | if (power & MUSB_POWER_HSMODE) { | 169 | if (power & MUSB_POWER_HSMODE) { |
170 | DBG(4, "high-speed device connected\n"); | 170 | dev_dbg(musb->controller, "high-speed device connected\n"); |
171 | musb->port1_status |= USB_PORT_STAT_HIGH_SPEED; | 171 | musb->port1_status |= USB_PORT_STAT_HIGH_SPEED; |
172 | } | 172 | } |
173 | 173 | ||
@@ -208,7 +208,8 @@ void musb_root_disconnect(struct musb *musb) | |||
208 | musb->xceiv->state = OTG_STATE_B_IDLE; | 208 | musb->xceiv->state = OTG_STATE_B_IDLE; |
209 | break; | 209 | break; |
210 | default: | 210 | default: |
211 | DBG(1, "host disconnect (%s)\n", otg_state_string(musb)); | 211 | dev_dbg(musb->controller, "host disconnect (%s)\n", |
212 | otg_state_string(musb->xceiv->state)); | ||
212 | } | 213 | } |
213 | } | 214 | } |
214 | 215 | ||
@@ -276,7 +277,7 @@ int musb_hub_control( | |||
276 | break; | 277 | break; |
277 | case USB_PORT_FEAT_POWER: | 278 | case USB_PORT_FEAT_POWER: |
278 | if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) | 279 | if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) |
279 | musb_set_vbus(musb, 0); | 280 | musb_platform_set_vbus(musb, 0); |
280 | break; | 281 | break; |
281 | case USB_PORT_FEAT_C_CONNECTION: | 282 | case USB_PORT_FEAT_C_CONNECTION: |
282 | case USB_PORT_FEAT_C_ENABLE: | 283 | case USB_PORT_FEAT_C_ENABLE: |
@@ -287,7 +288,7 @@ int musb_hub_control( | |||
287 | default: | 288 | default: |
288 | goto error; | 289 | goto error; |
289 | } | 290 | } |
290 | DBG(5, "clear feature %d\n", wValue); | 291 | dev_dbg(musb->controller, "clear feature %d\n", wValue); |
291 | musb->port1_status &= ~(1 << wValue); | 292 | musb->port1_status &= ~(1 << wValue); |
292 | break; | 293 | break; |
293 | case GetHubDescriptor: | 294 | case GetHubDescriptor: |
@@ -305,8 +306,8 @@ int musb_hub_control( | |||
305 | desc->bHubContrCurrent = 0; | 306 | desc->bHubContrCurrent = 0; |
306 | 307 | ||
307 | /* workaround bogus struct definition */ | 308 | /* workaround bogus struct definition */ |
308 | desc->DeviceRemovable[0] = 0x02; /* port 1 */ | 309 | desc->u.hs.DeviceRemovable[0] = 0x02; /* port 1 */ |
309 | desc->DeviceRemovable[1] = 0xff; | 310 | desc->u.hs.DeviceRemovable[1] = 0xff; |
310 | } | 311 | } |
311 | break; | 312 | break; |
312 | case GetHubStatus: | 313 | case GetHubStatus: |
@@ -329,7 +330,7 @@ int musb_hub_control( | |||
329 | 330 | ||
330 | power = musb_readb(musb->mregs, MUSB_POWER); | 331 | power = musb_readb(musb->mregs, MUSB_POWER); |
331 | power &= ~MUSB_POWER_RESUME; | 332 | power &= ~MUSB_POWER_RESUME; |
332 | DBG(4, "root port resume stopped, power %02x\n", | 333 | dev_dbg(musb->controller, "root port resume stopped, power %02x\n", |
333 | power); | 334 | power); |
334 | musb_writeb(musb->mregs, MUSB_POWER, power); | 335 | musb_writeb(musb->mregs, MUSB_POWER, power); |
335 | 336 | ||
@@ -352,7 +353,7 @@ int musb_hub_control( | |||
352 | (__le32 *) buf); | 353 | (__le32 *) buf); |
353 | 354 | ||
354 | /* port change status is more interesting */ | 355 | /* port change status is more interesting */ |
355 | DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n", | 356 | dev_dbg(musb->controller, "port status %08x\n", |
356 | musb->port1_status); | 357 | musb->port1_status); |
357 | break; | 358 | break; |
358 | case SetPortFeature: | 359 | case SetPortFeature: |
@@ -423,7 +424,7 @@ int musb_hub_control( | |||
423 | default: | 424 | default: |
424 | goto error; | 425 | goto error; |
425 | } | 426 | } |
426 | DBG(5, "set feature %d\n", wValue); | 427 | dev_dbg(musb->controller, "set feature %d\n", wValue); |
427 | musb->port1_status |= 1 << wValue; | 428 | musb->port1_status |= 1 << wValue; |
428 | break; | 429 | break; |
429 | 430 | ||
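Three changes run through this file: otg_state_string() now takes the transceiver state explicitly, VBUS is switched through musb_platform_set_vbus(), and DeviceRemovable is reached through the u.hs member of the hub descriptor (the layout that separates high-speed and SuperSpeed fields, as far as this hunk shows). The set/clear feature paths at the end mirror each other on the cached port1_status word; a small worked example using the standard ch11 feature numbers:

    /* ClearPortFeature with wValue = USB_PORT_FEAT_SUSPEND (2) */
    musb->port1_status &= ~(1 << USB_PORT_FEAT_SUSPEND);   /* clears bit 2 */

    /* SetPortFeature with the same wValue sets it again */
    musb->port1_status |= 1 << USB_PORT_FEAT_SUSPEND;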
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 6dc107f25245..f70c5a577736 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -91,7 +91,7 @@ static struct dma_channel *dma_channel_allocate(struct dma_controller *c, | |||
91 | channel = &(musb_channel->channel); | 91 | channel = &(musb_channel->channel); |
92 | channel->private_data = musb_channel; | 92 | channel->private_data = musb_channel; |
93 | channel->status = MUSB_DMA_STATUS_FREE; | 93 | channel->status = MUSB_DMA_STATUS_FREE; |
94 | channel->max_len = 0x10000; | 94 | channel->max_len = 0x100000; |
95 | /* Tx => mode 1; Rx => mode 0 */ | 95 | /* Tx => mode 1; Rx => mode 0 */ |
96 | channel->desired_mode = transmit; | 96 | channel->desired_mode = transmit; |
97 | channel->actual_len = 0; | 97 | channel->actual_len = 0; |
@@ -122,11 +122,12 @@ static void configure_channel(struct dma_channel *channel, | |||
122 | { | 122 | { |
123 | struct musb_dma_channel *musb_channel = channel->private_data; | 123 | struct musb_dma_channel *musb_channel = channel->private_data; |
124 | struct musb_dma_controller *controller = musb_channel->controller; | 124 | struct musb_dma_controller *controller = musb_channel->controller; |
125 | struct musb *musb = controller->private_data; | ||
125 | void __iomem *mbase = controller->base; | 126 | void __iomem *mbase = controller->base; |
126 | u8 bchannel = musb_channel->idx; | 127 | u8 bchannel = musb_channel->idx; |
127 | u16 csr = 0; | 128 | u16 csr = 0; |
128 | 129 | ||
129 | DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n", | 130 | dev_dbg(musb->controller, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n", |
130 | channel, packet_sz, dma_addr, len, mode); | 131 | channel, packet_sz, dma_addr, len, mode); |
131 | 132 | ||
132 | if (mode) { | 133 | if (mode) { |
@@ -158,8 +159,10 @@ static int dma_channel_program(struct dma_channel *channel, | |||
158 | dma_addr_t dma_addr, u32 len) | 159 | dma_addr_t dma_addr, u32 len) |
159 | { | 160 | { |
160 | struct musb_dma_channel *musb_channel = channel->private_data; | 161 | struct musb_dma_channel *musb_channel = channel->private_data; |
162 | struct musb_dma_controller *controller = musb_channel->controller; | ||
163 | struct musb *musb = controller->private_data; | ||
161 | 164 | ||
162 | DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", | 165 | dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", |
163 | musb_channel->epnum, | 166 | musb_channel->epnum, |
164 | musb_channel->transmit ? "Tx" : "Rx", | 167 | musb_channel->transmit ? "Tx" : "Rx", |
165 | packet_sz, dma_addr, len, mode); | 168 | packet_sz, dma_addr, len, mode); |
@@ -167,6 +170,26 @@ static int dma_channel_program(struct dma_channel *channel, | |||
167 | BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || | 170 | BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || |
168 | channel->status == MUSB_DMA_STATUS_BUSY); | 171 | channel->status == MUSB_DMA_STATUS_BUSY); |
169 | 172 | ||
173 | /* Let targets check/tweak the arguments */ | ||
174 | if (musb->ops->adjust_channel_params) { | ||
175 | int ret = musb->ops->adjust_channel_params(channel, | ||
176 | packet_sz, &mode, &dma_addr, &len); | ||
177 | if (ret) | ||
178 | return ret; | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * The DMA engine in RTL1.8 and above cannot handle | ||
183 | * DMA addresses that are not aligned to a 4 byte boundary. | ||
184 | * It ends up masking the last two bits of the address | ||
185 | * programmed in DMA_ADDR. | ||
186 | * | ||
187 | * Fail such DMA transfers, so that the backup PIO mode | ||
188 | * can carry out the transfer | ||
189 | */ | ||
190 | if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4)) | ||
191 | return false; | ||
192 | |||
170 | channel->actual_len = 0; | 193 | channel->actual_len = 0; |
171 | musb_channel->start_addr = dma_addr; | 194 | musb_channel->start_addr = dma_addr; |
172 | musb_channel->len = len; | 195 | musb_channel->len = len; |
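The new checks in dma_channel_program() above do two things: give the glue layer a chance to veto or adjust the transfer via the optional adjust_channel_params() hook, and refuse buffers whose DMA address is not 4-byte aligned on controller RTL 1.8 or newer, since that hardware masks the two low address bits. Returning false makes the caller fall back to PIO. The alignment test condensed into one helper (same logic, "% 4" written as a bit mask):

    /* reject transfers the RTL >= 1.8 DMA engine would silently misalign */
    static inline bool hsdma_addr_ok(struct musb *musb, dma_addr_t dma_addr)
    {
            if (musb->hwvers >= MUSB_HWVERS_1800 && (dma_addr & 0x3))
                    return false;   /* caller drops to PIO */
            return true;
    }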
@@ -252,7 +275,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) | |||
252 | #endif | 275 | #endif |
253 | 276 | ||
254 | if (!int_hsdma) { | 277 | if (!int_hsdma) { |
255 | DBG(2, "spurious DMA irq\n"); | 278 | dev_dbg(musb->controller, "spurious DMA irq\n"); |
256 | 279 | ||
257 | for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) { | 280 | for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) { |
258 | musb_channel = (struct musb_dma_channel *) | 281 | musb_channel = (struct musb_dma_channel *) |
@@ -266,7 +289,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) | |||
266 | } | 289 | } |
267 | } | 290 | } |
268 | 291 | ||
269 | DBG(2, "int_hsdma = 0x%x\n", int_hsdma); | 292 | dev_dbg(musb->controller, "int_hsdma = 0x%x\n", int_hsdma); |
270 | 293 | ||
271 | if (!int_hsdma) | 294 | if (!int_hsdma) |
272 | goto done; | 295 | goto done; |
@@ -293,7 +316,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) | |||
293 | channel->actual_len = addr | 316 | channel->actual_len = addr |
294 | - musb_channel->start_addr; | 317 | - musb_channel->start_addr; |
295 | 318 | ||
296 | DBG(2, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n", | 319 | dev_dbg(musb->controller, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n", |
297 | channel, musb_channel->start_addr, | 320 | channel, musb_channel->start_addr, |
298 | addr, channel->actual_len, | 321 | addr, channel->actual_len, |
299 | musb_channel->len, | 322 | musb_channel->len, |
@@ -363,7 +386,7 @@ dma_controller_create(struct musb *musb, void __iomem *base) | |||
363 | struct musb_dma_controller *controller; | 386 | struct musb_dma_controller *controller; |
364 | struct device *dev = musb->controller; | 387 | struct device *dev = musb->controller; |
365 | struct platform_device *pdev = to_platform_device(dev); | 388 | struct platform_device *pdev = to_platform_device(dev); |
366 | int irq = platform_get_irq(pdev, 1); | 389 | int irq = platform_get_irq_byname(pdev, "dma"); |
367 | 390 | ||
368 | if (irq == 0) { | 391 | if (irq == 0) { |
369 | dev_err(dev, "No DMA interrupt line!\n"); | 392 | dev_err(dev, "No DMA interrupt line!\n"); |
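This file also raises channel->max_len from 0x10000 to 0x100000 (64 KiB to 1 MiB), and dma_controller_create() now resolves its interrupt with platform_get_irq_byname(pdev, "dma") instead of a positional index, so the platform device handed to the core must carry a named IRQ resource. A hypothetical board-side sketch of what that resource array could look like (the IRQ numbers and the "mc" name for the core interrupt are assumptions; only "dma" appears in the hunk):

    static struct resource musb_resources[] = {
            /* core controller interrupt (name assumed) */
            { .name = "mc",  .start = 92, .end = 92, .flags = IORESOURCE_IRQ },
            /* Mentor DMA interrupt, looked up above by name */
            { .name = "dma", .start = 93, .end = 93, .flags = IORESOURCE_IRQ },
    };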
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index f763d62f151c..320fd4afb93f 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -31,7 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) | 34 | #if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) |
35 | #include "omap2430.h" | 35 | #include "omap2430.h" |
36 | #endif | 36 | #endif |
37 | 37 | ||
@@ -94,24 +94,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase, | |||
94 | { | 94 | { |
95 | musb_writew(mbase, | 95 | musb_writew(mbase, |
96 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW), | 96 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW), |
97 | ((u16)((u32) dma_addr & 0xFFFF))); | 97 | dma_addr); |
98 | musb_writew(mbase, | 98 | musb_writew(mbase, |
99 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH), | 99 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH), |
100 | ((u16)(((u32) dma_addr >> 16) & 0xFFFF))); | 100 | (dma_addr >> 16)); |
101 | } | 101 | } |
102 | 102 | ||
103 | static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel) | 103 | static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel) |
104 | { | 104 | { |
105 | return musb_readl(mbase, | 105 | u32 count = musb_readw(mbase, |
106 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH)); | 106 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH)); |
107 | |||
108 | count = count << 16; | ||
109 | |||
110 | count |= musb_readw(mbase, | ||
111 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW)); | ||
112 | |||
113 | return count; | ||
107 | } | 114 | } |
108 | 115 | ||
109 | static inline void musb_write_hsdma_count(void __iomem *mbase, | 116 | static inline void musb_write_hsdma_count(void __iomem *mbase, |
110 | u8 bchannel, u32 len) | 117 | u8 bchannel, u32 len) |
111 | { | 118 | { |
112 | musb_writel(mbase, | 119 | musb_writew(mbase, |
120 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),len); | ||
121 | musb_writew(mbase, | ||
113 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH), | 122 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH), |
114 | len); | 123 | (len >> 16)); |
115 | } | 124 | } |
116 | 125 | ||
117 | #endif /* CONFIG_BLACKFIN */ | 126 | #endif /* CONFIG_BLACKFIN */ |
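The count accessors above stop treating the transfer count as a single 32-bit register and instead access COUNT_LOW and COUNT_HIGH as two 16-bit halves (the address accessors were already split and only lose their redundant casts). A worked example of the new write and read paths for len = 0x00012345:

    /* write:  COUNT_LOW  <- 0x2345   (musb_writew keeps the low 16 bits) */
    /*         COUNT_HIGH <- 0x0001   (len >> 16)                         */
    /* read :  (0x0001 << 16) | 0x2345 == 0x00012345                      */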
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 2111a241dd03..c5d4c44d0ffa 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -31,10 +31,19 @@ | |||
31 | #include <linux/list.h> | 31 | #include <linux/list.h> |
32 | #include <linux/clk.h> | 32 | #include <linux/clk.h> |
33 | #include <linux/io.h> | 33 | #include <linux/io.h> |
34 | #include <linux/platform_device.h> | ||
35 | #include <linux/dma-mapping.h> | ||
36 | #include <linux/pm_runtime.h> | ||
37 | #include <linux/err.h> | ||
34 | 38 | ||
35 | #include "musb_core.h" | 39 | #include "musb_core.h" |
36 | #include "omap2430.h" | 40 | #include "omap2430.h" |
37 | 41 | ||
42 | struct omap2430_glue { | ||
43 | struct device *dev; | ||
44 | struct platform_device *musb; | ||
45 | }; | ||
46 | #define glue_to_musb(g) platform_get_drvdata(g->musb) | ||
38 | 47 | ||
39 | static struct timer_list musb_idle_timer; | 48 | static struct timer_list musb_idle_timer; |
40 | 49 | ||
@@ -49,12 +58,8 @@ static void musb_do_idle(unsigned long _musb) | |||
49 | 58 | ||
50 | spin_lock_irqsave(&musb->lock, flags); | 59 | spin_lock_irqsave(&musb->lock, flags); |
51 | 60 | ||
52 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
53 | |||
54 | switch (musb->xceiv->state) { | 61 | switch (musb->xceiv->state) { |
55 | case OTG_STATE_A_WAIT_BCON: | 62 | case OTG_STATE_A_WAIT_BCON: |
56 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
57 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
58 | 63 | ||
59 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | 64 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); |
60 | if (devctl & MUSB_DEVCTL_BDEVICE) { | 65 | if (devctl & MUSB_DEVCTL_BDEVICE) { |
@@ -71,7 +76,7 @@ static void musb_do_idle(unsigned long _musb) | |||
71 | if (musb->port1_status & MUSB_PORT_STAT_RESUME) { | 76 | if (musb->port1_status & MUSB_PORT_STAT_RESUME) { |
72 | power = musb_readb(musb->mregs, MUSB_POWER); | 77 | power = musb_readb(musb->mregs, MUSB_POWER); |
73 | power &= ~MUSB_POWER_RESUME; | 78 | power &= ~MUSB_POWER_RESUME; |
74 | DBG(1, "root port resume stopped, power %02x\n", power); | 79 | dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power); |
75 | musb_writeb(musb->mregs, MUSB_POWER, power); | 80 | musb_writeb(musb->mregs, MUSB_POWER, power); |
76 | musb->is_active = 1; | 81 | musb->is_active = 1; |
77 | musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | 82 | musb->port1_status &= ~(USB_PORT_STAT_SUSPEND |
@@ -98,7 +103,7 @@ static void musb_do_idle(unsigned long _musb) | |||
98 | } | 103 | } |
99 | 104 | ||
100 | 105 | ||
101 | void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | 106 | static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout) |
102 | { | 107 | { |
103 | unsigned long default_timeout = jiffies + msecs_to_jiffies(3); | 108 | unsigned long default_timeout = jiffies + msecs_to_jiffies(3); |
104 | static unsigned long last_timer; | 109 | static unsigned long last_timer; |
@@ -109,7 +114,8 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | |||
109 | /* Never idle if active, or when VBUS timeout is not set as host */ | 114 | /* Never idle if active, or when VBUS timeout is not set as host */ |
110 | if (musb->is_active || ((musb->a_wait_bcon == 0) | 115 | if (musb->is_active || ((musb->a_wait_bcon == 0) |
111 | && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { | 116 | && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { |
112 | DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); | 117 | dev_dbg(musb->controller, "%s active, deleting timer\n", |
118 | otg_state_string(musb->xceiv->state)); | ||
113 | del_timer(&musb_idle_timer); | 119 | del_timer(&musb_idle_timer); |
114 | last_timer = jiffies; | 120 | last_timer = jiffies; |
115 | return; | 121 | return; |
@@ -119,27 +125,23 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | |||
119 | if (!timer_pending(&musb_idle_timer)) | 125 | if (!timer_pending(&musb_idle_timer)) |
120 | last_timer = timeout; | 126 | last_timer = timeout; |
121 | else { | 127 | else { |
122 | DBG(4, "Longer idle timer already pending, ignoring\n"); | 128 | dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n"); |
123 | return; | 129 | return; |
124 | } | 130 | } |
125 | } | 131 | } |
126 | last_timer = timeout; | 132 | last_timer = timeout; |
127 | 133 | ||
128 | DBG(4, "%s inactive, for idle timer for %lu ms\n", | 134 | dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n", |
129 | otg_state_string(musb), | 135 | otg_state_string(musb->xceiv->state), |
130 | (unsigned long)jiffies_to_msecs(timeout - jiffies)); | 136 | (unsigned long)jiffies_to_msecs(timeout - jiffies)); |
131 | mod_timer(&musb_idle_timer, timeout); | 137 | mod_timer(&musb_idle_timer, timeout); |
132 | } | 138 | } |
133 | 139 | ||
134 | void musb_platform_enable(struct musb *musb) | 140 | static void omap2430_musb_set_vbus(struct musb *musb, int is_on) |
135 | { | ||
136 | } | ||
137 | void musb_platform_disable(struct musb *musb) | ||
138 | { | ||
139 | } | ||
140 | static void omap_set_vbus(struct musb *musb, int is_on) | ||
141 | { | 141 | { |
142 | u8 devctl; | 142 | u8 devctl; |
143 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); | ||
144 | int ret = 1; | ||
143 | /* HDRC controls CPEN, but beware current surges during device | 145 | /* HDRC controls CPEN, but beware current surges during device |
144 | * connect. They can trigger transient overcurrent conditions | 146 | * connect. They can trigger transient overcurrent conditions |
145 | * that must be ignored. | 147 | * that must be ignored. |
@@ -148,12 +150,35 @@ static void omap_set_vbus(struct musb *musb, int is_on) | |||
148 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | 150 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); |
149 | 151 | ||
150 | if (is_on) { | 152 | if (is_on) { |
151 | musb->is_active = 1; | 153 | if (musb->xceiv->state == OTG_STATE_A_IDLE) { |
152 | musb->xceiv->default_a = 1; | 154 | /* start the session */ |
153 | musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; | 155 | devctl |= MUSB_DEVCTL_SESSION; |
154 | devctl |= MUSB_DEVCTL_SESSION; | 156 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); |
155 | 157 | /* | |
156 | MUSB_HST_MODE(musb); | 158 | * Wait for the musb to set as A device to enable the |
159 | * VBUS | ||
160 | */ | ||
161 | while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) { | ||
162 | |||
163 | cpu_relax(); | ||
164 | |||
165 | if (time_after(jiffies, timeout)) { | ||
166 | dev_err(musb->controller, | ||
167 | "configured as A device timeout"); | ||
168 | ret = -EINVAL; | ||
169 | break; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | if (ret && musb->xceiv->set_vbus) | ||
174 | otg_set_vbus(musb->xceiv, 1); | ||
175 | } else { | ||
176 | musb->is_active = 1; | ||
177 | musb->xceiv->default_a = 1; | ||
178 | musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; | ||
179 | devctl |= MUSB_DEVCTL_SESSION; | ||
180 | MUSB_HST_MODE(musb); | ||
181 | } | ||
157 | } else { | 182 | } else { |
158 | musb->is_active = 0; | 183 | musb->is_active = 0; |
159 | 184 | ||
@@ -169,15 +194,13 @@ static void omap_set_vbus(struct musb *musb, int is_on) | |||
169 | } | 194 | } |
170 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | 195 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); |
171 | 196 | ||
172 | DBG(1, "VBUS %s, devctl %02x " | 197 | dev_dbg(musb->controller, "VBUS %s, devctl %02x " |
173 | /* otg %3x conf %08x prcm %08x */ "\n", | 198 | /* otg %3x conf %08x prcm %08x */ "\n", |
174 | otg_state_string(musb), | 199 | otg_state_string(musb->xceiv->state), |
175 | musb_readb(musb->mregs, MUSB_DEVCTL)); | 200 | musb_readb(musb->mregs, MUSB_DEVCTL)); |
176 | } | 201 | } |
177 | 202 | ||
178 | static int musb_platform_resume(struct musb *musb); | 203 | static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode) |
179 | |||
180 | int musb_platform_set_mode(struct musb *musb, u8 musb_mode) | ||
181 | { | 204 | { |
182 | u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | 205 | u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); |
183 | 206 | ||
@@ -187,10 +210,95 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode) | |||
187 | return 0; | 210 | return 0; |
188 | } | 211 | } |
189 | 212 | ||
190 | int __init musb_platform_init(struct musb *musb, void *board_data) | 213 | static inline void omap2430_low_level_exit(struct musb *musb) |
191 | { | 214 | { |
192 | u32 l; | 215 | u32 l; |
193 | struct omap_musb_board_data *data = board_data; | 216 | |
217 | /* in any role */ | ||
218 | l = musb_readl(musb->mregs, OTG_FORCESTDBY); | ||
219 | l |= ENABLEFORCE; /* enable MSTANDBY */ | ||
220 | musb_writel(musb->mregs, OTG_FORCESTDBY, l); | ||
221 | } | ||
222 | |||
223 | static inline void omap2430_low_level_init(struct musb *musb) | ||
224 | { | ||
225 | u32 l; | ||
226 | |||
227 | l = musb_readl(musb->mregs, OTG_FORCESTDBY); | ||
228 | l &= ~ENABLEFORCE; /* disable MSTANDBY */ | ||
229 | musb_writel(musb->mregs, OTG_FORCESTDBY, l); | ||
230 | } | ||
231 | |||
232 | /* blocking notifier support */ | ||
233 | static int musb_otg_notifications(struct notifier_block *nb, | ||
234 | unsigned long event, void *unused) | ||
235 | { | ||
236 | struct musb *musb = container_of(nb, struct musb, nb); | ||
237 | struct device *dev = musb->controller; | ||
238 | struct musb_hdrc_platform_data *pdata = dev->platform_data; | ||
239 | struct omap_musb_board_data *data = pdata->board_data; | ||
240 | |||
241 | switch (event) { | ||
242 | case USB_EVENT_ID: | ||
243 | dev_dbg(musb->controller, "ID GND\n"); | ||
244 | |||
245 | if (is_otg_enabled(musb)) { | ||
246 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
247 | if (musb->gadget_driver) { | ||
248 | pm_runtime_get_sync(musb->controller); | ||
249 | otg_init(musb->xceiv); | ||
250 | omap2430_musb_set_vbus(musb, 1); | ||
251 | } | ||
252 | #endif | ||
253 | } else { | ||
254 | pm_runtime_get_sync(musb->controller); | ||
255 | otg_init(musb->xceiv); | ||
256 | omap2430_musb_set_vbus(musb, 1); | ||
257 | } | ||
258 | break; | ||
259 | |||
260 | case USB_EVENT_VBUS: | ||
261 | dev_dbg(musb->controller, "VBUS Connect\n"); | ||
262 | |||
263 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
264 | if (musb->gadget_driver) | ||
265 | pm_runtime_get_sync(musb->controller); | ||
266 | #endif | ||
267 | otg_init(musb->xceiv); | ||
268 | break; | ||
269 | |||
270 | case USB_EVENT_NONE: | ||
271 | dev_dbg(musb->controller, "VBUS Disconnect\n"); | ||
272 | |||
273 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
274 | if (is_otg_enabled(musb) || is_peripheral_enabled(musb)) | ||
275 | if (musb->gadget_driver) | ||
276 | #endif | ||
277 | { | ||
278 | pm_runtime_mark_last_busy(musb->controller); | ||
279 | pm_runtime_put_autosuspend(musb->controller); | ||
280 | } | ||
281 | |||
282 | if (data->interface_type == MUSB_INTERFACE_UTMI) { | ||
283 | if (musb->xceiv->set_vbus) | ||
284 | otg_set_vbus(musb->xceiv, 0); | ||
285 | } | ||
286 | otg_shutdown(musb->xceiv); | ||
287 | break; | ||
288 | default: | ||
289 | dev_dbg(musb->controller, "ID float\n"); | ||
290 | return NOTIFY_DONE; | ||
291 | } | ||
292 | |||
293 | return NOTIFY_OK; | ||
294 | } | ||
295 | |||
296 | static int omap2430_musb_init(struct musb *musb) | ||
297 | { | ||
298 | u32 l, status = 0; | ||
299 | struct device *dev = musb->controller; | ||
300 | struct musb_hdrc_platform_data *plat = dev->platform_data; | ||
301 | struct omap_musb_board_data *data = plat->board_data; | ||
194 | 302 | ||
195 | /* We require some kind of external transceiver, hooked | 303 | /* We require some kind of external transceiver, hooked |
196 | * up through ULPI. TWL4030-family PMICs include one, | 304 | * up through ULPI. TWL4030-family PMICs include one, |
@@ -202,22 +310,11 @@ int __init musb_platform_init(struct musb *musb, void *board_data) | |||
202 | return -ENODEV; | 310 | return -ENODEV; |
203 | } | 311 | } |
204 | 312 | ||
205 | musb_platform_resume(musb); | 313 | status = pm_runtime_get_sync(dev); |
206 | 314 | if (status < 0) { | |
207 | l = musb_readl(musb->mregs, OTG_SYSCONFIG); | 315 | dev_err(dev, "pm_runtime_get_sync FAILED"); |
208 | l &= ~ENABLEWAKEUP; /* disable wakeup */ | 316 | goto err1; |
209 | l &= ~NOSTDBY; /* remove possible nostdby */ | 317 | } |
210 | l |= SMARTSTDBY; /* enable smart standby */ | ||
211 | l &= ~AUTOIDLE; /* disable auto idle */ | ||
212 | l &= ~NOIDLE; /* remove possible noidle */ | ||
213 | l |= SMARTIDLE; /* enable smart idle */ | ||
214 | /* | ||
215 | * MUSB AUTOIDLE don't work in 3430. | ||
216 | * Workaround by Richard Woodruff/TI | ||
217 | */ | ||
218 | if (!cpu_is_omap3430()) | ||
219 | l |= AUTOIDLE; /* enable auto idle */ | ||
220 | musb_writel(musb->mregs, OTG_SYSCONFIG, l); | ||
221 | 318 | ||
222 | l = musb_readl(musb->mregs, OTG_INTERFSEL); | 319 | l = musb_readl(musb->mregs, OTG_INTERFSEL); |
223 | 320 | ||
@@ -239,86 +336,221 @@ int __init musb_platform_init(struct musb *musb, void *board_data) | |||
239 | musb_readl(musb->mregs, OTG_INTERFSEL), | 336 | musb_readl(musb->mregs, OTG_INTERFSEL), |
240 | musb_readl(musb->mregs, OTG_SIMENABLE)); | 337 | musb_readl(musb->mregs, OTG_SIMENABLE)); |
241 | 338 | ||
242 | if (is_host_enabled(musb)) | 339 | musb->nb.notifier_call = musb_otg_notifications; |
243 | musb->board_set_vbus = omap_set_vbus; | 340 | status = otg_register_notifier(musb->xceiv, &musb->nb); |
341 | |||
342 | if (status) | ||
343 | dev_dbg(musb->controller, "notification register failed\n"); | ||
244 | 344 | ||
245 | setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); | 345 | setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); |
246 | 346 | ||
247 | return 0; | 347 | return 0; |
348 | |||
349 | err1: | ||
350 | pm_runtime_disable(dev); | ||
351 | return status; | ||
248 | } | 352 | } |
249 | 353 | ||
250 | #ifdef CONFIG_PM | 354 | static void omap2430_musb_enable(struct musb *musb) |
251 | void musb_platform_save_context(struct musb *musb, | ||
252 | struct musb_context_registers *musb_context) | ||
253 | { | 355 | { |
254 | musb_context->otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG); | 356 | u8 devctl; |
255 | musb_context->otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY); | 357 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); |
358 | struct device *dev = musb->controller; | ||
359 | struct musb_hdrc_platform_data *pdata = dev->platform_data; | ||
360 | struct omap_musb_board_data *data = pdata->board_data; | ||
361 | |||
362 | switch (musb->xceiv->last_event) { | ||
363 | |||
364 | case USB_EVENT_ID: | ||
365 | otg_init(musb->xceiv); | ||
366 | if (data->interface_type == MUSB_INTERFACE_UTMI) { | ||
367 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
368 | /* start the session */ | ||
369 | devctl |= MUSB_DEVCTL_SESSION; | ||
370 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
371 | while (musb_readb(musb->mregs, MUSB_DEVCTL) & | ||
372 | MUSB_DEVCTL_BDEVICE) { | ||
373 | cpu_relax(); | ||
374 | |||
375 | if (time_after(jiffies, timeout)) { | ||
376 | dev_err(musb->controller, | ||
377 | "configured as A device timeout"); | ||
378 | break; | ||
379 | } | ||
380 | } | ||
381 | } | ||
382 | break; | ||
383 | |||
384 | case USB_EVENT_VBUS: | ||
385 | otg_init(musb->xceiv); | ||
386 | break; | ||
387 | |||
388 | default: | ||
389 | break; | ||
390 | } | ||
256 | } | 391 | } |
257 | 392 | ||
258 | void musb_platform_restore_context(struct musb *musb, | 393 | static void omap2430_musb_disable(struct musb *musb) |
259 | struct musb_context_registers *musb_context) | ||
260 | { | 394 | { |
261 | musb_writel(musb->mregs, OTG_SYSCONFIG, musb_context->otg_sysconfig); | 395 | if (musb->xceiv->last_event) |
262 | musb_writel(musb->mregs, OTG_FORCESTDBY, musb_context->otg_forcestandby); | 396 | otg_shutdown(musb->xceiv); |
263 | } | 397 | } |
264 | #endif | ||
265 | 398 | ||
266 | static int musb_platform_suspend(struct musb *musb) | 399 | static int omap2430_musb_exit(struct musb *musb) |
267 | { | 400 | { |
268 | u32 l; | 401 | del_timer_sync(&musb_idle_timer); |
269 | 402 | ||
270 | if (!musb->clock) | 403 | omap2430_low_level_exit(musb); |
271 | return 0; | 404 | otg_put_transceiver(musb->xceiv); |
272 | 405 | ||
273 | /* in any role */ | 406 | return 0; |
274 | l = musb_readl(musb->mregs, OTG_FORCESTDBY); | 407 | } |
275 | l |= ENABLEFORCE; /* enable MSTANDBY */ | ||
276 | musb_writel(musb->mregs, OTG_FORCESTDBY, l); | ||
277 | 408 | ||
278 | l = musb_readl(musb->mregs, OTG_SYSCONFIG); | 409 | static const struct musb_platform_ops omap2430_ops = { |
279 | l |= ENABLEWAKEUP; /* enable wakeup */ | 410 | .init = omap2430_musb_init, |
280 | musb_writel(musb->mregs, OTG_SYSCONFIG, l); | 411 | .exit = omap2430_musb_exit, |
281 | 412 | ||
282 | otg_set_suspend(musb->xceiv, 1); | 413 | .set_mode = omap2430_musb_set_mode, |
414 | .try_idle = omap2430_musb_try_idle, | ||
415 | |||
416 | .set_vbus = omap2430_musb_set_vbus, | ||
417 | |||
418 | .enable = omap2430_musb_enable, | ||
419 | .disable = omap2430_musb_disable, | ||
420 | }; | ||
283 | 421 | ||
284 | if (musb->set_clock) | 422 | static u64 omap2430_dmamask = DMA_BIT_MASK(32); |
285 | musb->set_clock(musb->clock, 0); | 423 | |
286 | else | 424 | static int __init omap2430_probe(struct platform_device *pdev) |
287 | clk_disable(musb->clock); | 425 | { |
426 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; | ||
427 | struct platform_device *musb; | ||
428 | struct omap2430_glue *glue; | ||
429 | int ret = -ENOMEM; | ||
430 | |||
431 | glue = kzalloc(sizeof(*glue), GFP_KERNEL); | ||
432 | if (!glue) { | ||
433 | dev_err(&pdev->dev, "failed to allocate glue context\n"); | ||
434 | goto err0; | ||
435 | } | ||
436 | |||
437 | musb = platform_device_alloc("musb-hdrc", -1); | ||
438 | if (!musb) { | ||
439 | dev_err(&pdev->dev, "failed to allocate musb device\n"); | ||
440 | goto err1; | ||
441 | } | ||
442 | |||
443 | musb->dev.parent = &pdev->dev; | ||
444 | musb->dev.dma_mask = &omap2430_dmamask; | ||
445 | musb->dev.coherent_dma_mask = omap2430_dmamask; | ||
446 | |||
447 | glue->dev = &pdev->dev; | ||
448 | glue->musb = musb; | ||
449 | |||
450 | pdata->platform_ops = &omap2430_ops; | ||
451 | |||
452 | platform_set_drvdata(pdev, glue); | ||
453 | |||
454 | ret = platform_device_add_resources(musb, pdev->resource, | ||
455 | pdev->num_resources); | ||
456 | if (ret) { | ||
457 | dev_err(&pdev->dev, "failed to add resources\n"); | ||
458 | goto err2; | ||
459 | } | ||
460 | |||
461 | ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); | ||
462 | if (ret) { | ||
463 | dev_err(&pdev->dev, "failed to add platform_data\n"); | ||
464 | goto err2; | ||
465 | } | ||
466 | |||
467 | ret = platform_device_add(musb); | ||
468 | if (ret) { | ||
469 | dev_err(&pdev->dev, "failed to register musb device\n"); | ||
470 | goto err2; | ||
471 | } | ||
472 | |||
473 | pm_runtime_enable(&pdev->dev); | ||
288 | 474 | ||
289 | return 0; | 475 | return 0; |
476 | |||
477 | err2: | ||
478 | platform_device_put(musb); | ||
479 | |||
480 | err1: | ||
481 | kfree(glue); | ||
482 | |||
483 | err0: | ||
484 | return ret; | ||
290 | } | 485 | } |
291 | 486 | ||
292 | static int musb_platform_resume(struct musb *musb) | 487 | static int __exit omap2430_remove(struct platform_device *pdev) |
293 | { | 488 | { |
294 | u32 l; | 489 | struct omap2430_glue *glue = platform_get_drvdata(pdev); |
295 | 490 | ||
296 | if (!musb->clock) | 491 | platform_device_del(glue->musb); |
297 | return 0; | 492 | platform_device_put(glue->musb); |
493 | pm_runtime_put(&pdev->dev); | ||
494 | pm_runtime_disable(&pdev->dev); | ||
495 | kfree(glue); | ||
298 | 496 | ||
299 | otg_set_suspend(musb->xceiv, 0); | 497 | return 0; |
498 | } | ||
300 | 499 | ||
301 | if (musb->set_clock) | 500 | #ifdef CONFIG_PM |
302 | musb->set_clock(musb->clock, 1); | ||
303 | else | ||
304 | clk_enable(musb->clock); | ||
305 | 501 | ||
306 | l = musb_readl(musb->mregs, OTG_SYSCONFIG); | 502 | static int omap2430_runtime_suspend(struct device *dev) |
307 | l &= ~ENABLEWAKEUP; /* disable wakeup */ | 503 | { |
308 | musb_writel(musb->mregs, OTG_SYSCONFIG, l); | 504 | struct omap2430_glue *glue = dev_get_drvdata(dev); |
505 | struct musb *musb = glue_to_musb(glue); | ||
309 | 506 | ||
310 | l = musb_readl(musb->mregs, OTG_FORCESTDBY); | 507 | omap2430_low_level_exit(musb); |
311 | l &= ~ENABLEFORCE; /* disable MSTANDBY */ | 508 | otg_set_suspend(musb->xceiv, 1); |
312 | musb_writel(musb->mregs, OTG_FORCESTDBY, l); | ||
313 | 509 | ||
314 | return 0; | 510 | return 0; |
315 | } | 511 | } |
316 | 512 | ||
317 | 513 | static int omap2430_runtime_resume(struct device *dev) | |
318 | int musb_platform_exit(struct musb *musb) | ||
319 | { | 514 | { |
515 | struct omap2430_glue *glue = dev_get_drvdata(dev); | ||
516 | struct musb *musb = glue_to_musb(glue); | ||
320 | 517 | ||
321 | musb_platform_suspend(musb); | 518 | omap2430_low_level_init(musb); |
519 | otg_set_suspend(musb->xceiv, 0); | ||
322 | 520 | ||
323 | return 0; | 521 | return 0; |
324 | } | 522 | } |
523 | |||
524 | static struct dev_pm_ops omap2430_pm_ops = { | ||
525 | .runtime_suspend = omap2430_runtime_suspend, | ||
526 | .runtime_resume = omap2430_runtime_resume, | ||
527 | }; | ||
528 | |||
529 | #define DEV_PM_OPS (&omap2430_pm_ops) | ||
530 | #else | ||
531 | #define DEV_PM_OPS NULL | ||
532 | #endif | ||
533 | |||
534 | static struct platform_driver omap2430_driver = { | ||
535 | .remove = __exit_p(omap2430_remove), | ||
536 | .driver = { | ||
537 | .name = "musb-omap2430", | ||
538 | .pm = DEV_PM_OPS, | ||
539 | }, | ||
540 | }; | ||
541 | |||
542 | MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer"); | ||
543 | MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); | ||
544 | MODULE_LICENSE("GPL v2"); | ||
545 | |||
546 | static int __init omap2430_init(void) | ||
547 | { | ||
548 | return platform_driver_probe(&omap2430_driver, omap2430_probe); | ||
549 | } | ||
550 | subsys_initcall(omap2430_init); | ||
551 | |||
552 | static void __exit omap2430_exit(void) | ||
553 | { | ||
554 | platform_driver_unregister(&omap2430_driver); | ||
555 | } | ||
556 | module_exit(omap2430_exit); | ||
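omap2430.c is the largest rewrite in this commit: the old musb_platform_* entry points become static omap2430_musb_* callbacks collected in omap2430_ops, the OTG_SYSCONFIG and clock juggling gives way to runtime PM plus a transceiver notifier, and the driver turns into a glue platform driver that spawns the generic "musb-hdrc" core as a child device. The resulting device chain is what glue_to_musb() and the runtime-PM callbacks rely on:

    /* platform_device "musb-omap2430" (glue)
     *   drvdata -> struct omap2430_glue
     *     glue->musb -> child platform_device "musb-hdrc"
     *       drvdata -> struct musb, filled in by the core driver */
    struct omap2430_glue *glue = dev_get_drvdata(dev);   /* dev = glue device */
    struct musb *musb = glue_to_musb(glue);               /* core driver state */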
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 3c48e77a0aa2..b410357cf016 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -21,10 +21,16 @@ | |||
21 | #include <linux/usb.h> | 21 | #include <linux/usb.h> |
22 | #include <linux/irq.h> | 22 | #include <linux/irq.h> |
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/dma-mapping.h> | ||
24 | 25 | ||
25 | #include "musb_core.h" | 26 | #include "musb_core.h" |
26 | 27 | ||
27 | static void tusb_source_power(struct musb *musb, int is_on); | 28 | struct tusb6010_glue { |
29 | struct device *dev; | ||
30 | struct platform_device *musb; | ||
31 | }; | ||
32 | |||
33 | static void tusb_musb_set_vbus(struct musb *musb, int is_on); | ||
28 | 34 | ||
29 | #define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) | 35 | #define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) |
30 | #define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) | 36 | #define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) |
@@ -50,7 +56,7 @@ u8 tusb_get_revision(struct musb *musb) | |||
50 | return rev; | 56 | return rev; |
51 | } | 57 | } |
52 | 58 | ||
53 | static int __init tusb_print_revision(struct musb *musb) | 59 | static int tusb_print_revision(struct musb *musb) |
54 | { | 60 | { |
55 | void __iomem *tbase = musb->ctrl_base; | 61 | void __iomem *tbase = musb->ctrl_base; |
56 | u8 rev; | 62 | u8 rev; |
@@ -100,7 +106,7 @@ static void tusb_wbus_quirk(struct musb *musb, int enabled) | |||
100 | tmp = phy_otg_ena & ~WBUS_QUIRK_MASK; | 106 | tmp = phy_otg_ena & ~WBUS_QUIRK_MASK; |
101 | tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2; | 107 | tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2; |
102 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); | 108 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); |
103 | DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n", | 109 | dev_dbg(musb->controller, "Enabled tusb wbus quirk ctrl %08x ena %08x\n", |
104 | musb_readl(tbase, TUSB_PHY_OTG_CTRL), | 110 | musb_readl(tbase, TUSB_PHY_OTG_CTRL), |
105 | musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); | 111 | musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); |
106 | } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE) | 112 | } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE) |
@@ -109,7 +115,7 @@ static void tusb_wbus_quirk(struct musb *musb, int enabled) | |||
109 | musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); | 115 | musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); |
110 | tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena; | 116 | tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena; |
111 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); | 117 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); |
112 | DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n", | 118 | dev_dbg(musb->controller, "Disabled tusb wbus quirk ctrl %08x ena %08x\n", |
113 | musb_readl(tbase, TUSB_PHY_OTG_CTRL), | 119 | musb_readl(tbase, TUSB_PHY_OTG_CTRL), |
114 | musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); | 120 | musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); |
115 | phy_otg_ctrl = 0; | 121 | phy_otg_ctrl = 0; |
@@ -166,13 +172,14 @@ static inline void tusb_fifo_read_unaligned(void __iomem *fifo, | |||
166 | 172 | ||
167 | void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf) | 173 | void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf) |
168 | { | 174 | { |
175 | struct musb *musb = hw_ep->musb; | ||
169 | void __iomem *ep_conf = hw_ep->conf; | 176 | void __iomem *ep_conf = hw_ep->conf; |
170 | void __iomem *fifo = hw_ep->fifo; | 177 | void __iomem *fifo = hw_ep->fifo; |
171 | u8 epnum = hw_ep->epnum; | 178 | u8 epnum = hw_ep->epnum; |
172 | 179 | ||
173 | prefetch(buf); | 180 | prefetch(buf); |
174 | 181 | ||
175 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | 182 | dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", |
176 | 'T', epnum, fifo, len, buf); | 183 | 'T', epnum, fifo, len, buf); |
177 | 184 | ||
178 | if (epnum) | 185 | if (epnum) |
@@ -215,11 +222,12 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf) | |||
215 | 222 | ||
216 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf) | 223 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf) |
217 | { | 224 | { |
225 | struct musb *musb = hw_ep->musb; | ||
218 | void __iomem *ep_conf = hw_ep->conf; | 226 | void __iomem *ep_conf = hw_ep->conf; |
219 | void __iomem *fifo = hw_ep->fifo; | 227 | void __iomem *fifo = hw_ep->fifo; |
220 | u8 epnum = hw_ep->epnum; | 228 | u8 epnum = hw_ep->epnum; |
221 | 229 | ||
222 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | 230 | dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", |
223 | 'R', epnum, fifo, len, buf); | 231 | 'R', epnum, fifo, len, buf); |
224 | 232 | ||
225 | if (epnum) | 233 | if (epnum) |
@@ -275,17 +283,6 @@ static int tusb_draw_power(struct otg_transceiver *x, unsigned mA) | |||
275 | void __iomem *tbase = musb->ctrl_base; | 283 | void __iomem *tbase = musb->ctrl_base; |
276 | u32 reg; | 284 | u32 reg; |
277 | 285 | ||
278 | /* | ||
279 | * Keep clock active when enabled. Note that this is not tied to | ||
280 | * drawing VBUS, as with OTG mA can be less than musb->min_power. | ||
281 | */ | ||
282 | if (musb->set_clock) { | ||
283 | if (mA) | ||
284 | musb->set_clock(musb->clock, 1); | ||
285 | else | ||
286 | musb->set_clock(musb->clock, 0); | ||
287 | } | ||
288 | |||
289 | /* tps65030 seems to consume max 100mA, with maybe 60mA available | 286 | /* tps65030 seems to consume max 100mA, with maybe 60mA available |
290 | * (measured on one board) for things other than tps and tusb. | 287 | * (measured on one board) for things other than tps and tusb. |
291 | * | 288 | * |
@@ -309,7 +306,7 @@ static int tusb_draw_power(struct otg_transceiver *x, unsigned mA) | |||
309 | } | 306 | } |
310 | musb_writel(tbase, TUSB_PRCM_MNGMT, reg); | 307 | musb_writel(tbase, TUSB_PRCM_MNGMT, reg); |
311 | 308 | ||
312 | DBG(2, "draw max %d mA VBUS\n", mA); | 309 | dev_dbg(musb->controller, "draw max %d mA VBUS\n", mA); |
313 | return 0; | 310 | return 0; |
314 | } | 311 | } |
315 | 312 | ||
@@ -348,7 +345,7 @@ static void tusb_set_clock_source(struct musb *musb, unsigned mode) | |||
348 | * USB link is not suspended ... and tells us the relevant wakeup | 345 | * USB link is not suspended ... and tells us the relevant wakeup |
349 | * events. SW_EN for voltage is handled separately. | 346 | * events. SW_EN for voltage is handled separately. |
350 | */ | 347 | */ |
351 | void tusb_allow_idle(struct musb *musb, u32 wakeup_enables) | 348 | static void tusb_allow_idle(struct musb *musb, u32 wakeup_enables) |
352 | { | 349 | { |
353 | void __iomem *tbase = musb->ctrl_base; | 350 | void __iomem *tbase = musb->ctrl_base; |
354 | u32 reg; | 351 | u32 reg; |
@@ -379,13 +376,13 @@ void tusb_allow_idle(struct musb *musb, u32 wakeup_enables) | |||
379 | reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE; | 376 | reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE; |
380 | musb_writel(tbase, TUSB_PRCM_MNGMT, reg); | 377 | musb_writel(tbase, TUSB_PRCM_MNGMT, reg); |
381 | 378 | ||
382 | DBG(6, "idle, wake on %02x\n", wakeup_enables); | 379 | dev_dbg(musb->controller, "idle, wake on %02x\n", wakeup_enables); |
383 | } | 380 | } |
384 | 381 | ||
385 | /* | 382 | /* |
386 | * Updates cable VBUS status. Caller must take care of locking. | 383 | * Updates cable VBUS status. Caller must take care of locking. |
387 | */ | 384 | */ |
388 | int musb_platform_get_vbus_status(struct musb *musb) | 385 | static int tusb_musb_vbus_status(struct musb *musb) |
389 | { | 386 | { |
390 | void __iomem *tbase = musb->ctrl_base; | 387 | void __iomem *tbase = musb->ctrl_base; |
391 | u32 otg_stat, prcm_mngmt; | 388 | u32 otg_stat, prcm_mngmt; |
@@ -426,12 +423,12 @@ static void musb_do_idle(unsigned long _musb) | |||
426 | if ((musb->a_wait_bcon != 0) | 423 | if ((musb->a_wait_bcon != 0) |
427 | && (musb->idle_timeout == 0 | 424 | && (musb->idle_timeout == 0 |
428 | || time_after(jiffies, musb->idle_timeout))) { | 425 | || time_after(jiffies, musb->idle_timeout))) { |
429 | DBG(4, "Nothing connected %s, turning off VBUS\n", | 426 | dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n", |
430 | otg_state_string(musb)); | 427 | otg_state_string(musb->xceiv->state)); |
431 | } | 428 | } |
432 | /* FALLTHROUGH */ | 429 | /* FALLTHROUGH */ |
433 | case OTG_STATE_A_IDLE: | 430 | case OTG_STATE_A_IDLE: |
434 | tusb_source_power(musb, 0); | 431 | tusb_musb_set_vbus(musb, 0); |
435 | default: | 432 | default: |
436 | break; | 433 | break; |
437 | } | 434 | } |
@@ -475,7 +472,7 @@ done: | |||
475 | * we don't want to treat that full speed J as a wakeup event. | 472 | * we don't want to treat that full speed J as a wakeup event. |
476 | * ... peripherals must draw only suspend current after 10 msec. | 473 | * ... peripherals must draw only suspend current after 10 msec. |
477 | */ | 474 | */ |
478 | void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | 475 | static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout) |
479 | { | 476 | { |
480 | unsigned long default_timeout = jiffies + msecs_to_jiffies(3); | 477 | unsigned long default_timeout = jiffies + msecs_to_jiffies(3); |
481 | static unsigned long last_timer; | 478 | static unsigned long last_timer; |
@@ -486,7 +483,8 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | |||
486 | /* Never idle if active, or when VBUS timeout is not set as host */ | 483 | /* Never idle if active, or when VBUS timeout is not set as host */ |
487 | if (musb->is_active || ((musb->a_wait_bcon == 0) | 484 | if (musb->is_active || ((musb->a_wait_bcon == 0) |
488 | && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { | 485 | && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { |
489 | DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); | 486 | dev_dbg(musb->controller, "%s active, deleting timer\n", |
487 | otg_state_string(musb->xceiv->state)); | ||
490 | del_timer(&musb_idle_timer); | 488 | del_timer(&musb_idle_timer); |
491 | last_timer = jiffies; | 489 | last_timer = jiffies; |
492 | return; | 490 | return; |
@@ -496,14 +494,14 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | |||
496 | if (!timer_pending(&musb_idle_timer)) | 494 | if (!timer_pending(&musb_idle_timer)) |
497 | last_timer = timeout; | 495 | last_timer = timeout; |
498 | else { | 496 | else { |
499 | DBG(4, "Longer idle timer already pending, ignoring\n"); | 497 | dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n"); |
500 | return; | 498 | return; |
501 | } | 499 | } |
502 | } | 500 | } |
503 | last_timer = timeout; | 501 | last_timer = timeout; |
504 | 502 | ||
505 | DBG(4, "%s inactive, for idle timer for %lu ms\n", | 503 | dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n", |
506 | otg_state_string(musb), | 504 | otg_state_string(musb->xceiv->state), |
507 | (unsigned long)jiffies_to_msecs(timeout - jiffies)); | 505 | (unsigned long)jiffies_to_msecs(timeout - jiffies)); |
508 | mod_timer(&musb_idle_timer, timeout); | 506 | mod_timer(&musb_idle_timer, timeout); |
509 | } | 507 | } |
@@ -515,7 +513,7 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | |||
515 | | TUSB_DEV_OTG_TIMER_ENABLE) \ | 513 | | TUSB_DEV_OTG_TIMER_ENABLE) \ |
516 | : 0) | 514 | : 0) |
517 | 515 | ||
518 | static void tusb_source_power(struct musb *musb, int is_on) | 516 | static void tusb_musb_set_vbus(struct musb *musb, int is_on) |
519 | { | 517 | { |
520 | void __iomem *tbase = musb->ctrl_base; | 518 | void __iomem *tbase = musb->ctrl_base; |
521 | u32 conf, prcm, timer; | 519 | u32 conf, prcm, timer; |
@@ -531,8 +529,6 @@ static void tusb_source_power(struct musb *musb, int is_on) | |||
531 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | 529 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); |
532 | 530 | ||
533 | if (is_on) { | 531 | if (is_on) { |
534 | if (musb->set_clock) | ||
535 | musb->set_clock(musb->clock, 1); | ||
536 | timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); | 532 | timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); |
537 | musb->xceiv->default_a = 1; | 533 | musb->xceiv->default_a = 1; |
538 | musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; | 534 | musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; |
@@ -571,8 +567,6 @@ static void tusb_source_power(struct musb *musb, int is_on) | |||
571 | 567 | ||
572 | devctl &= ~MUSB_DEVCTL_SESSION; | 568 | devctl &= ~MUSB_DEVCTL_SESSION; |
573 | conf &= ~TUSB_DEV_CONF_USB_HOST_MODE; | 569 | conf &= ~TUSB_DEV_CONF_USB_HOST_MODE; |
574 | if (musb->set_clock) | ||
575 | musb->set_clock(musb->clock, 0); | ||
576 | } | 570 | } |
577 | prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); | 571 | prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); |
578 | 572 | ||
@@ -581,8 +575,8 @@ static void tusb_source_power(struct musb *musb, int is_on) | |||
581 | musb_writel(tbase, TUSB_DEV_CONF, conf); | 575 | musb_writel(tbase, TUSB_DEV_CONF, conf); |
582 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | 576 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); |
583 | 577 | ||
584 | DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n", | 578 | dev_dbg(musb->controller, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n", |
585 | otg_state_string(musb), | 579 | otg_state_string(musb->xceiv->state), |
586 | musb_readb(musb->mregs, MUSB_DEVCTL), | 580 | musb_readb(musb->mregs, MUSB_DEVCTL), |
587 | musb_readl(tbase, TUSB_DEV_OTG_STAT), | 581 | musb_readl(tbase, TUSB_DEV_OTG_STAT), |
588 | conf, prcm); | 582 | conf, prcm); |
@@ -599,7 +593,7 @@ static void tusb_source_power(struct musb *musb, int is_on) | |||
599 | * and peripheral modes in non-OTG configurations by reconfiguring hardware | 593 | * and peripheral modes in non-OTG configurations by reconfiguring hardware |
600 | * and then setting musb->board_mode. For now, only support OTG mode. | 594 | * and then setting musb->board_mode. For now, only support OTG mode. |
601 | */ | 595 | */ |
602 | int musb_platform_set_mode(struct musb *musb, u8 musb_mode) | 596 | static int tusb_musb_set_mode(struct musb *musb, u8 musb_mode) |
603 | { | 597 | { |
604 | void __iomem *tbase = musb->ctrl_base; | 598 | void __iomem *tbase = musb->ctrl_base; |
605 | u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf; | 599 | u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf; |
@@ -642,7 +636,7 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode) | |||
642 | #endif | 636 | #endif |
643 | 637 | ||
644 | default: | 638 | default: |
645 | DBG(2, "Trying to set mode %i\n", musb_mode); | 639 | dev_dbg(musb->controller, "Trying to set mode %i\n", musb_mode); |
646 | return -EINVAL; | 640 | return -EINVAL; |
647 | } | 641 | } |
648 | 642 | ||
@@ -675,9 +669,9 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | |||
675 | default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS); | 669 | default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS); |
676 | else | 670 | else |
677 | default_a = is_host_enabled(musb); | 671 | default_a = is_host_enabled(musb); |
678 | DBG(2, "Default-%c\n", default_a ? 'A' : 'B'); | 672 | dev_dbg(musb->controller, "Default-%c\n", default_a ? 'A' : 'B'); |
679 | musb->xceiv->default_a = default_a; | 673 | musb->xceiv->default_a = default_a; |
680 | tusb_source_power(musb, default_a); | 674 | tusb_musb_set_vbus(musb, default_a); |
681 | 675 | ||
682 | /* Don't allow idling immediately */ | 676 | /* Don't allow idling immediately */ |
683 | if (default_a) | 677 | if (default_a) |
@@ -702,7 +696,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | |||
702 | #endif | 696 | #endif |
703 | 697 | ||
704 | if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) { | 698 | if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) { |
705 | DBG(1, "Forcing disconnect (no interrupt)\n"); | 699 | dev_dbg(musb->controller, "Forcing disconnect (no interrupt)\n"); |
706 | if (musb->xceiv->state != OTG_STATE_B_IDLE) { | 700 | if (musb->xceiv->state != OTG_STATE_B_IDLE) { |
707 | /* INTR_DISCONNECT can hide... */ | 701 | /* INTR_DISCONNECT can hide... */ |
708 | musb->xceiv->state = OTG_STATE_B_IDLE; | 702 | musb->xceiv->state = OTG_STATE_B_IDLE; |
@@ -710,19 +704,19 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | |||
710 | } | 704 | } |
711 | musb->is_active = 0; | 705 | musb->is_active = 0; |
712 | } | 706 | } |
713 | DBG(2, "vbus change, %s, otg %03x\n", | 707 | dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", |
714 | otg_state_string(musb), otg_stat); | 708 | otg_state_string(musb->xceiv->state), otg_stat); |
715 | idle_timeout = jiffies + (1 * HZ); | 709 | idle_timeout = jiffies + (1 * HZ); |
716 | schedule_work(&musb->irq_work); | 710 | schedule_work(&musb->irq_work); |
717 | 711 | ||
718 | } else /* A-dev state machine */ { | 712 | } else /* A-dev state machine */ { |
719 | DBG(2, "vbus change, %s, otg %03x\n", | 713 | dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", |
720 | otg_state_string(musb), otg_stat); | 714 | otg_state_string(musb->xceiv->state), otg_stat); |
721 | 715 | ||
722 | switch (musb->xceiv->state) { | 716 | switch (musb->xceiv->state) { |
723 | case OTG_STATE_A_IDLE: | 717 | case OTG_STATE_A_IDLE: |
724 | DBG(2, "Got SRP, turning on VBUS\n"); | 718 | dev_dbg(musb->controller, "Got SRP, turning on VBUS\n"); |
725 | musb_set_vbus(musb, 1); | 719 | musb_platform_set_vbus(musb, 1); |
726 | 720 | ||
727 | /* CONNECT can wake if a_wait_bcon is set */ | 721 | /* CONNECT can wake if a_wait_bcon is set */ |
728 | if (musb->a_wait_bcon != 0) | 722 | if (musb->a_wait_bcon != 0) |
@@ -748,11 +742,11 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | |||
748 | */ | 742 | */ |
749 | if (musb->vbuserr_retry) { | 743 | if (musb->vbuserr_retry) { |
750 | musb->vbuserr_retry--; | 744 | musb->vbuserr_retry--; |
751 | tusb_source_power(musb, 1); | 745 | tusb_musb_set_vbus(musb, 1); |
752 | } else { | 746 | } else { |
753 | musb->vbuserr_retry | 747 | musb->vbuserr_retry |
754 | = VBUSERR_RETRY_COUNT; | 748 | = VBUSERR_RETRY_COUNT; |
755 | tusb_source_power(musb, 0); | 749 | tusb_musb_set_vbus(musb, 0); |
756 | } | 750 | } |
757 | break; | 751 | break; |
758 | default: | 752 | default: |
@@ -765,7 +759,8 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | |||
765 | if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) { | 759 | if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) { |
766 | u8 devctl; | 760 | u8 devctl; |
767 | 761 | ||
768 | DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat); | 762 | dev_dbg(musb->controller, "%s timer, %03x\n", |
763 | otg_state_string(musb->xceiv->state), otg_stat); | ||
769 | 764 | ||
770 | switch (musb->xceiv->state) { | 765 | switch (musb->xceiv->state) { |
771 | case OTG_STATE_A_WAIT_VRISE: | 766 | case OTG_STATE_A_WAIT_VRISE: |
@@ -776,7 +771,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | |||
776 | if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) { | 771 | if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) { |
777 | if ((devctl & MUSB_DEVCTL_VBUS) | 772 | if ((devctl & MUSB_DEVCTL_VBUS) |
778 | != MUSB_DEVCTL_VBUS) { | 773 | != MUSB_DEVCTL_VBUS) { |
779 | DBG(2, "devctl %02x\n", devctl); | 774 | dev_dbg(musb->controller, "devctl %02x\n", devctl); |
780 | break; | 775 | break; |
781 | } | 776 | } |
782 | musb->xceiv->state = OTG_STATE_A_WAIT_BCON; | 777 | musb->xceiv->state = OTG_STATE_A_WAIT_BCON; |
@@ -786,7 +781,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | |||
786 | } else { | 781 | } else { |
787 | /* REVISIT report overcurrent to hub? */ | 782 | /* REVISIT report overcurrent to hub? */ |
788 | ERR("vbus too slow, devctl %02x\n", devctl); | 783 | ERR("vbus too slow, devctl %02x\n", devctl); |
789 | tusb_source_power(musb, 0); | 784 | tusb_musb_set_vbus(musb, 0); |
790 | } | 785 | } |
791 | break; | 786 | break; |
792 | case OTG_STATE_A_WAIT_BCON: | 787 | case OTG_STATE_A_WAIT_BCON: |
@@ -807,7 +802,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | |||
807 | return idle_timeout; | 802 | return idle_timeout; |
808 | } | 803 | } |
809 | 804 | ||
810 | static irqreturn_t tusb_interrupt(int irq, void *__hci) | 805 | static irqreturn_t tusb_musb_interrupt(int irq, void *__hci) |
811 | { | 806 | { |
812 | struct musb *musb = __hci; | 807 | struct musb *musb = __hci; |
813 | void __iomem *tbase = musb->ctrl_base; | 808 | void __iomem *tbase = musb->ctrl_base; |
@@ -821,7 +816,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci) | |||
821 | musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); | 816 | musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); |
822 | 817 | ||
823 | int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS; | 818 | int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS; |
824 | DBG(3, "TUSB IRQ %08x\n", int_src); | 819 | dev_dbg(musb->controller, "TUSB IRQ %08x\n", int_src); |
825 | 820 | ||
826 | musb->int_usb = (u8) int_src; | 821 | musb->int_usb = (u8) int_src; |
827 | 822 | ||
@@ -842,7 +837,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci) | |||
842 | reg = musb_readl(tbase, TUSB_SCRATCH_PAD); | 837 | reg = musb_readl(tbase, TUSB_SCRATCH_PAD); |
843 | if (reg == i) | 838 | if (reg == i) |
844 | break; | 839 | break; |
845 | DBG(6, "TUSB NOR not ready\n"); | 840 | dev_dbg(musb->controller, "TUSB NOR not ready\n"); |
846 | } | 841 | } |
847 | 842 | ||
848 | /* work around issue 13 (2nd half) */ | 843 | /* work around issue 13 (2nd half) */ |
@@ -854,7 +849,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci) | |||
854 | musb->is_active = 1; | 849 | musb->is_active = 1; |
855 | schedule_work(&musb->irq_work); | 850 | schedule_work(&musb->irq_work); |
856 | } | 851 | } |
857 | DBG(3, "wake %sactive %02x\n", | 852 | dev_dbg(musb->controller, "wake %sactive %02x\n", |
858 | musb->is_active ? "" : "in", reg); | 853 | musb->is_active ? "" : "in", reg); |
859 | 854 | ||
860 | /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */ | 855 | /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */ |
@@ -876,7 +871,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci) | |||
876 | u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC); | 871 | u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC); |
877 | u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK); | 872 | u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK); |
878 | 873 | ||
879 | DBG(3, "DMA IRQ %08x\n", dma_src); | 874 | dev_dbg(musb->controller, "DMA IRQ %08x\n", dma_src); |
880 | real_dma_src = ~real_dma_src & dma_src; | 875 | real_dma_src = ~real_dma_src & dma_src; |
881 | if (tusb_dma_omap() && real_dma_src) { | 876 | if (tusb_dma_omap() && real_dma_src) { |
882 | int tx_source = (real_dma_src & 0xffff); | 877 | int tx_source = (real_dma_src & 0xffff); |
@@ -884,7 +879,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci) | |||
884 | 879 | ||
885 | for (i = 1; i <= 15; i++) { | 880 | for (i = 1; i <= 15; i++) { |
886 | if (tx_source & (1 << i)) { | 881 | if (tx_source & (1 << i)) { |
887 | DBG(3, "completing ep%i %s\n", i, "tx"); | 882 | dev_dbg(musb->controller, "completing ep%i %s\n", i, "tx"); |
888 | musb_dma_completion(musb, i, 1); | 883 | musb_dma_completion(musb, i, 1); |
889 | } | 884 | } |
890 | } | 885 | } |
@@ -911,7 +906,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci) | |||
911 | musb_writel(tbase, TUSB_INT_SRC_CLEAR, | 906 | musb_writel(tbase, TUSB_INT_SRC_CLEAR, |
912 | int_src & ~TUSB_INT_MASK_RESERVED_BITS); | 907 | int_src & ~TUSB_INT_MASK_RESERVED_BITS); |
913 | 908 | ||
914 | musb_platform_try_idle(musb, idle_timeout); | 909 | tusb_musb_try_idle(musb, idle_timeout); |
915 | 910 | ||
916 | musb_writel(tbase, TUSB_INT_MASK, int_mask); | 911 | musb_writel(tbase, TUSB_INT_MASK, int_mask); |
917 | spin_unlock_irqrestore(&musb->lock, flags); | 912 | spin_unlock_irqrestore(&musb->lock, flags); |
@@ -926,7 +921,7 @@ static int dma_off; | |||
926 | * REVISIT: | 921 | * REVISIT: |
927 | * - Check what is unnecessary in MGC_HdrcStart() | 922 | * - Check what is unnecessary in MGC_HdrcStart() |
928 | */ | 923 | */ |
929 | void musb_platform_enable(struct musb *musb) | 924 | static void tusb_musb_enable(struct musb *musb) |
930 | { | 925 | { |
931 | void __iomem *tbase = musb->ctrl_base; | 926 | void __iomem *tbase = musb->ctrl_base; |
932 | 927 | ||
@@ -952,7 +947,7 @@ void musb_platform_enable(struct musb *musb) | |||
952 | musb_writel(tbase, TUSB_INT_CTRL_CONF, | 947 | musb_writel(tbase, TUSB_INT_CTRL_CONF, |
953 | TUSB_INT_CTRL_CONF_INT_RELCYC(0)); | 948 | TUSB_INT_CTRL_CONF_INT_RELCYC(0)); |
954 | 949 | ||
955 | set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); | 950 | irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); |
956 | 951 | ||
957 | /* maybe force into the Default-A OTG state machine */ | 952 | /* maybe force into the Default-A OTG state machine */ |
958 | if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) | 953 | if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) |
@@ -970,7 +965,7 @@ void musb_platform_enable(struct musb *musb) | |||
970 | /* | 965 | /* |
971 | * Disables TUSB6010. Caller must take care of locking. | 966 | * Disables TUSB6010. Caller must take care of locking. |
972 | */ | 967 | */ |
973 | void musb_platform_disable(struct musb *musb) | 968 | static void tusb_musb_disable(struct musb *musb) |
974 | { | 969 | { |
975 | void __iomem *tbase = musb->ctrl_base; | 970 | void __iomem *tbase = musb->ctrl_base; |
976 | 971 | ||
@@ -995,7 +990,7 @@ void musb_platform_disable(struct musb *musb) | |||
995 | * Sets up TUSB6010 CPU interface specific signals and registers | 990 | * Sets up TUSB6010 CPU interface specific signals and registers |
996 | * Note: Settings optimized for OMAP24xx | 991 | * Note: Settings optimized for OMAP24xx |
997 | */ | 992 | */ |
998 | static void __init tusb_setup_cpu_interface(struct musb *musb) | 993 | static void tusb_setup_cpu_interface(struct musb *musb) |
999 | { | 994 | { |
1000 | void __iomem *tbase = musb->ctrl_base; | 995 | void __iomem *tbase = musb->ctrl_base; |
1001 | 996 | ||
@@ -1022,7 +1017,7 @@ static void __init tusb_setup_cpu_interface(struct musb *musb) | |||
1022 | musb_writel(tbase, TUSB_WAIT_COUNT, 1); | 1017 | musb_writel(tbase, TUSB_WAIT_COUNT, 1); |
1023 | } | 1018 | } |
1024 | 1019 | ||
1025 | static int __init tusb_start(struct musb *musb) | 1020 | static int tusb_musb_start(struct musb *musb) |
1026 | { | 1021 | { |
1027 | void __iomem *tbase = musb->ctrl_base; | 1022 | void __iomem *tbase = musb->ctrl_base; |
1028 | int ret = 0; | 1023 | int ret = 0; |
@@ -1091,7 +1086,7 @@ err: | |||
1091 | return -ENODEV; | 1086 | return -ENODEV; |
1092 | } | 1087 | } |
1093 | 1088 | ||
1094 | int __init musb_platform_init(struct musb *musb, void *board_data) | 1089 | static int tusb_musb_init(struct musb *musb) |
1095 | { | 1090 | { |
1096 | struct platform_device *pdev; | 1091 | struct platform_device *pdev; |
1097 | struct resource *mem; | 1092 | struct resource *mem; |
@@ -1131,16 +1126,14 @@ int __init musb_platform_init(struct musb *musb, void *board_data) | |||
1131 | */ | 1126 | */ |
1132 | musb->mregs += TUSB_BASE_OFFSET; | 1127 | musb->mregs += TUSB_BASE_OFFSET; |
1133 | 1128 | ||
1134 | ret = tusb_start(musb); | 1129 | ret = tusb_musb_start(musb); |
1135 | if (ret) { | 1130 | if (ret) { |
1136 | printk(KERN_ERR "Could not start tusb6010 (%d)\n", | 1131 | printk(KERN_ERR "Could not start tusb6010 (%d)\n", |
1137 | ret); | 1132 | ret); |
1138 | goto done; | 1133 | goto done; |
1139 | } | 1134 | } |
1140 | musb->isr = tusb_interrupt; | 1135 | musb->isr = tusb_musb_interrupt; |
1141 | 1136 | ||
1142 | if (is_host_enabled(musb)) | ||
1143 | musb->board_set_vbus = tusb_source_power; | ||
1144 | if (is_peripheral_enabled(musb)) { | 1137 | if (is_peripheral_enabled(musb)) { |
1145 | musb->xceiv->set_power = tusb_draw_power; | 1138 | musb->xceiv->set_power = tusb_draw_power; |
1146 | the_musb = musb; | 1139 | the_musb = musb; |
@@ -1152,12 +1145,14 @@ done: | |||
1152 | if (ret < 0) { | 1145 | if (ret < 0) { |
1153 | if (sync) | 1146 | if (sync) |
1154 | iounmap(sync); | 1147 | iounmap(sync); |
1148 | |||
1149 | otg_put_transceiver(musb->xceiv); | ||
1155 | usb_nop_xceiv_unregister(); | 1150 | usb_nop_xceiv_unregister(); |
1156 | } | 1151 | } |
1157 | return ret; | 1152 | return ret; |
1158 | } | 1153 | } |
1159 | 1154 | ||
1160 | int musb_platform_exit(struct musb *musb) | 1155 | static int tusb_musb_exit(struct musb *musb) |
1161 | { | 1156 | { |
1162 | del_timer_sync(&musb_idle_timer); | 1157 | del_timer_sync(&musb_idle_timer); |
1163 | the_musb = NULL; | 1158 | the_musb = NULL; |
@@ -1166,6 +1161,120 @@ int musb_platform_exit(struct musb *musb) | |||
1166 | musb->board_set_power(0); | 1161 | musb->board_set_power(0); |
1167 | 1162 | ||
1168 | iounmap(musb->sync_va); | 1163 | iounmap(musb->sync_va); |
1164 | |||
1165 | otg_put_transceiver(musb->xceiv); | ||
1169 | usb_nop_xceiv_unregister(); | 1166 | usb_nop_xceiv_unregister(); |
1170 | return 0; | 1167 | return 0; |
1171 | } | 1168 | } |
1169 | |||
1170 | static const struct musb_platform_ops tusb_ops = { | ||
1171 | .init = tusb_musb_init, | ||
1172 | .exit = tusb_musb_exit, | ||
1173 | |||
1174 | .enable = tusb_musb_enable, | ||
1175 | .disable = tusb_musb_disable, | ||
1176 | |||
1177 | .set_mode = tusb_musb_set_mode, | ||
1178 | .try_idle = tusb_musb_try_idle, | ||
1179 | |||
1180 | .vbus_status = tusb_musb_vbus_status, | ||
1181 | .set_vbus = tusb_musb_set_vbus, | ||
1182 | }; | ||
1183 | |||
1184 | static u64 tusb_dmamask = DMA_BIT_MASK(32); | ||
1185 | |||
1186 | static int __init tusb_probe(struct platform_device *pdev) | ||
1187 | { | ||
1188 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; | ||
1189 | struct platform_device *musb; | ||
1190 | struct tusb6010_glue *glue; | ||
1191 | |||
1192 | int ret = -ENOMEM; | ||
1193 | |||
1194 | glue = kzalloc(sizeof(*glue), GFP_KERNEL); | ||
1195 | if (!glue) { | ||
1196 | dev_err(&pdev->dev, "failed to allocate glue context\n"); | ||
1197 | goto err0; | ||
1198 | } | ||
1199 | |||
1200 | musb = platform_device_alloc("musb-hdrc", -1); | ||
1201 | if (!musb) { | ||
1202 | dev_err(&pdev->dev, "failed to allocate musb device\n"); | ||
1203 | goto err1; | ||
1204 | } | ||
1205 | |||
1206 | musb->dev.parent = &pdev->dev; | ||
1207 | musb->dev.dma_mask = &tusb_dmamask; | ||
1208 | musb->dev.coherent_dma_mask = tusb_dmamask; | ||
1209 | |||
1210 | glue->dev = &pdev->dev; | ||
1211 | glue->musb = musb; | ||
1212 | |||
1213 | pdata->platform_ops = &tusb_ops; | ||
1214 | |||
1215 | platform_set_drvdata(pdev, glue); | ||
1216 | |||
1217 | ret = platform_device_add_resources(musb, pdev->resource, | ||
1218 | pdev->num_resources); | ||
1219 | if (ret) { | ||
1220 | dev_err(&pdev->dev, "failed to add resources\n"); | ||
1221 | goto err2; | ||
1222 | } | ||
1223 | |||
1224 | ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); | ||
1225 | if (ret) { | ||
1226 | dev_err(&pdev->dev, "failed to add platform_data\n"); | ||
1227 | goto err2; | ||
1228 | } | ||
1229 | |||
1230 | ret = platform_device_add(musb); | ||
1231 | if (ret) { | ||
1232 | dev_err(&pdev->dev, "failed to register musb device\n"); | ||
1233 | goto err1; | ||
1234 | } | ||
1235 | |||
1236 | return 0; | ||
1237 | |||
1238 | err2: | ||
1239 | platform_device_put(musb); | ||
1240 | |||
1241 | err1: | ||
1242 | kfree(glue); | ||
1243 | |||
1244 | err0: | ||
1245 | return ret; | ||
1246 | } | ||
1247 | |||
1248 | static int __exit tusb_remove(struct platform_device *pdev) | ||
1249 | { | ||
1250 | struct tusb6010_glue *glue = platform_get_drvdata(pdev); | ||
1251 | |||
1252 | platform_device_del(glue->musb); | ||
1253 | platform_device_put(glue->musb); | ||
1254 | kfree(glue); | ||
1255 | |||
1256 | return 0; | ||
1257 | } | ||
1258 | |||
1259 | static struct platform_driver tusb_driver = { | ||
1260 | .remove = __exit_p(tusb_remove), | ||
1261 | .driver = { | ||
1262 | .name = "musb-tusb", | ||
1263 | }, | ||
1264 | }; | ||
1265 | |||
1266 | MODULE_DESCRIPTION("TUSB6010 MUSB Glue Layer"); | ||
1267 | MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); | ||
1268 | MODULE_LICENSE("GPL v2"); | ||
1269 | |||
1270 | static int __init tusb_init(void) | ||
1271 | { | ||
1272 | return platform_driver_probe(&tusb_driver, tusb_probe); | ||
1273 | } | ||
1274 | subsys_initcall(tusb_init); | ||
1275 | |||
1276 | static void __exit tusb_exit(void) | ||
1277 | { | ||
1278 | platform_driver_unregister(&tusb_driver); | ||
1279 | } | ||
1280 | module_exit(tusb_exit); | ||
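The tusb6010.c changes above retire the exported musb_platform_*() entry points in favour of static tusb_musb_*() callbacks published through a struct musb_platform_ops table, with the glue itself registered as a "musb-tusb" platform driver that creates the shared "musb-hdrc" child device. Below is a minimal sketch of that vtable pattern; the callback signatures are inferred from the hunks above, while the ops pointer on struct musb and the wrapper helper are illustrative assumptions, not code quoted from the patch.

/* Sketch only: the core's real struct musb and dispatch helpers differ. */
#include <linux/types.h>

struct musb;				/* forward declaration for the sketch */

struct musb_platform_ops {
	int	(*init)(struct musb *musb);
	int	(*exit)(struct musb *musb);
	void	(*enable)(struct musb *musb);
	void	(*disable)(struct musb *musb);
	int	(*set_mode)(struct musb *musb, u8 mode);
	void	(*try_idle)(struct musb *musb, unsigned long timeout);
	int	(*vbus_status)(struct musb *musb);
	void	(*set_vbus)(struct musb *musb, int is_on);
};

/* Toy stand-in for struct musb, just to show how a glue table is consumed. */
struct musb {
	const struct musb_platform_ops *ops;	/* assumed field name */
};

/* Hypothetical core-side wrapper: glue callbacks are optional, so test first. */
static inline void musb_platform_enable(struct musb *musb)
{
	if (musb->ops && musb->ops->enable)
		musb->ops->enable(musb);
}

Each glue fills in only the hooks its hardware needs (tusb_ops above leaves the rest unset), which is why tusb_probe() passes the table through pdata->platform_ops instead of relying on link-time symbols.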
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c index c061a88f2b0f..c784e6c03aac 100644 --- a/drivers/usb/musb/tusb6010_omap.c +++ b/drivers/usb/musb/tusb6010_omap.c | |||
@@ -65,7 +65,7 @@ static int tusb_omap_dma_start(struct dma_controller *c) | |||
65 | 65 | ||
66 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); | 66 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); |
67 | 67 | ||
68 | /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ | 68 | /* dev_dbg(musb->controller, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ |
69 | 69 | ||
70 | return 0; | 70 | return 0; |
71 | } | 71 | } |
@@ -76,7 +76,7 @@ static int tusb_omap_dma_stop(struct dma_controller *c) | |||
76 | 76 | ||
77 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); | 77 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); |
78 | 78 | ||
79 | /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ | 79 | /* dev_dbg(musb->controller, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ |
80 | 80 | ||
81 | return 0; | 81 | return 0; |
82 | } | 82 | } |
@@ -89,7 +89,7 @@ static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat) | |||
89 | u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); | 89 | u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); |
90 | 90 | ||
91 | if (reg != 0) { | 91 | if (reg != 0) { |
92 | DBG(3, "ep%i dmareq0 is busy for ep%i\n", | 92 | dev_dbg(musb->controller, "ep%i dmareq0 is busy for ep%i\n", |
93 | chdat->epnum, reg & 0xf); | 93 | chdat->epnum, reg & 0xf); |
94 | return -EAGAIN; | 94 | return -EAGAIN; |
95 | } | 95 | } |
@@ -143,7 +143,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data) | |||
143 | if (ch_status != OMAP_DMA_BLOCK_IRQ) | 143 | if (ch_status != OMAP_DMA_BLOCK_IRQ) |
144 | printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status); | 144 | printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status); |
145 | 145 | ||
146 | DBG(3, "ep%i %s dma callback ch: %i status: %x\n", | 146 | dev_dbg(musb->controller, "ep%i %s dma callback ch: %i status: %x\n", |
147 | chdat->epnum, chdat->tx ? "tx" : "rx", | 147 | chdat->epnum, chdat->tx ? "tx" : "rx", |
148 | ch, ch_status); | 148 | ch, ch_status); |
149 | 149 | ||
@@ -156,7 +156,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data) | |||
156 | 156 | ||
157 | /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */ | 157 | /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */ |
158 | if (unlikely(remaining > chdat->transfer_len)) { | 158 | if (unlikely(remaining > chdat->transfer_len)) { |
159 | DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n", | 159 | dev_dbg(musb->controller, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n", |
160 | chdat->tx ? "tx" : "rx", chdat->ch, | 160 | chdat->tx ? "tx" : "rx", chdat->ch, |
161 | remaining); | 161 | remaining); |
162 | remaining = 0; | 162 | remaining = 0; |
@@ -165,13 +165,13 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data) | |||
165 | channel->actual_len = chdat->transfer_len - remaining; | 165 | channel->actual_len = chdat->transfer_len - remaining; |
166 | pio = chdat->len - channel->actual_len; | 166 | pio = chdat->len - channel->actual_len; |
167 | 167 | ||
168 | DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len); | 168 | dev_dbg(musb->controller, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len); |
169 | 169 | ||
170 | /* Transfer remaining 1 - 31 bytes */ | 170 | /* Transfer remaining 1 - 31 bytes */ |
171 | if (pio > 0 && pio < 32) { | 171 | if (pio > 0 && pio < 32) { |
172 | u8 *buf; | 172 | u8 *buf; |
173 | 173 | ||
174 | DBG(3, "Using PIO for remaining %lu bytes\n", pio); | 174 | dev_dbg(musb->controller, "Using PIO for remaining %lu bytes\n", pio); |
175 | buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len; | 175 | buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len; |
176 | if (chdat->tx) { | 176 | if (chdat->tx) { |
177 | dma_unmap_single(dev, chdat->dma_addr, | 177 | dma_unmap_single(dev, chdat->dma_addr, |
@@ -209,7 +209,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data) | |||
209 | u16 csr; | 209 | u16 csr; |
210 | 210 | ||
211 | if (chdat->tx) { | 211 | if (chdat->tx) { |
212 | DBG(3, "terminating short tx packet\n"); | 212 | dev_dbg(musb->controller, "terminating short tx packet\n"); |
213 | musb_ep_select(mbase, chdat->epnum); | 213 | musb_ep_select(mbase, chdat->epnum); |
214 | csr = musb_readw(hw_ep->regs, MUSB_TXCSR); | 214 | csr = musb_readw(hw_ep->regs, MUSB_TXCSR); |
215 | csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY | 215 | csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY |
@@ -264,7 +264,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, | |||
264 | 264 | ||
265 | dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining); | 265 | dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining); |
266 | if (dma_remaining) { | 266 | if (dma_remaining) { |
267 | DBG(2, "Busy %s dma ch%i, not using: %08x\n", | 267 | dev_dbg(musb->controller, "Busy %s dma ch%i, not using: %08x\n", |
268 | chdat->tx ? "tx" : "rx", chdat->ch, | 268 | chdat->tx ? "tx" : "rx", chdat->ch, |
269 | dma_remaining); | 269 | dma_remaining); |
270 | return false; | 270 | return false; |
@@ -283,7 +283,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, | |||
283 | sync_dev = chdat->sync_dev; | 283 | sync_dev = chdat->sync_dev; |
284 | } else { | 284 | } else { |
285 | if (tusb_omap_use_shared_dmareq(chdat) != 0) { | 285 | if (tusb_omap_use_shared_dmareq(chdat) != 0) { |
286 | DBG(3, "could not get dma for ep%i\n", chdat->epnum); | 286 | dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum); |
287 | return false; | 287 | return false; |
288 | } | 288 | } |
289 | if (tusb_dma->ch < 0) { | 289 | if (tusb_dma->ch < 0) { |
@@ -326,7 +326,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, | |||
326 | 326 | ||
327 | dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */ | 327 | dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */ |
328 | 328 | ||
329 | DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", | 329 | dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", |
330 | chdat->epnum, chdat->tx ? "tx" : "rx", | 330 | chdat->epnum, chdat->tx ? "tx" : "rx", |
331 | ch, dma_addr, chdat->transfer_len, len, | 331 | ch, dma_addr, chdat->transfer_len, len, |
332 | chdat->transfer_packet_sz, packet_sz); | 332 | chdat->transfer_packet_sz, packet_sz); |
@@ -370,7 +370,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, | |||
370 | dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */ | 370 | dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */ |
371 | } | 371 | } |
372 | 372 | ||
373 | DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n", | 373 | dev_dbg(musb->controller, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n", |
374 | chdat->epnum, chdat->tx ? "tx" : "rx", | 374 | chdat->epnum, chdat->tx ? "tx" : "rx", |
375 | (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16, | 375 | (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16, |
376 | ((dma_addr & 0x3) == 0) ? "sync" : "async", | 376 | ((dma_addr & 0x3) == 0) ? "sync" : "async", |
@@ -525,7 +525,7 @@ tusb_omap_dma_allocate(struct dma_controller *c, | |||
525 | 525 | ||
526 | /* REVISIT: Why does dmareq5 not work? */ | 526 | /* REVISIT: Why does dmareq5 not work? */ |
527 | if (hw_ep->epnum == 0) { | 527 | if (hw_ep->epnum == 0) { |
528 | DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx"); | 528 | dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx"); |
529 | return NULL; | 529 | return NULL; |
530 | } | 530 | } |
531 | 531 | ||
@@ -585,7 +585,7 @@ tusb_omap_dma_allocate(struct dma_controller *c, | |||
585 | chdat->ch = -1; | 585 | chdat->ch = -1; |
586 | } | 586 | } |
587 | 587 | ||
588 | DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n", | 588 | dev_dbg(musb->controller, "ep%i %s dma: %s dma%i dmareq%i sync%i\n", |
589 | chdat->epnum, | 589 | chdat->epnum, |
590 | chdat->tx ? "tx" : "rx", | 590 | chdat->tx ? "tx" : "rx", |
591 | chdat->ch >= 0 ? "dedicated" : "shared", | 591 | chdat->ch >= 0 ? "dedicated" : "shared", |
@@ -598,7 +598,7 @@ tusb_omap_dma_allocate(struct dma_controller *c, | |||
598 | free_dmareq: | 598 | free_dmareq: |
599 | tusb_omap_dma_free_dmareq(chdat); | 599 | tusb_omap_dma_free_dmareq(chdat); |
600 | 600 | ||
601 | DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum); | 601 | dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum); |
602 | channel->status = MUSB_DMA_STATUS_UNKNOWN; | 602 | channel->status = MUSB_DMA_STATUS_UNKNOWN; |
603 | 603 | ||
604 | return NULL; | 604 | return NULL; |
@@ -611,7 +611,7 @@ static void tusb_omap_dma_release(struct dma_channel *channel) | |||
611 | void __iomem *tbase = musb->ctrl_base; | 611 | void __iomem *tbase = musb->ctrl_base; |
612 | u32 reg; | 612 | u32 reg; |
613 | 613 | ||
614 | DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch); | 614 | dev_dbg(musb->controller, "ep%i ch%i\n", chdat->epnum, chdat->ch); |
615 | 615 | ||
616 | reg = musb_readl(tbase, TUSB_DMA_INT_MASK); | 616 | reg = musb_readl(tbase, TUSB_DMA_INT_MASK); |
617 | if (chdat->tx) | 617 | if (chdat->tx) |
@@ -680,7 +680,7 @@ dma_controller_create(struct musb *musb, void __iomem *base) | |||
680 | 680 | ||
681 | tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL); | 681 | tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL); |
682 | if (!tusb_dma) | 682 | if (!tusb_dma) |
683 | goto cleanup; | 683 | goto out; |
684 | 684 | ||
685 | tusb_dma->musb = musb; | 685 | tusb_dma->musb = musb; |
686 | tusb_dma->tbase = musb->ctrl_base; | 686 | tusb_dma->tbase = musb->ctrl_base; |
@@ -721,6 +721,6 @@ dma_controller_create(struct musb *musb, void __iomem *base) | |||
721 | 721 | ||
722 | cleanup: | 722 | cleanup: |
723 | dma_controller_destroy(&tusb_dma->controller); | 723 | dma_controller_destroy(&tusb_dma->controller); |
724 | 724 | out: | |
725 | return NULL; | 725 | return NULL; |
726 | } | 726 | } |
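The tusb6010_omap.c hunk is a mechanical logging conversion: the driver-private DBG(level, fmt, ...) calls become dev_dbg() calls keyed to musb->controller, so messages are attributed to the owning struct device and gated by dynamic debug rather than a numeric verbosity level. A hedged before/after sketch follows; the DBG definition here is a simplified stand-in, not the original macro.

#include <linux/device.h>
#include <linux/printk.h>

static unsigned int musb_debug;		/* assumed verbosity knob for the old scheme */

/* Old style (simplified): verbosity gated by a driver-private level. */
#define DBG(level, fmt, args...) \
	do { if ((level) <= musb_debug) pr_debug(fmt, ##args); } while (0)

static void debug_example(struct device *controller, int epnum)
{
	DBG(3, "ep%i: Could not get a DMA channel\n", epnum);			/* before */
	dev_dbg(controller, "ep%i: Could not get a DMA channel\n", epnum);	/* after */
}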
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c new file mode 100644 index 000000000000..f7e04bf34a13 --- /dev/null +++ b/drivers/usb/musb/ux500.c | |||
@@ -0,0 +1,218 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 ST-Ericsson AB | ||
3 | * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com> | ||
4 | * | ||
5 | * Based on omap2430.c | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/module.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/clk.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | |||
29 | #include "musb_core.h" | ||
30 | |||
31 | struct ux500_glue { | ||
32 | struct device *dev; | ||
33 | struct platform_device *musb; | ||
34 | struct clk *clk; | ||
35 | }; | ||
36 | #define glue_to_musb(g) platform_get_drvdata(g->musb) | ||
37 | |||
38 | static int ux500_musb_init(struct musb *musb) | ||
39 | { | ||
40 | musb->xceiv = otg_get_transceiver(); | ||
41 | if (!musb->xceiv) { | ||
42 | pr_err("HS USB OTG: no transceiver configured\n"); | ||
43 | return -ENODEV; | ||
44 | } | ||
45 | |||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static int ux500_musb_exit(struct musb *musb) | ||
50 | { | ||
51 | otg_put_transceiver(musb->xceiv); | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static const struct musb_platform_ops ux500_ops = { | ||
57 | .init = ux500_musb_init, | ||
58 | .exit = ux500_musb_exit, | ||
59 | }; | ||
60 | |||
61 | static int __init ux500_probe(struct platform_device *pdev) | ||
62 | { | ||
63 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; | ||
64 | struct platform_device *musb; | ||
65 | struct ux500_glue *glue; | ||
66 | struct clk *clk; | ||
67 | |||
68 | int ret = -ENOMEM; | ||
69 | |||
70 | glue = kzalloc(sizeof(*glue), GFP_KERNEL); | ||
71 | if (!glue) { | ||
72 | dev_err(&pdev->dev, "failed to allocate glue context\n"); | ||
73 | goto err0; | ||
74 | } | ||
75 | |||
76 | musb = platform_device_alloc("musb-hdrc", -1); | ||
77 | if (!musb) { | ||
78 | dev_err(&pdev->dev, "failed to allocate musb device\n"); | ||
79 | goto err1; | ||
80 | } | ||
81 | |||
82 | clk = clk_get(&pdev->dev, "usb"); | ||
83 | if (IS_ERR(clk)) { | ||
84 | dev_err(&pdev->dev, "failed to get clock\n"); | ||
85 | ret = PTR_ERR(clk); | ||
86 | goto err2; | ||
87 | } | ||
88 | |||
89 | ret = clk_enable(clk); | ||
90 | if (ret) { | ||
91 | dev_err(&pdev->dev, "failed to enable clock\n"); | ||
92 | goto err3; | ||
93 | } | ||
94 | |||
95 | musb->dev.parent = &pdev->dev; | ||
96 | musb->dev.dma_mask = pdev->dev.dma_mask; | ||
97 | musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; | ||
98 | |||
99 | glue->dev = &pdev->dev; | ||
100 | glue->musb = musb; | ||
101 | glue->clk = clk; | ||
102 | |||
103 | pdata->platform_ops = &ux500_ops; | ||
104 | |||
105 | platform_set_drvdata(pdev, glue); | ||
106 | |||
107 | ret = platform_device_add_resources(musb, pdev->resource, | ||
108 | pdev->num_resources); | ||
109 | if (ret) { | ||
110 | dev_err(&pdev->dev, "failed to add resources\n"); | ||
111 | goto err4; | ||
112 | } | ||
113 | |||
114 | ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); | ||
115 | if (ret) { | ||
116 | dev_err(&pdev->dev, "failed to add platform_data\n"); | ||
117 | goto err4; | ||
118 | } | ||
119 | |||
120 | ret = platform_device_add(musb); | ||
121 | if (ret) { | ||
122 | dev_err(&pdev->dev, "failed to register musb device\n"); | ||
123 | goto err4; | ||
124 | } | ||
125 | |||
126 | return 0; | ||
127 | |||
128 | err4: | ||
129 | clk_disable(clk); | ||
130 | |||
131 | err3: | ||
132 | clk_put(clk); | ||
133 | |||
134 | err2: | ||
135 | platform_device_put(musb); | ||
136 | |||
137 | err1: | ||
138 | kfree(glue); | ||
139 | |||
140 | err0: | ||
141 | return ret; | ||
142 | } | ||
143 | |||
144 | static int __exit ux500_remove(struct platform_device *pdev) | ||
145 | { | ||
146 | struct ux500_glue *glue = platform_get_drvdata(pdev); | ||
147 | |||
148 | platform_device_del(glue->musb); | ||
149 | platform_device_put(glue->musb); | ||
150 | clk_disable(glue->clk); | ||
151 | clk_put(glue->clk); | ||
152 | kfree(glue); | ||
153 | |||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | #ifdef CONFIG_PM | ||
158 | static int ux500_suspend(struct device *dev) | ||
159 | { | ||
160 | struct ux500_glue *glue = dev_get_drvdata(dev); | ||
161 | struct musb *musb = glue_to_musb(glue); | ||
162 | |||
163 | otg_set_suspend(musb->xceiv, 1); | ||
164 | clk_disable(glue->clk); | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static int ux500_resume(struct device *dev) | ||
170 | { | ||
171 | struct ux500_glue *glue = dev_get_drvdata(dev); | ||
172 | struct musb *musb = glue_to_musb(glue); | ||
173 | int ret; | ||
174 | |||
175 | ret = clk_enable(glue->clk); | ||
176 | if (ret) { | ||
177 | dev_err(dev, "failed to enable clock\n"); | ||
178 | return ret; | ||
179 | } | ||
180 | |||
181 | otg_set_suspend(musb->xceiv, 0); | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static const struct dev_pm_ops ux500_pm_ops = { | ||
187 | .suspend = ux500_suspend, | ||
188 | .resume = ux500_resume, | ||
189 | }; | ||
190 | |||
191 | #define DEV_PM_OPS (&ux500_pm_ops) | ||
192 | #else | ||
193 | #define DEV_PM_OPS NULL | ||
194 | #endif | ||
195 | |||
196 | static struct platform_driver ux500_driver = { | ||
197 | .remove = __exit_p(ux500_remove), | ||
198 | .driver = { | ||
199 | .name = "musb-ux500", | ||
200 | .pm = DEV_PM_OPS, | ||
201 | }, | ||
202 | }; | ||
203 | |||
204 | MODULE_DESCRIPTION("UX500 MUSB Glue Layer"); | ||
205 | MODULE_AUTHOR("Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>"); | ||
206 | MODULE_LICENSE("GPL v2"); | ||
207 | |||
208 | static int __init ux500_init(void) | ||
209 | { | ||
210 | return platform_driver_probe(&ux500_driver, ux500_probe); | ||
211 | } | ||
212 | subsys_initcall(ux500_init); | ||
213 | |||
214 | static void __exit ux500_exit(void) | ||
215 | { | ||
216 | platform_driver_unregister(&ux500_driver); | ||
217 | } | ||
218 | module_exit(ux500_exit); | ||
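ux500.c is a brand-new glue that follows the same pattern as the reworked tusb6010.c: probe allocates the "musb-hdrc" child, points pdata->platform_ops at ux500_ops, and additionally manages the "usb" clock and transceiver suspend state through dev_pm_ops. Below is a hedged sketch of how SoC/board code might register the parent device this driver probes; only the "musb-ux500" name is taken from the driver above, while the pdata contents and resource layout are illustrative assumptions.

#include <linux/platform_device.h>
#include <linux/usb/musb.h>

/* Illustrative platform data; real boards also supply .config/.board_data. */
static struct musb_hdrc_platform_data ux500_musb_pdata = {
	.mode	= MUSB_OTG,
};

static struct platform_device ux500_musb_device = {
	.name	= "musb-ux500",			/* must match ux500_driver.driver.name */
	.id	= -1,
	.dev	= {
		.platform_data = &ux500_musb_pdata,
		/* dma_mask/coherent_dma_mask are copied to the child in probe */
	},
	/* .resource/.num_resources: MUSB register window and interrupt */
};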
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c new file mode 100644 index 000000000000..cecace411832 --- /dev/null +++ b/drivers/usb/musb/ux500_dma.c | |||
@@ -0,0 +1,422 @@ | |||
1 | /* | ||
2 | * drivers/usb/musb/ux500_dma.c | ||
3 | * | ||
4 | * U8500 and U5500 DMA support code | ||
5 | * | ||
6 | * Copyright (C) 2009 STMicroelectronics | ||
7 | * Copyright (C) 2011 ST-Ericsson SA | ||
8 | * Authors: | ||
9 | * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com> | ||
10 | * Praveena Nadahally <praveen.nadahally@stericsson.com> | ||
11 | * Rajaram Regupathy <ragupathy.rajaram@stericsson.com> | ||
12 | * | ||
13 | * This program is free software: you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation, either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
25 | */ | ||
26 | |||
27 | #include <linux/device.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/platform_device.h> | ||
30 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/dmaengine.h> | ||
32 | #include <linux/pfn.h> | ||
33 | #include <mach/usb.h> | ||
34 | #include "musb_core.h" | ||
35 | |||
36 | struct ux500_dma_channel { | ||
37 | struct dma_channel channel; | ||
38 | struct ux500_dma_controller *controller; | ||
39 | struct musb_hw_ep *hw_ep; | ||
40 | struct work_struct channel_work; | ||
41 | struct dma_chan *dma_chan; | ||
42 | unsigned int cur_len; | ||
43 | dma_cookie_t cookie; | ||
44 | u8 ch_num; | ||
45 | u8 is_tx; | ||
46 | u8 is_allocated; | ||
47 | }; | ||
48 | |||
49 | struct ux500_dma_controller { | ||
50 | struct dma_controller controller; | ||
51 | struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_CHANNELS]; | ||
52 | struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_TX_CHANNELS]; | ||
53 | u32 num_rx_channels; | ||
54 | u32 num_tx_channels; | ||
55 | void *private_data; | ||
56 | dma_addr_t phy_base; | ||
57 | }; | ||
58 | |||
59 | /* Work function invoked from DMA callback to handle tx transfers. */ | ||
60 | static void ux500_tx_work(struct work_struct *data) | ||
61 | { | ||
62 | struct ux500_dma_channel *ux500_channel = container_of(data, | ||
63 | struct ux500_dma_channel, channel_work); | ||
64 | struct musb_hw_ep *hw_ep = ux500_channel->hw_ep; | ||
65 | struct musb *musb = hw_ep->musb; | ||
66 | unsigned long flags; | ||
67 | |||
68 | DBG(4, "DMA tx transfer done on hw_ep=%d\n", hw_ep->epnum); | ||
69 | |||
70 | spin_lock_irqsave(&musb->lock, flags); | ||
71 | ux500_channel->channel.actual_len = ux500_channel->cur_len; | ||
72 | ux500_channel->channel.status = MUSB_DMA_STATUS_FREE; | ||
73 | musb_dma_completion(musb, hw_ep->epnum, | ||
74 | ux500_channel->is_tx); | ||
75 | spin_unlock_irqrestore(&musb->lock, flags); | ||
76 | } | ||
77 | |||
78 | /* Work function invoked from DMA callback to handle rx transfers. */ | ||
79 | static void ux500_rx_work(struct work_struct *data) | ||
80 | { | ||
81 | struct ux500_dma_channel *ux500_channel = container_of(data, | ||
82 | struct ux500_dma_channel, channel_work); | ||
83 | struct musb_hw_ep *hw_ep = ux500_channel->hw_ep; | ||
84 | struct musb *musb = hw_ep->musb; | ||
85 | unsigned long flags; | ||
86 | |||
87 | DBG(4, "DMA rx transfer done on hw_ep=%d\n", hw_ep->epnum); | ||
88 | |||
89 | spin_lock_irqsave(&musb->lock, flags); | ||
90 | ux500_channel->channel.actual_len = ux500_channel->cur_len; | ||
91 | ux500_channel->channel.status = MUSB_DMA_STATUS_FREE; | ||
92 | musb_dma_completion(musb, hw_ep->epnum, | ||
93 | ux500_channel->is_tx); | ||
94 | spin_unlock_irqrestore(&musb->lock, flags); | ||
95 | } | ||
96 | |||
97 | void ux500_dma_callback(void *private_data) | ||
98 | { | ||
99 | struct dma_channel *channel = (struct dma_channel *)private_data; | ||
100 | struct ux500_dma_channel *ux500_channel = channel->private_data; | ||
101 | |||
102 | schedule_work(&ux500_channel->channel_work); | ||
103 | } | ||
104 | |||
105 | static bool ux500_configure_channel(struct dma_channel *channel, | ||
106 | u16 packet_sz, u8 mode, | ||
107 | dma_addr_t dma_addr, u32 len) | ||
108 | { | ||
109 | struct ux500_dma_channel *ux500_channel = channel->private_data; | ||
110 | struct musb_hw_ep *hw_ep = ux500_channel->hw_ep; | ||
111 | struct dma_chan *dma_chan = ux500_channel->dma_chan; | ||
112 | struct dma_async_tx_descriptor *dma_desc; | ||
113 | enum dma_data_direction direction; | ||
114 | struct scatterlist sg; | ||
115 | struct dma_slave_config slave_conf; | ||
116 | enum dma_slave_buswidth addr_width; | ||
117 | dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) + | ||
118 | ux500_channel->controller->phy_base); | ||
119 | |||
120 | DBG(4, "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n", | ||
121 | packet_sz, mode, dma_addr, len, ux500_channel->is_tx); | ||
122 | |||
123 | ux500_channel->cur_len = len; | ||
124 | |||
125 | sg_init_table(&sg, 1); | ||
126 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_addr)), len, | ||
127 | offset_in_page(dma_addr)); | ||
128 | sg_dma_address(&sg) = dma_addr; | ||
129 | sg_dma_len(&sg) = len; | ||
130 | |||
131 | direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE; | ||
132 | addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE : | ||
133 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
134 | |||
135 | slave_conf.direction = direction; | ||
136 | if (direction == DMA_FROM_DEVICE) { | ||
137 | slave_conf.src_addr = usb_fifo_addr; | ||
138 | slave_conf.src_addr_width = addr_width; | ||
139 | slave_conf.src_maxburst = 16; | ||
140 | } else { | ||
141 | slave_conf.dst_addr = usb_fifo_addr; | ||
142 | slave_conf.dst_addr_width = addr_width; | ||
143 | slave_conf.dst_maxburst = 16; | ||
144 | } | ||
145 | dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG, | ||
146 | (unsigned long) &slave_conf); | ||
147 | |||
148 | dma_desc = dma_chan->device-> | ||
149 | device_prep_slave_sg(dma_chan, &sg, 1, direction, | ||
150 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
151 | if (!dma_desc) | ||
152 | return false; | ||
153 | |||
154 | dma_desc->callback = ux500_dma_callback; | ||
155 | dma_desc->callback_param = channel; | ||
156 | ux500_channel->cookie = dma_desc->tx_submit(dma_desc); | ||
157 | |||
158 | dma_async_issue_pending(dma_chan); | ||
159 | |||
160 | return true; | ||
161 | } | ||
162 | |||
163 | static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c, | ||
164 | struct musb_hw_ep *hw_ep, u8 is_tx) | ||
165 | { | ||
166 | struct ux500_dma_controller *controller = container_of(c, | ||
167 | struct ux500_dma_controller, controller); | ||
168 | struct ux500_dma_channel *ux500_channel = NULL; | ||
169 | u8 ch_num = hw_ep->epnum - 1; | ||
170 | u32 max_ch; | ||
171 | |||
172 | /* Max 8 DMA channels (0 - 7). Each DMA channel can only be allocated | ||
173 | * to specified hw_ep. For example DMA channel 0 can only be allocated | ||
174 | * to hw_ep 1 and 9. | ||
175 | */ | ||
176 | if (ch_num > 7) | ||
177 | ch_num -= 8; | ||
178 | |||
179 | max_ch = is_tx ? controller->num_tx_channels : | ||
180 | controller->num_rx_channels; | ||
181 | |||
182 | if (ch_num >= max_ch) | ||
183 | return NULL; | ||
184 | |||
185 | ux500_channel = is_tx ? &(controller->tx_channel[ch_num]) : | ||
186 | &(controller->rx_channel[ch_num]) ; | ||
187 | |||
188 | /* Check if channel is already used. */ | ||
189 | if (ux500_channel->is_allocated) | ||
190 | return NULL; | ||
191 | |||
192 | ux500_channel->hw_ep = hw_ep; | ||
193 | ux500_channel->is_allocated = 1; | ||
194 | |||
195 | DBG(7, "hw_ep=%d, is_tx=0x%x, channel=%d\n", | ||
196 | hw_ep->epnum, is_tx, ch_num); | ||
197 | |||
198 | return &(ux500_channel->channel); | ||
199 | } | ||
200 | |||
201 | static void ux500_dma_channel_release(struct dma_channel *channel) | ||
202 | { | ||
203 | struct ux500_dma_channel *ux500_channel = channel->private_data; | ||
204 | |||
205 | DBG(7, "channel=%d\n", ux500_channel->ch_num); | ||
206 | |||
207 | if (ux500_channel->is_allocated) { | ||
208 | ux500_channel->is_allocated = 0; | ||
209 | channel->status = MUSB_DMA_STATUS_FREE; | ||
210 | channel->actual_len = 0; | ||
211 | } | ||
212 | } | ||
213 | |||
214 | static int ux500_dma_is_compatible(struct dma_channel *channel, | ||
215 | u16 maxpacket, void *buf, u32 length) | ||
216 | { | ||
217 | if ((maxpacket & 0x3) || | ||
218 | ((int)buf & 0x3) || | ||
219 | (length < 512) || | ||
220 | (length & 0x3)) | ||
221 | return false; | ||
222 | else | ||
223 | return true; | ||
224 | } | ||
225 | |||
226 | static int ux500_dma_channel_program(struct dma_channel *channel, | ||
227 | u16 packet_sz, u8 mode, | ||
228 | dma_addr_t dma_addr, u32 len) | ||
229 | { | ||
230 | int ret; | ||
231 | |||
232 | BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || | ||
233 | channel->status == MUSB_DMA_STATUS_BUSY); | ||
234 | |||
235 | if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len)) | ||
236 | return false; | ||
237 | |||
238 | channel->status = MUSB_DMA_STATUS_BUSY; | ||
239 | channel->actual_len = 0; | ||
240 | ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len); | ||
241 | if (!ret) | ||
242 | channel->status = MUSB_DMA_STATUS_FREE; | ||
243 | |||
244 | return ret; | ||
245 | } | ||
246 | |||
247 | static int ux500_dma_channel_abort(struct dma_channel *channel) | ||
248 | { | ||
249 | struct ux500_dma_channel *ux500_channel = channel->private_data; | ||
250 | struct ux500_dma_controller *controller = ux500_channel->controller; | ||
251 | struct musb *musb = controller->private_data; | ||
252 | void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs; | ||
253 | u16 csr; | ||
254 | |||
255 | DBG(4, "channel=%d, is_tx=%d\n", ux500_channel->ch_num, | ||
256 | ux500_channel->is_tx); | ||
257 | |||
258 | if (channel->status == MUSB_DMA_STATUS_BUSY) { | ||
259 | if (ux500_channel->is_tx) { | ||
260 | csr = musb_readw(epio, MUSB_TXCSR); | ||
261 | csr &= ~(MUSB_TXCSR_AUTOSET | | ||
262 | MUSB_TXCSR_DMAENAB | | ||
263 | MUSB_TXCSR_DMAMODE); | ||
264 | musb_writew(epio, MUSB_TXCSR, csr); | ||
265 | } else { | ||
266 | csr = musb_readw(epio, MUSB_RXCSR); | ||
267 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | | ||
268 | MUSB_RXCSR_DMAENAB | | ||
269 | MUSB_RXCSR_DMAMODE); | ||
270 | musb_writew(epio, MUSB_RXCSR, csr); | ||
271 | } | ||
272 | |||
273 | ux500_channel->dma_chan->device-> | ||
274 | device_control(ux500_channel->dma_chan, | ||
275 | DMA_TERMINATE_ALL, 0); | ||
276 | channel->status = MUSB_DMA_STATUS_FREE; | ||
277 | } | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | static int ux500_dma_controller_stop(struct dma_controller *c) | ||
282 | { | ||
283 | struct ux500_dma_controller *controller = container_of(c, | ||
284 | struct ux500_dma_controller, controller); | ||
285 | struct ux500_dma_channel *ux500_channel; | ||
286 | struct dma_channel *channel; | ||
287 | u8 ch_num; | ||
288 | |||
289 | for (ch_num = 0; ch_num < controller->num_rx_channels; ch_num++) { | ||
290 | channel = &controller->rx_channel[ch_num].channel; | ||
291 | ux500_channel = channel->private_data; | ||
292 | |||
293 | ux500_dma_channel_release(channel); | ||
294 | |||
295 | if (ux500_channel->dma_chan) | ||
296 | dma_release_channel(ux500_channel->dma_chan); | ||
297 | } | ||
298 | |||
299 | for (ch_num = 0; ch_num < controller->num_tx_channels; ch_num++) { | ||
300 | channel = &controller->tx_channel[ch_num].channel; | ||
301 | ux500_channel = channel->private_data; | ||
302 | |||
303 | ux500_dma_channel_release(channel); | ||
304 | |||
305 | if (ux500_channel->dma_chan) | ||
306 | dma_release_channel(ux500_channel->dma_chan); | ||
307 | } | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static int ux500_dma_controller_start(struct dma_controller *c) | ||
313 | { | ||
314 | struct ux500_dma_controller *controller = container_of(c, | ||
315 | struct ux500_dma_controller, controller); | ||
316 | struct ux500_dma_channel *ux500_channel = NULL; | ||
317 | struct musb *musb = controller->private_data; | ||
318 | struct device *dev = musb->controller; | ||
319 | struct musb_hdrc_platform_data *plat = dev->platform_data; | ||
320 | struct ux500_musb_board_data *data = plat->board_data; | ||
321 | struct dma_channel *dma_channel = NULL; | ||
322 | u32 ch_num; | ||
323 | u8 dir; | ||
324 | u8 is_tx = 0; | ||
325 | |||
326 | void **param_array; | ||
327 | struct ux500_dma_channel *channel_array; | ||
328 | u32 ch_count; | ||
329 | void (*musb_channel_work)(struct work_struct *); | ||
330 | dma_cap_mask_t mask; | ||
331 | |||
332 | if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) || | ||
333 | (data->num_tx_channels > UX500_MUSB_DMA_NUM_TX_CHANNELS)) | ||
334 | return -EINVAL; | ||
335 | |||
336 | controller->num_rx_channels = data->num_rx_channels; | ||
337 | controller->num_tx_channels = data->num_tx_channels; | ||
338 | |||
339 | dma_cap_zero(mask); | ||
340 | dma_cap_set(DMA_SLAVE, mask); | ||
341 | |||
342 | /* Prepare the loop for RX channels */ | ||
343 | channel_array = controller->rx_channel; | ||
344 | ch_count = data->num_rx_channels; | ||
345 | param_array = data->dma_rx_param_array; | ||
346 | musb_channel_work = ux500_rx_work; | ||
347 | |||
348 | for (dir = 0; dir < 2; dir++) { | ||
349 | for (ch_num = 0; ch_num < ch_count; ch_num++) { | ||
350 | ux500_channel = &channel_array[ch_num]; | ||
351 | ux500_channel->controller = controller; | ||
352 | ux500_channel->ch_num = ch_num; | ||
353 | ux500_channel->is_tx = is_tx; | ||
354 | |||
355 | dma_channel = &(ux500_channel->channel); | ||
356 | dma_channel->private_data = ux500_channel; | ||
357 | dma_channel->status = MUSB_DMA_STATUS_FREE; | ||
358 | dma_channel->max_len = SZ_16M; | ||
359 | |||
360 | ux500_channel->dma_chan = dma_request_channel(mask, | ||
361 | data->dma_filter, | ||
362 | param_array[ch_num]); | ||
363 | if (!ux500_channel->dma_chan) { | ||
364 | ERR("Dma pipe allocation error dir=%d ch=%d\n", | ||
365 | dir, ch_num); | ||
366 | |||
367 | /* Release already allocated channels */ | ||
368 | ux500_dma_controller_stop(c); | ||
369 | |||
370 | return -EBUSY; | ||
371 | } | ||
372 | |||
373 | INIT_WORK(&ux500_channel->channel_work, | ||
374 | musb_channel_work); | ||
375 | } | ||
376 | |||
377 | /* Prepare the loop for TX channels */ | ||
378 | channel_array = controller->tx_channel; | ||
379 | ch_count = data->num_tx_channels; | ||
380 | param_array = data->dma_tx_param_array; | ||
381 | musb_channel_work = ux500_tx_work; | ||
382 | is_tx = 1; | ||
383 | } | ||
384 | |||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | void dma_controller_destroy(struct dma_controller *c) | ||
389 | { | ||
390 | struct ux500_dma_controller *controller = container_of(c, | ||
391 | struct ux500_dma_controller, controller); | ||
392 | |||
393 | kfree(controller); | ||
394 | } | ||
395 | |||
396 | struct dma_controller *__init | ||
397 | dma_controller_create(struct musb *musb, void __iomem *base) | ||
398 | { | ||
399 | struct ux500_dma_controller *controller; | ||
400 | struct platform_device *pdev = to_platform_device(musb->controller); | ||
401 | struct resource *iomem; | ||
402 | |||
403 | controller = kzalloc(sizeof(*controller), GFP_KERNEL); | ||
404 | if (!controller) | ||
405 | return NULL; | ||
406 | |||
407 | controller->private_data = musb; | ||
408 | |||
409 | /* Save physical address for DMA controller. */ | ||
410 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
411 | controller->phy_base = (dma_addr_t) iomem->start; | ||
412 | |||
413 | controller->controller.start = ux500_dma_controller_start; | ||
414 | controller->controller.stop = ux500_dma_controller_stop; | ||
415 | controller->controller.channel_alloc = ux500_dma_channel_allocate; | ||
416 | controller->controller.channel_release = ux500_dma_channel_release; | ||
417 | controller->controller.channel_program = ux500_dma_channel_program; | ||
418 | controller->controller.channel_abort = ux500_dma_channel_abort; | ||
419 | controller->controller.is_compatible = ux500_dma_is_compatible; | ||
420 | |||
421 | return &controller->controller; | ||
422 | } | ||
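ux500_dma.c layers MUSB's dma_controller interface on top of the generic dmaengine slave API: channels are acquired up front in ux500_dma_controller_start(), channel_program() wraps the buffer in a one-entry scatterlist and submits it, and completion is signalled from a work item because the dmaengine callback may run in interrupt context. The sketch below shows the call sequence a consumer of this interface would drive; the helper itself is illustrative and not part of the patch.

#include <linux/errno.h>
#include "musb_core.h"		/* struct musb_hw_ep, dma_controller, dma_channel */

/* Hypothetical caller of the hooks wired up in dma_controller_create() above. */
static int example_dma_transfer(struct dma_controller *c, struct musb_hw_ep *hw_ep,
				dma_addr_t buf, u32 len, u16 maxpacket, u8 is_tx)
{
	struct dma_channel *ch;

	ch = c->channel_alloc(c, hw_ep, is_tx);
	if (!ch)
		return -EBUSY;		/* no channel: fall back to PIO */

	/* mode value (1) is illustrative; ux500_configure_channel() ignores it. */
	if (!c->channel_program(ch, maxpacket, 1, buf, len)) {
		c->channel_release(ch);
		return -EIO;		/* DMA declined the transfer; use PIO */
	}

	/* Later, ux500_dma_callback() schedules the rx/tx work item, which
	 * reports the result through musb_dma_completion() under musb->lock. */
	return 0;
}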