-rw-r--r--   drivers/usb/Makefile           |    1
-rw-r--r--   drivers/usb/host/Kconfig       |   12
-rw-r--r--   drivers/usb/host/Makefile      |    1
-rw-r--r--   drivers/usb/host/fotg210-hcd.c | 6049
-rw-r--r--   drivers/usb/host/fotg210.h     |  750
5 files changed, 6813 insertions, 0 deletions
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 238c5d47cadb..5460abf045de 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_USB_ISP1760_HCD)	+= host/
 obj-$(CONFIG_USB_IMX21_HCD)	+= host/
 obj-$(CONFIG_USB_FSL_MPH_DR_OF)	+= host/
 obj-$(CONFIG_USB_FUSBH200_HCD)	+= host/
+obj-$(CONFIG_USB_FOTG210_HCD)	+= host/
 
 obj-$(CONFIG_USB_C67X00_HCD)	+= c67x00/
 
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 4263d011392c..cf521d6551dd 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -354,6 +354,18 @@ config USB_FUSBH200_HCD
 	  To compile this driver as a module, choose M here: the
 	  module will be called fusbh200-hcd.
 
+config USB_FOTG210_HCD
+	tristate "FOTG210 HCD support"
+	depends on USB
+	default N
+	---help---
+	  Faraday FOTG210 is an OTG controller which can be configured as
+	  an USB2.0 host. It is designed to meet USB2.0 EHCI specification
+	  with minor modification.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called fotg210-hcd.
+
 config USB_OHCI_HCD
 	tristate "OHCI HCD (USB 1.1) support"
 	select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index bea71127b15f..829a3397882a 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -58,3 +58,4 @@ obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
 obj-$(CONFIG_USB_HCD_BCMA)	+= bcma-hcd.o
 obj-$(CONFIG_USB_HCD_SSB)	+= ssb-hcd.o
 obj-$(CONFIG_USB_FUSBH200_HCD)	+= fusbh200-hcd.o
+obj-$(CONFIG_USB_FOTG210_HCD)	+= fotg210-hcd.o
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
new file mode 100644
index 000000000000..fce13bcc4a3e
--- /dev/null
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -0,0 +1,6049 @@
1 | /* | ||
2 | * Faraday FOTG210 EHCI-like driver | ||
3 | * | ||
4 | * Copyright (c) 2013 Faraday Technology Corporation | ||
5 | * | ||
6 | * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com> | ||
7 | * Feng-Hsin Chiang <john453@faraday-tech.com> | ||
8 | * Po-Yu Chuang <ratbert.chuang@gmail.com> | ||
9 | * | ||
10 | * Most of code borrowed from the Linux-3.7 EHCI driver | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of the GNU General Public License as published by the | ||
14 | * Free Software Foundation; either version 2 of the License, or (at your | ||
15 | * option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, but | ||
18 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
19 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
20 | * for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software Foundation, | ||
24 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
25 | */ | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/device.h> | ||
28 | #include <linux/dmapool.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/ioport.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/vmalloc.h> | ||
34 | #include <linux/errno.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/hrtimer.h> | ||
37 | #include <linux/list.h> | ||
38 | #include <linux/interrupt.h> | ||
39 | #include <linux/usb.h> | ||
40 | #include <linux/usb/hcd.h> | ||
41 | #include <linux/moduleparam.h> | ||
42 | #include <linux/dma-mapping.h> | ||
43 | #include <linux/debugfs.h> | ||
44 | #include <linux/slab.h> | ||
45 | #include <linux/uaccess.h> | ||
46 | #include <linux/platform_device.h> | ||
47 | #include <linux/io.h> | ||
48 | |||
49 | #include <asm/byteorder.h> | ||
50 | #include <asm/irq.h> | ||
51 | #include <asm/unaligned.h> | ||
52 | |||
53 | /*-------------------------------------------------------------------------*/ | ||
54 | #define DRIVER_AUTHOR "Yuan-Hsin Chen" | ||
55 | #define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver" | ||
56 | |||
57 | static const char hcd_name[] = "fotg210_hcd"; | ||
58 | |||
59 | #undef VERBOSE_DEBUG | ||
60 | #undef FOTG210_URB_TRACE | ||
61 | |||
62 | #ifdef DEBUG | ||
63 | #define FOTG210_STATS | ||
64 | #endif | ||
65 | |||
66 | /* magic numbers that can affect system performance */ | ||
67 | #define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ | ||
68 | #define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ | ||
69 | #define FOTG210_TUNE_RL_TT 0 | ||
70 | #define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */ | ||
71 | #define FOTG210_TUNE_MULT_TT 1 | ||
72 | /* | ||
73 | * Some drivers think it's safe to schedule isochronous transfers more than | ||
74 | * 256 ms into the future (partly as a result of an old bug in the scheduling | ||
75 | * code). In an attempt to avoid trouble, we will use a minimum scheduling | ||
76 | * length of 512 frames instead of 256. | ||
77 | */ | ||
78 | #define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */ | ||
79 | |||
80 | /* Initial IRQ latency: faster than hw default */ | ||
81 | static int log2_irq_thresh; /* 0 to 6 */ | ||
82 | module_param(log2_irq_thresh, int, S_IRUGO); | ||
83 | MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes"); | ||
84 | |||
85 | /* initial park setting: slower than hw default */ | ||
86 | static unsigned park; | ||
87 | module_param(park, uint, S_IRUGO); | ||
88 | MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets"); | ||
89 | |||
90 | /* for link power management(LPM) feature */ | ||
91 | static unsigned int hird; | ||
92 | module_param(hird, int, S_IRUGO); | ||
93 | MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us"); | ||
94 | |||
95 | #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT) | ||
96 | |||
97 | #include "fotg210.h" | ||
98 | |||
99 | /*-------------------------------------------------------------------------*/ | ||
100 | |||
101 | #define fotg210_dbg(fotg210, fmt, args...) \ | ||
102 | dev_dbg(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) | ||
103 | #define fotg210_err(fotg210, fmt, args...) \ | ||
104 | dev_err(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) | ||
105 | #define fotg210_info(fotg210, fmt, args...) \ | ||
106 | dev_info(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) | ||
107 | #define fotg210_warn(fotg210, fmt, args...) \ | ||
108 | dev_warn(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) | ||
109 | |||
110 | #ifdef VERBOSE_DEBUG | ||
111 | # define fotg210_vdbg fotg210_dbg | ||
112 | #else | ||
113 | static inline void fotg210_vdbg(struct fotg210_hcd *fotg210, ...) {} | ||
114 | #endif | ||
115 | |||
116 | #ifdef DEBUG | ||
117 | |||
118 | /* check the values in the HCSPARAMS register | ||
119 | * (host controller _Structural_ parameters) | ||
120 | * see EHCI spec, Table 2-4 for each value | ||
121 | */ | ||
122 | static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label) | ||
123 | { | ||
124 | u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params); | ||
125 | |||
126 | fotg210_dbg(fotg210, | ||
127 | "%s hcs_params 0x%x ports=%d\n", | ||
128 | label, params, | ||
129 | HCS_N_PORTS(params) | ||
130 | ); | ||
131 | } | ||
132 | #else | ||
133 | |||
134 | static inline void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label) {} | ||
135 | |||
136 | #endif | ||
137 | |||
138 | #ifdef DEBUG | ||
139 | |||
140 | /* check the values in the HCCPARAMS register | ||
141 | * (host controller _Capability_ parameters) | ||
142 | * see EHCI Spec, Table 2-5 for each value | ||
143 | * */ | ||
144 | static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label) | ||
145 | { | ||
146 | u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params); | ||
147 | |||
148 | fotg210_dbg(fotg210, | ||
149 | "%s hcc_params %04x uframes %s%s\n", | ||
150 | label, | ||
151 | params, | ||
152 | HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024", | ||
153 | HCC_CANPARK(params) ? " park" : ""); | ||
154 | } | ||
155 | #else | ||
156 | |||
157 | static inline void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label) {} | ||
158 | |||
159 | #endif | ||
160 | |||
161 | #ifdef DEBUG | ||
162 | |||
163 | static void __maybe_unused | ||
164 | dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd) | ||
165 | { | ||
166 | fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd, | ||
167 | hc32_to_cpup(fotg210, &qtd->hw_next), | ||
168 | hc32_to_cpup(fotg210, &qtd->hw_alt_next), | ||
169 | hc32_to_cpup(fotg210, &qtd->hw_token), | ||
170 | hc32_to_cpup(fotg210, &qtd->hw_buf[0])); | ||
171 | if (qtd->hw_buf[1]) | ||
172 | fotg210_dbg(fotg210, " p1=%08x p2=%08x p3=%08x p4=%08x\n", | ||
173 | hc32_to_cpup(fotg210, &qtd->hw_buf[1]), | ||
174 | hc32_to_cpup(fotg210, &qtd->hw_buf[2]), | ||
175 | hc32_to_cpup(fotg210, &qtd->hw_buf[3]), | ||
176 | hc32_to_cpup(fotg210, &qtd->hw_buf[4])); | ||
177 | } | ||
178 | |||
179 | static void __maybe_unused | ||
180 | dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh) | ||
181 | { | ||
182 | struct fotg210_qh_hw *hw = qh->hw; | ||
183 | |||
184 | fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label, | ||
185 | qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current); | ||
186 | dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next); | ||
187 | } | ||
188 | |||
189 | static void __maybe_unused | ||
190 | dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd) | ||
191 | { | ||
192 | fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n", | ||
193 | label, itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next), | ||
194 | itd->urb); | ||
195 | fotg210_dbg(fotg210, | ||
196 | " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n", | ||
197 | hc32_to_cpu(fotg210, itd->hw_transaction[0]), | ||
198 | hc32_to_cpu(fotg210, itd->hw_transaction[1]), | ||
199 | hc32_to_cpu(fotg210, itd->hw_transaction[2]), | ||
200 | hc32_to_cpu(fotg210, itd->hw_transaction[3]), | ||
201 | hc32_to_cpu(fotg210, itd->hw_transaction[4]), | ||
202 | hc32_to_cpu(fotg210, itd->hw_transaction[5]), | ||
203 | hc32_to_cpu(fotg210, itd->hw_transaction[6]), | ||
204 | hc32_to_cpu(fotg210, itd->hw_transaction[7])); | ||
205 | fotg210_dbg(fotg210, | ||
206 | " buf: %08x %08x %08x %08x %08x %08x %08x\n", | ||
207 | hc32_to_cpu(fotg210, itd->hw_bufp[0]), | ||
208 | hc32_to_cpu(fotg210, itd->hw_bufp[1]), | ||
209 | hc32_to_cpu(fotg210, itd->hw_bufp[2]), | ||
210 | hc32_to_cpu(fotg210, itd->hw_bufp[3]), | ||
211 | hc32_to_cpu(fotg210, itd->hw_bufp[4]), | ||
212 | hc32_to_cpu(fotg210, itd->hw_bufp[5]), | ||
213 | hc32_to_cpu(fotg210, itd->hw_bufp[6])); | ||
214 | fotg210_dbg(fotg210, " index: %d %d %d %d %d %d %d %d\n", | ||
215 | itd->index[0], itd->index[1], itd->index[2], | ||
216 | itd->index[3], itd->index[4], itd->index[5], | ||
217 | itd->index[6], itd->index[7]); | ||
218 | } | ||
219 | |||
220 | static int __maybe_unused | ||
221 | dbg_status_buf(char *buf, unsigned len, const char *label, u32 status) | ||
222 | { | ||
223 | return scnprintf(buf, len, | ||
224 | "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s", | ||
225 | label, label[0] ? " " : "", status, | ||
226 | (status & STS_ASS) ? " Async" : "", | ||
227 | (status & STS_PSS) ? " Periodic" : "", | ||
228 | (status & STS_RECL) ? " Recl" : "", | ||
229 | (status & STS_HALT) ? " Halt" : "", | ||
230 | (status & STS_IAA) ? " IAA" : "", | ||
231 | (status & STS_FATAL) ? " FATAL" : "", | ||
232 | (status & STS_FLR) ? " FLR" : "", | ||
233 | (status & STS_PCD) ? " PCD" : "", | ||
234 | (status & STS_ERR) ? " ERR" : "", | ||
235 | (status & STS_INT) ? " INT" : "" | ||
236 | ); | ||
237 | } | ||
238 | |||
239 | static int __maybe_unused | ||
240 | dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable) | ||
241 | { | ||
242 | return scnprintf(buf, len, | ||
243 | "%s%sintrenable %02x%s%s%s%s%s%s", | ||
244 | label, label[0] ? " " : "", enable, | ||
245 | (enable & STS_IAA) ? " IAA" : "", | ||
246 | (enable & STS_FATAL) ? " FATAL" : "", | ||
247 | (enable & STS_FLR) ? " FLR" : "", | ||
248 | (enable & STS_PCD) ? " PCD" : "", | ||
249 | (enable & STS_ERR) ? " ERR" : "", | ||
250 | (enable & STS_INT) ? " INT" : "" | ||
251 | ); | ||
252 | } | ||
253 | |||
254 | static const char *const fls_strings[] = { "1024", "512", "256", "??" }; | ||
255 | |||
256 | static int | ||
257 | dbg_command_buf(char *buf, unsigned len, const char *label, u32 command) | ||
258 | { | ||
259 | return scnprintf(buf, len, | ||
260 | "%s%scommand %07x %s=%d ithresh=%d%s%s%s " | ||
261 | "period=%s%s %s", | ||
262 | label, label[0] ? " " : "", command, | ||
263 | (command & CMD_PARK) ? " park" : "(park)", | ||
264 | CMD_PARK_CNT(command), | ||
265 | (command >> 16) & 0x3f, | ||
266 | (command & CMD_IAAD) ? " IAAD" : "", | ||
267 | (command & CMD_ASE) ? " Async" : "", | ||
268 | (command & CMD_PSE) ? " Periodic" : "", | ||
269 | fls_strings[(command >> 2) & 0x3], | ||
270 | (command & CMD_RESET) ? " Reset" : "", | ||
271 | (command & CMD_RUN) ? "RUN" : "HALT" | ||
272 | ); | ||
273 | } | ||
274 | |||
275 | static int | ||
276 | dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status) | ||
277 | { | ||
278 | char *sig; | ||
279 | |||
280 | /* signaling state */ | ||
281 | switch (status & (3 << 10)) { | ||
282 | case 0 << 10: | ||
283 | sig = "se0"; | ||
284 | break; | ||
285 | case 1 << 10: | ||
286 | sig = "k"; | ||
287 | break; /* low speed */ | ||
288 | case 2 << 10: | ||
289 | sig = "j"; | ||
290 | break; | ||
291 | default: | ||
292 | sig = "?"; | ||
293 | break; | ||
294 | } | ||
295 | |||
296 | return scnprintf(buf, len, | ||
297 | "%s%sport:%d status %06x %d " | ||
298 | "sig=%s%s%s%s%s%s%s%s", | ||
299 | label, label[0] ? " " : "", port, status, | ||
300 | status>>25,/*device address */ | ||
301 | sig, | ||
302 | (status & PORT_RESET) ? " RESET" : "", | ||
303 | (status & PORT_SUSPEND) ? " SUSPEND" : "", | ||
304 | (status & PORT_RESUME) ? " RESUME" : "", | ||
305 | (status & PORT_PEC) ? " PEC" : "", | ||
306 | (status & PORT_PE) ? " PE" : "", | ||
307 | (status & PORT_CSC) ? " CSC" : "", | ||
308 | (status & PORT_CONNECT) ? " CONNECT" : ""); | ||
309 | } | ||
310 | |||
311 | #else | ||
312 | static inline void __maybe_unused | ||
313 | dbg_qh(char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh) | ||
314 | {} | ||
315 | |||
316 | static inline int __maybe_unused | ||
317 | dbg_status_buf(char *buf, unsigned len, const char *label, u32 status) | ||
318 | { return 0; } | ||
319 | |||
320 | static inline int __maybe_unused | ||
321 | dbg_command_buf(char *buf, unsigned len, const char *label, u32 command) | ||
322 | { return 0; } | ||
323 | |||
324 | static inline int __maybe_unused | ||
325 | dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable) | ||
326 | { return 0; } | ||
327 | |||
328 | static inline int __maybe_unused | ||
329 | dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status) | ||
330 | { return 0; } | ||
331 | |||
332 | #endif /* DEBUG */ | ||
333 | |||
334 | /* functions have the "wrong" filename when they're output... */ | ||
335 | #define dbg_status(fotg210, label, status) { \ | ||
336 | char _buf[80]; \ | ||
337 | dbg_status_buf(_buf, sizeof(_buf), label, status); \ | ||
338 | fotg210_dbg(fotg210, "%s\n", _buf); \ | ||
339 | } | ||
340 | |||
341 | #define dbg_cmd(fotg210, label, command) { \ | ||
342 | char _buf[80]; \ | ||
343 | dbg_command_buf(_buf, sizeof(_buf), label, command); \ | ||
344 | fotg210_dbg(fotg210, "%s\n", _buf); \ | ||
345 | } | ||
346 | |||
347 | #define dbg_port(fotg210, label, port, status) { \ | ||
348 | char _buf[80]; \ | ||
349 | dbg_port_buf(_buf, sizeof(_buf), label, port, status); \ | ||
350 | fotg210_dbg(fotg210, "%s\n", _buf); \ | ||
351 | } | ||
352 | |||
353 | /*-------------------------------------------------------------------------*/ | ||
354 | |||
355 | #ifdef STUB_DEBUG_FILES | ||
356 | |||
357 | static inline void create_debug_files(struct fotg210_hcd *bus) { } | ||
358 | static inline void remove_debug_files(struct fotg210_hcd *bus) { } | ||
359 | |||
360 | #else | ||
361 | |||
362 | /* troubleshooting help: expose state in debugfs */ | ||
363 | |||
364 | static int debug_async_open(struct inode *, struct file *); | ||
365 | static int debug_periodic_open(struct inode *, struct file *); | ||
366 | static int debug_registers_open(struct inode *, struct file *); | ||
367 | static int debug_async_open(struct inode *, struct file *); | ||
368 | |||
369 | static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*); | ||
370 | static int debug_close(struct inode *, struct file *); | ||
371 | |||
372 | static const struct file_operations debug_async_fops = { | ||
373 | .owner = THIS_MODULE, | ||
374 | .open = debug_async_open, | ||
375 | .read = debug_output, | ||
376 | .release = debug_close, | ||
377 | .llseek = default_llseek, | ||
378 | }; | ||
379 | static const struct file_operations debug_periodic_fops = { | ||
380 | .owner = THIS_MODULE, | ||
381 | .open = debug_periodic_open, | ||
382 | .read = debug_output, | ||
383 | .release = debug_close, | ||
384 | .llseek = default_llseek, | ||
385 | }; | ||
386 | static const struct file_operations debug_registers_fops = { | ||
387 | .owner = THIS_MODULE, | ||
388 | .open = debug_registers_open, | ||
389 | .read = debug_output, | ||
390 | .release = debug_close, | ||
391 | .llseek = default_llseek, | ||
392 | }; | ||
393 | |||
394 | static struct dentry *fotg210_debug_root; | ||
395 | |||
396 | struct debug_buffer { | ||
397 | ssize_t (*fill_func)(struct debug_buffer *); /* fill method */ | ||
398 | struct usb_bus *bus; | ||
399 | struct mutex mutex; /* protect filling of buffer */ | ||
400 | size_t count; /* number of characters filled into buffer */ | ||
401 | char *output_buf; | ||
402 | size_t alloc_size; | ||
403 | }; | ||
404 | |||
405 | #define speed_char(info1)({ char tmp; \ | ||
406 | switch (info1 & (3 << 12)) { \ | ||
407 | case QH_FULL_SPEED: \ | ||
408 | tmp = 'f'; break; \ | ||
409 | case QH_LOW_SPEED: \ | ||
410 | tmp = 'l'; break; \ | ||
411 | case QH_HIGH_SPEED: \ | ||
412 | tmp = 'h'; break; \ | ||
413 | default: \ | ||
414 | tmp = '?'; break; \ | ||
415 | }; tmp; }) | ||
416 | |||
417 | static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token) | ||
418 | { | ||
419 | __u32 v = hc32_to_cpu(fotg210, token); | ||
420 | |||
421 | if (v & QTD_STS_ACTIVE) | ||
422 | return '*'; | ||
423 | if (v & QTD_STS_HALT) | ||
424 | return '-'; | ||
425 | if (!IS_SHORT_READ(v)) | ||
426 | return ' '; | ||
427 | /* tries to advance through hw_alt_next */ | ||
428 | return '/'; | ||
429 | } | ||
430 | |||
431 | static void qh_lines( | ||
432 | struct fotg210_hcd *fotg210, | ||
433 | struct fotg210_qh *qh, | ||
434 | char **nextp, | ||
435 | unsigned *sizep | ||
436 | ) | ||
437 | { | ||
438 | u32 scratch; | ||
439 | u32 hw_curr; | ||
440 | struct fotg210_qtd *td; | ||
441 | unsigned temp; | ||
442 | unsigned size = *sizep; | ||
443 | char *next = *nextp; | ||
444 | char mark; | ||
445 | __le32 list_end = FOTG210_LIST_END(fotg210); | ||
446 | struct fotg210_qh_hw *hw = qh->hw; | ||
447 | |||
448 | if (hw->hw_qtd_next == list_end) /* NEC does this */ | ||
449 | mark = '@'; | ||
450 | else | ||
451 | mark = token_mark(fotg210, hw->hw_token); | ||
452 | if (mark == '/') { /* qh_alt_next controls qh advance? */ | ||
453 | if ((hw->hw_alt_next & QTD_MASK(fotg210)) | ||
454 | == fotg210->async->hw->hw_alt_next) | ||
455 | mark = '#'; /* blocked */ | ||
456 | else if (hw->hw_alt_next == list_end) | ||
457 | mark = '.'; /* use hw_qtd_next */ | ||
458 | /* else alt_next points to some other qtd */ | ||
459 | } | ||
460 | scratch = hc32_to_cpup(fotg210, &hw->hw_info1); | ||
461 | hw_curr = (mark == '*') ? hc32_to_cpup(fotg210, &hw->hw_current) : 0; | ||
462 | temp = scnprintf(next, size, | ||
463 | "qh/%p dev%d %cs ep%d %08x %08x(%08x%c %s nak%d)", | ||
464 | qh, scratch & 0x007f, | ||
465 | speed_char(scratch), | ||
466 | (scratch >> 8) & 0x000f, | ||
467 | scratch, hc32_to_cpup(fotg210, &hw->hw_info2), | ||
468 | hc32_to_cpup(fotg210, &hw->hw_token), mark, | ||
469 | (cpu_to_hc32(fotg210, QTD_TOGGLE) & hw->hw_token) | ||
470 | ? "data1" : "data0", | ||
471 | (hc32_to_cpup(fotg210, &hw->hw_alt_next) >> 1) & 0x0f); | ||
472 | size -= temp; | ||
473 | next += temp; | ||
474 | |||
475 | /* hc may be modifying the list as we read it ... */ | ||
476 | list_for_each_entry(td, &qh->qtd_list, qtd_list) { | ||
477 | scratch = hc32_to_cpup(fotg210, &td->hw_token); | ||
478 | mark = ' '; | ||
479 | if (hw_curr == td->qtd_dma) | ||
480 | mark = '*'; | ||
481 | else if (hw->hw_qtd_next == cpu_to_hc32(fotg210, td->qtd_dma)) | ||
482 | mark = '+'; | ||
483 | else if (QTD_LENGTH(scratch)) { | ||
484 | if (td->hw_alt_next == fotg210->async->hw->hw_alt_next) | ||
485 | mark = '#'; | ||
486 | else if (td->hw_alt_next != list_end) | ||
487 | mark = '/'; | ||
488 | } | ||
489 | temp = snprintf(next, size, | ||
490 | "\n\t%p%c%s len=%d %08x urb %p", | ||
491 | td, mark, ({ char *tmp; | ||
492 | switch ((scratch>>8)&0x03) { | ||
493 | case 0: | ||
494 | tmp = "out"; | ||
495 | break; | ||
496 | case 1: | ||
497 | tmp = "in"; | ||
498 | break; | ||
499 | case 2: | ||
500 | tmp = "setup"; | ||
501 | break; | ||
502 | default: | ||
503 | tmp = "?"; | ||
504 | break; | ||
505 | } tmp; }), | ||
506 | (scratch >> 16) & 0x7fff, | ||
507 | scratch, | ||
508 | td->urb); | ||
509 | if (size < temp) | ||
510 | temp = size; | ||
511 | size -= temp; | ||
512 | next += temp; | ||
513 | if (temp == size) | ||
514 | goto done; | ||
515 | } | ||
516 | |||
517 | temp = snprintf(next, size, "\n"); | ||
518 | if (size < temp) | ||
519 | temp = size; | ||
520 | size -= temp; | ||
521 | next += temp; | ||
522 | |||
523 | done: | ||
524 | *sizep = size; | ||
525 | *nextp = next; | ||
526 | } | ||
527 | |||
528 | static ssize_t fill_async_buffer(struct debug_buffer *buf) | ||
529 | { | ||
530 | struct usb_hcd *hcd; | ||
531 | struct fotg210_hcd *fotg210; | ||
532 | unsigned long flags; | ||
533 | unsigned temp, size; | ||
534 | char *next; | ||
535 | struct fotg210_qh *qh; | ||
536 | |||
537 | hcd = bus_to_hcd(buf->bus); | ||
538 | fotg210 = hcd_to_fotg210(hcd); | ||
539 | next = buf->output_buf; | ||
540 | size = buf->alloc_size; | ||
541 | |||
542 | *next = 0; | ||
543 | |||
544 | /* dumps a snapshot of the async schedule. | ||
545 | * usually empty except for long-term bulk reads, or head. | ||
546 | * one QH per line, and TDs we know about | ||
547 | */ | ||
548 | spin_lock_irqsave(&fotg210->lock, flags); | ||
549 | for (qh = fotg210->async->qh_next.qh; size > 0 && qh; | ||
550 | qh = qh->qh_next.qh) | ||
551 | qh_lines(fotg210, qh, &next, &size); | ||
552 | if (fotg210->async_unlink && size > 0) { | ||
553 | temp = scnprintf(next, size, "\nunlink =\n"); | ||
554 | size -= temp; | ||
555 | next += temp; | ||
556 | |||
557 | for (qh = fotg210->async_unlink; size > 0 && qh; | ||
558 | qh = qh->unlink_next) | ||
559 | qh_lines(fotg210, qh, &next, &size); | ||
560 | } | ||
561 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
562 | |||
563 | return strlen(buf->output_buf); | ||
564 | } | ||
565 | |||
566 | #define DBG_SCHED_LIMIT 64 | ||
567 | static ssize_t fill_periodic_buffer(struct debug_buffer *buf) | ||
568 | { | ||
569 | struct usb_hcd *hcd; | ||
570 | struct fotg210_hcd *fotg210; | ||
571 | unsigned long flags; | ||
572 | union fotg210_shadow p, *seen; | ||
573 | unsigned temp, size, seen_count; | ||
574 | char *next; | ||
575 | unsigned i; | ||
576 | __hc32 tag; | ||
577 | |||
578 | seen = kmalloc(DBG_SCHED_LIMIT * sizeof(*seen), GFP_ATOMIC); | ||
579 | if (!seen) | ||
580 | return 0; | ||
581 | seen_count = 0; | ||
582 | |||
583 | hcd = bus_to_hcd(buf->bus); | ||
584 | fotg210 = hcd_to_fotg210(hcd); | ||
585 | next = buf->output_buf; | ||
586 | size = buf->alloc_size; | ||
587 | |||
588 | temp = scnprintf(next, size, "size = %d\n", fotg210->periodic_size); | ||
589 | size -= temp; | ||
590 | next += temp; | ||
591 | |||
592 | /* dump a snapshot of the periodic schedule. | ||
593 | * iso changes, interrupt usually doesn't. | ||
594 | */ | ||
595 | spin_lock_irqsave(&fotg210->lock, flags); | ||
596 | for (i = 0; i < fotg210->periodic_size; i++) { | ||
597 | p = fotg210->pshadow[i]; | ||
598 | if (likely(!p.ptr)) | ||
599 | continue; | ||
600 | tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]); | ||
601 | |||
602 | temp = scnprintf(next, size, "%4d: ", i); | ||
603 | size -= temp; | ||
604 | next += temp; | ||
605 | |||
606 | do { | ||
607 | struct fotg210_qh_hw *hw; | ||
608 | |||
609 | switch (hc32_to_cpu(fotg210, tag)) { | ||
610 | case Q_TYPE_QH: | ||
611 | hw = p.qh->hw; | ||
612 | temp = scnprintf(next, size, " qh%d-%04x/%p", | ||
613 | p.qh->period, | ||
614 | hc32_to_cpup(fotg210, | ||
615 | &hw->hw_info2) | ||
616 | /* uframe masks */ | ||
617 | & (QH_CMASK | QH_SMASK), | ||
618 | p.qh); | ||
619 | size -= temp; | ||
620 | next += temp; | ||
621 | /* don't repeat what follows this qh */ | ||
622 | for (temp = 0; temp < seen_count; temp++) { | ||
623 | if (seen[temp].ptr != p.ptr) | ||
624 | continue; | ||
625 | if (p.qh->qh_next.ptr) { | ||
626 | temp = scnprintf(next, size, | ||
627 | " ..."); | ||
628 | size -= temp; | ||
629 | next += temp; | ||
630 | } | ||
631 | break; | ||
632 | } | ||
633 | /* show more info the first time around */ | ||
634 | if (temp == seen_count) { | ||
635 | u32 scratch = hc32_to_cpup(fotg210, | ||
636 | &hw->hw_info1); | ||
637 | struct fotg210_qtd *qtd; | ||
638 | char *type = ""; | ||
639 | |||
640 | /* count tds, get ep direction */ | ||
641 | temp = 0; | ||
642 | list_for_each_entry(qtd, | ||
643 | &p.qh->qtd_list, | ||
644 | qtd_list) { | ||
645 | temp++; | ||
646 | switch (0x03 & (hc32_to_cpu( | ||
647 | fotg210, | ||
648 | qtd->hw_token) >> 8)) { | ||
649 | case 0: | ||
650 | type = "out"; | ||
651 | continue; | ||
652 | case 1: | ||
653 | type = "in"; | ||
654 | continue; | ||
655 | } | ||
656 | } | ||
657 | |||
658 | temp = scnprintf(next, size, | ||
659 | "(%c%d ep%d%s " | ||
660 | "[%d/%d] q%d p%d)", | ||
661 | speed_char(scratch), | ||
662 | scratch & 0x007f, | ||
663 | (scratch >> 8) & 0x000f, type, | ||
664 | p.qh->usecs, p.qh->c_usecs, | ||
665 | temp, | ||
666 | 0x7ff & (scratch >> 16)); | ||
667 | |||
668 | if (seen_count < DBG_SCHED_LIMIT) | ||
669 | seen[seen_count++].qh = p.qh; | ||
670 | } else | ||
671 | temp = 0; | ||
672 | tag = Q_NEXT_TYPE(fotg210, hw->hw_next); | ||
673 | p = p.qh->qh_next; | ||
674 | break; | ||
675 | case Q_TYPE_FSTN: | ||
676 | temp = scnprintf(next, size, | ||
677 | " fstn-%8x/%p", p.fstn->hw_prev, | ||
678 | p.fstn); | ||
679 | tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next); | ||
680 | p = p.fstn->fstn_next; | ||
681 | break; | ||
682 | case Q_TYPE_ITD: | ||
683 | temp = scnprintf(next, size, | ||
684 | " itd/%p", p.itd); | ||
685 | tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next); | ||
686 | p = p.itd->itd_next; | ||
687 | break; | ||
688 | } | ||
689 | size -= temp; | ||
690 | next += temp; | ||
691 | } while (p.ptr); | ||
692 | |||
693 | temp = scnprintf(next, size, "\n"); | ||
694 | size -= temp; | ||
695 | next += temp; | ||
696 | } | ||
697 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
698 | kfree(seen); | ||
699 | |||
700 | return buf->alloc_size - size; | ||
701 | } | ||
702 | #undef DBG_SCHED_LIMIT | ||
703 | |||
704 | static const char *rh_state_string(struct fotg210_hcd *fotg210) | ||
705 | { | ||
706 | switch (fotg210->rh_state) { | ||
707 | case FOTG210_RH_HALTED: | ||
708 | return "halted"; | ||
709 | case FOTG210_RH_SUSPENDED: | ||
710 | return "suspended"; | ||
711 | case FOTG210_RH_RUNNING: | ||
712 | return "running"; | ||
713 | case FOTG210_RH_STOPPING: | ||
714 | return "stopping"; | ||
715 | } | ||
716 | return "?"; | ||
717 | } | ||
718 | |||
719 | static ssize_t fill_registers_buffer(struct debug_buffer *buf) | ||
720 | { | ||
721 | struct usb_hcd *hcd; | ||
722 | struct fotg210_hcd *fotg210; | ||
723 | unsigned long flags; | ||
724 | unsigned temp, size, i; | ||
725 | char *next, scratch[80]; | ||
726 | static const char fmt[] = "%*s\n"; | ||
727 | static const char label[] = ""; | ||
728 | |||
729 | hcd = bus_to_hcd(buf->bus); | ||
730 | fotg210 = hcd_to_fotg210(hcd); | ||
731 | next = buf->output_buf; | ||
732 | size = buf->alloc_size; | ||
733 | |||
734 | spin_lock_irqsave(&fotg210->lock, flags); | ||
735 | |||
736 | if (!HCD_HW_ACCESSIBLE(hcd)) { | ||
737 | size = scnprintf(next, size, | ||
738 | "bus %s, device %s\n" | ||
739 | "%s\n" | ||
740 | "SUSPENDED(no register access)\n", | ||
741 | hcd->self.controller->bus->name, | ||
742 | dev_name(hcd->self.controller), | ||
743 | hcd->product_desc); | ||
744 | goto done; | ||
745 | } | ||
746 | |||
747 | /* Capability Registers */ | ||
748 | i = HC_VERSION(fotg210, fotg210_readl(fotg210, | ||
749 | &fotg210->caps->hc_capbase)); | ||
750 | temp = scnprintf(next, size, | ||
751 | "bus %s, device %s\n" | ||
752 | "%s\n" | ||
753 | "EHCI %x.%02x, rh state %s\n", | ||
754 | hcd->self.controller->bus->name, | ||
755 | dev_name(hcd->self.controller), | ||
756 | hcd->product_desc, | ||
757 | i >> 8, i & 0x0ff, rh_state_string(fotg210)); | ||
758 | size -= temp; | ||
759 | next += temp; | ||
760 | |||
761 | /* FIXME interpret both types of params */ | ||
762 | i = fotg210_readl(fotg210, &fotg210->caps->hcs_params); | ||
763 | temp = scnprintf(next, size, "structural params 0x%08x\n", i); | ||
764 | size -= temp; | ||
765 | next += temp; | ||
766 | |||
767 | i = fotg210_readl(fotg210, &fotg210->caps->hcc_params); | ||
768 | temp = scnprintf(next, size, "capability params 0x%08x\n", i); | ||
769 | size -= temp; | ||
770 | next += temp; | ||
771 | |||
772 | /* Operational Registers */ | ||
773 | temp = dbg_status_buf(scratch, sizeof(scratch), label, | ||
774 | fotg210_readl(fotg210, &fotg210->regs->status)); | ||
775 | temp = scnprintf(next, size, fmt, temp, scratch); | ||
776 | size -= temp; | ||
777 | next += temp; | ||
778 | |||
779 | temp = dbg_command_buf(scratch, sizeof(scratch), label, | ||
780 | fotg210_readl(fotg210, &fotg210->regs->command)); | ||
781 | temp = scnprintf(next, size, fmt, temp, scratch); | ||
782 | size -= temp; | ||
783 | next += temp; | ||
784 | |||
785 | temp = dbg_intr_buf(scratch, sizeof(scratch), label, | ||
786 | fotg210_readl(fotg210, &fotg210->regs->intr_enable)); | ||
787 | temp = scnprintf(next, size, fmt, temp, scratch); | ||
788 | size -= temp; | ||
789 | next += temp; | ||
790 | |||
791 | temp = scnprintf(next, size, "uframe %04x\n", | ||
792 | fotg210_read_frame_index(fotg210)); | ||
793 | size -= temp; | ||
794 | next += temp; | ||
795 | |||
796 | if (fotg210->async_unlink) { | ||
797 | temp = scnprintf(next, size, "async unlink qh %p\n", | ||
798 | fotg210->async_unlink); | ||
799 | size -= temp; | ||
800 | next += temp; | ||
801 | } | ||
802 | |||
803 | #ifdef FOTG210_STATS | ||
804 | temp = scnprintf(next, size, | ||
805 | "irq normal %ld err %ld iaa %ld(lost %ld)\n", | ||
806 | fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa, | ||
807 | fotg210->stats.lost_iaa); | ||
808 | size -= temp; | ||
809 | next += temp; | ||
810 | |||
811 | temp = scnprintf(next, size, "complete %ld unlink %ld\n", | ||
812 | fotg210->stats.complete, fotg210->stats.unlink); | ||
813 | size -= temp; | ||
814 | next += temp; | ||
815 | #endif | ||
816 | |||
817 | done: | ||
818 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
819 | |||
820 | return buf->alloc_size - size; | ||
821 | } | ||
822 | |||
823 | static struct debug_buffer *alloc_buffer(struct usb_bus *bus, | ||
824 | ssize_t (*fill_func)(struct debug_buffer *)) | ||
825 | { | ||
826 | struct debug_buffer *buf; | ||
827 | |||
828 | buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL); | ||
829 | |||
830 | if (buf) { | ||
831 | buf->bus = bus; | ||
832 | buf->fill_func = fill_func; | ||
833 | mutex_init(&buf->mutex); | ||
834 | buf->alloc_size = PAGE_SIZE; | ||
835 | } | ||
836 | |||
837 | return buf; | ||
838 | } | ||
839 | |||
840 | static int fill_buffer(struct debug_buffer *buf) | ||
841 | { | ||
842 | int ret = 0; | ||
843 | |||
844 | if (!buf->output_buf) | ||
845 | buf->output_buf = vmalloc(buf->alloc_size); | ||
846 | |||
847 | if (!buf->output_buf) { | ||
848 | ret = -ENOMEM; | ||
849 | goto out; | ||
850 | } | ||
851 | |||
852 | ret = buf->fill_func(buf); | ||
853 | |||
854 | if (ret >= 0) { | ||
855 | buf->count = ret; | ||
856 | ret = 0; | ||
857 | } | ||
858 | |||
859 | out: | ||
860 | return ret; | ||
861 | } | ||
862 | |||
863 | static ssize_t debug_output(struct file *file, char __user *user_buf, | ||
864 | size_t len, loff_t *offset) | ||
865 | { | ||
866 | struct debug_buffer *buf = file->private_data; | ||
867 | int ret = 0; | ||
868 | |||
869 | mutex_lock(&buf->mutex); | ||
870 | if (buf->count == 0) { | ||
871 | ret = fill_buffer(buf); | ||
872 | if (ret != 0) { | ||
873 | mutex_unlock(&buf->mutex); | ||
874 | goto out; | ||
875 | } | ||
876 | } | ||
877 | mutex_unlock(&buf->mutex); | ||
878 | |||
879 | ret = simple_read_from_buffer(user_buf, len, offset, | ||
880 | buf->output_buf, buf->count); | ||
881 | |||
882 | out: | ||
883 | return ret; | ||
884 | |||
885 | } | ||
886 | |||
887 | static int debug_close(struct inode *inode, struct file *file) | ||
888 | { | ||
889 | struct debug_buffer *buf = file->private_data; | ||
890 | |||
891 | if (buf) { | ||
892 | vfree(buf->output_buf); | ||
893 | kfree(buf); | ||
894 | } | ||
895 | |||
896 | return 0; | ||
897 | } | ||
898 | static int debug_async_open(struct inode *inode, struct file *file) | ||
899 | { | ||
900 | file->private_data = alloc_buffer(inode->i_private, fill_async_buffer); | ||
901 | |||
902 | return file->private_data ? 0 : -ENOMEM; | ||
903 | } | ||
904 | |||
905 | static int debug_periodic_open(struct inode *inode, struct file *file) | ||
906 | { | ||
907 | struct debug_buffer *buf; | ||
908 | buf = alloc_buffer(inode->i_private, fill_periodic_buffer); | ||
909 | if (!buf) | ||
910 | return -ENOMEM; | ||
911 | |||
912 | buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE; | ||
913 | file->private_data = buf; | ||
914 | return 0; | ||
915 | } | ||
916 | |||
917 | static int debug_registers_open(struct inode *inode, struct file *file) | ||
918 | { | ||
919 | file->private_data = alloc_buffer(inode->i_private, | ||
920 | fill_registers_buffer); | ||
921 | |||
922 | return file->private_data ? 0 : -ENOMEM; | ||
923 | } | ||
924 | |||
925 | static inline void create_debug_files(struct fotg210_hcd *fotg210) | ||
926 | { | ||
927 | struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self; | ||
928 | |||
929 | fotg210->debug_dir = debugfs_create_dir(bus->bus_name, | ||
930 | fotg210_debug_root); | ||
931 | if (!fotg210->debug_dir) | ||
932 | return; | ||
933 | |||
934 | if (!debugfs_create_file("async", S_IRUGO, fotg210->debug_dir, bus, | ||
935 | &debug_async_fops)) | ||
936 | goto file_error; | ||
937 | |||
938 | if (!debugfs_create_file("periodic", S_IRUGO, fotg210->debug_dir, bus, | ||
939 | &debug_periodic_fops)) | ||
940 | goto file_error; | ||
941 | |||
942 | if (!debugfs_create_file("registers", S_IRUGO, fotg210->debug_dir, bus, | ||
943 | &debug_registers_fops)) | ||
944 | goto file_error; | ||
945 | |||
946 | return; | ||
947 | |||
948 | file_error: | ||
949 | debugfs_remove_recursive(fotg210->debug_dir); | ||
950 | } | ||
951 | |||
952 | static inline void remove_debug_files(struct fotg210_hcd *fotg210) | ||
953 | { | ||
954 | debugfs_remove_recursive(fotg210->debug_dir); | ||
955 | } | ||
956 | |||
957 | #endif /* STUB_DEBUG_FILES */ | ||
958 | /*-------------------------------------------------------------------------*/ | ||
959 | |||
960 | /* | ||
961 | * handshake - spin reading hc until handshake completes or fails | ||
962 | * @ptr: address of hc register to be read | ||
963 | * @mask: bits to look at in result of read | ||
964 | * @done: value of those bits when handshake succeeds | ||
965 | * @usec: timeout in microseconds | ||
966 | * | ||
967 | * Returns negative errno, or zero on success | ||
968 | * | ||
969 | * Success happens when the "mask" bits have the specified value (hardware | ||
970 | * handshake done). There are two failure modes: "usec" have passed (major | ||
971 | * hardware flakeout), or the register reads as all-ones (hardware removed). | ||
972 | * | ||
973 | * That last failure should_only happen in cases like physical cardbus eject | ||
974 | * before driver shutdown. But it also seems to be caused by bugs in cardbus | ||
975 | * bridge shutdown: shutting down the bridge before the devices using it. | ||
976 | */ | ||
977 | static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr, | ||
978 | u32 mask, u32 done, int usec) | ||
979 | { | ||
980 | u32 result; | ||
981 | |||
982 | do { | ||
983 | result = fotg210_readl(fotg210, ptr); | ||
984 | if (result == ~(u32)0) /* card removed */ | ||
985 | return -ENODEV; | ||
986 | result &= mask; | ||
987 | if (result == done) | ||
988 | return 0; | ||
989 | udelay(1); | ||
990 | usec--; | ||
991 | } while (usec > 0); | ||
992 | return -ETIMEDOUT; | ||
993 | } | ||
994 | |||
995 | /* | ||
996 | * Force HC to halt state from unknown (EHCI spec section 2.3). | ||
997 | * Must be called with interrupts enabled and the lock not held. | ||
998 | */ | ||
999 | static int fotg210_halt(struct fotg210_hcd *fotg210) | ||
1000 | { | ||
1001 | u32 temp; | ||
1002 | |||
1003 | spin_lock_irq(&fotg210->lock); | ||
1004 | |||
1005 | /* disable any irqs left enabled by previous code */ | ||
1006 | fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable); | ||
1007 | |||
1008 | /* | ||
1009 | * This routine gets called during probe before fotg210->command | ||
1010 | * has been initialized, so we can't rely on its value. | ||
1011 | */ | ||
1012 | fotg210->command &= ~CMD_RUN; | ||
1013 | temp = fotg210_readl(fotg210, &fotg210->regs->command); | ||
1014 | temp &= ~(CMD_RUN | CMD_IAAD); | ||
1015 | fotg210_writel(fotg210, temp, &fotg210->regs->command); | ||
1016 | |||
1017 | spin_unlock_irq(&fotg210->lock); | ||
1018 | synchronize_irq(fotg210_to_hcd(fotg210)->irq); | ||
1019 | |||
1020 | return handshake(fotg210, &fotg210->regs->status, | ||
1021 | STS_HALT, STS_HALT, 16 * 125); | ||
1022 | } | ||
1023 | |||
1024 | /* | ||
1025 | * Reset a non-running (STS_HALT == 1) controller. | ||
1026 | * Must be called with interrupts enabled and the lock not held. | ||
1027 | */ | ||
1028 | static int fotg210_reset(struct fotg210_hcd *fotg210) | ||
1029 | { | ||
1030 | int retval; | ||
1031 | u32 command = fotg210_readl(fotg210, &fotg210->regs->command); | ||
1032 | |||
1033 | /* If the EHCI debug controller is active, special care must be | ||
1034 | * taken before and after a host controller reset */ | ||
1035 | if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210))) | ||
1036 | fotg210->debug = NULL; | ||
1037 | |||
1038 | command |= CMD_RESET; | ||
1039 | dbg_cmd(fotg210, "reset", command); | ||
1040 | fotg210_writel(fotg210, command, &fotg210->regs->command); | ||
1041 | fotg210->rh_state = FOTG210_RH_HALTED; | ||
1042 | fotg210->next_statechange = jiffies; | ||
1043 | retval = handshake(fotg210, &fotg210->regs->command, | ||
1044 | CMD_RESET, 0, 250 * 1000); | ||
1045 | |||
1046 | if (retval) | ||
1047 | return retval; | ||
1048 | |||
1049 | if (fotg210->debug) | ||
1050 | dbgp_external_startup(fotg210_to_hcd(fotg210)); | ||
1051 | |||
1052 | fotg210->port_c_suspend = fotg210->suspended_ports = | ||
1053 | fotg210->resuming_ports = 0; | ||
1054 | return retval; | ||
1055 | } | ||
1056 | |||
1057 | /* | ||
1058 | * Idle the controller (turn off the schedules). | ||
1059 | * Must be called with interrupts enabled and the lock not held. | ||
1060 | */ | ||
1061 | static void fotg210_quiesce(struct fotg210_hcd *fotg210) | ||
1062 | { | ||
1063 | u32 temp; | ||
1064 | |||
1065 | if (fotg210->rh_state != FOTG210_RH_RUNNING) | ||
1066 | return; | ||
1067 | |||
1068 | /* wait for any schedule enables/disables to take effect */ | ||
1069 | temp = (fotg210->command << 10) & (STS_ASS | STS_PSS); | ||
1070 | handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp, | ||
1071 | 16 * 125); | ||
1072 | |||
1073 | /* then disable anything that's still active */ | ||
1074 | spin_lock_irq(&fotg210->lock); | ||
1075 | fotg210->command &= ~(CMD_ASE | CMD_PSE); | ||
1076 | fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); | ||
1077 | spin_unlock_irq(&fotg210->lock); | ||
1078 | |||
1079 | /* hardware can take 16 microframes to turn off ... */ | ||
1080 | handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0, | ||
1081 | 16 * 125); | ||
1082 | } | ||
1083 | |||
1084 | /*-------------------------------------------------------------------------*/ | ||
1085 | |||
1086 | static void end_unlink_async(struct fotg210_hcd *fotg210); | ||
1087 | static void unlink_empty_async(struct fotg210_hcd *fotg210); | ||
1088 | static void fotg210_work(struct fotg210_hcd *fotg210); | ||
1089 | static void start_unlink_intr(struct fotg210_hcd *fotg210, | ||
1090 | struct fotg210_qh *qh); | ||
1091 | static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh); | ||
1092 | |||
1093 | /*-------------------------------------------------------------------------*/ | ||
1094 | |||
1095 | /* Set a bit in the USBCMD register */ | ||
1096 | static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit) | ||
1097 | { | ||
1098 | fotg210->command |= bit; | ||
1099 | fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); | ||
1100 | |||
1101 | /* unblock posted write */ | ||
1102 | fotg210_readl(fotg210, &fotg210->regs->command); | ||
1103 | } | ||
1104 | |||
1105 | /* Clear a bit in the USBCMD register */ | ||
1106 | static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit) | ||
1107 | { | ||
1108 | fotg210->command &= ~bit; | ||
1109 | fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); | ||
1110 | |||
1111 | /* unblock posted write */ | ||
1112 | fotg210_readl(fotg210, &fotg210->regs->command); | ||
1113 | } | ||
1114 | |||
1115 | /*-------------------------------------------------------------------------*/ | ||
1116 | |||
1117 | /* | ||
1118 | * EHCI timer support... Now using hrtimers. | ||
1119 | * | ||
1120 | * Lots of different events are triggered from fotg210->hrtimer. Whenever | ||
1121 | * the timer routine runs, it checks each possible event; events that are | ||
1122 | * currently enabled and whose expiration time has passed get handled. | ||
1123 | * The set of enabled events is stored as a collection of bitflags in | ||
1124 | * fotg210->enabled_hrtimer_events, and they are numbered in order of | ||
1125 | * increasing delay values (ranging between 1 ms and 100 ms). | ||
1126 | * | ||
1127 | * Rather than implementing a sorted list or tree of all pending events, | ||
1128 | * we keep track only of the lowest-numbered pending event, in | ||
1129 | * fotg210->next_hrtimer_event. Whenever fotg210->hrtimer gets restarted, its | ||
1130 | * expiration time is set to the timeout value for this event. | ||
1131 | * | ||
1132 | * As a result, events might not get handled right away; the actual delay | ||
1133 | * could be anywhere up to twice the requested delay. This doesn't | ||
1134 | * matter, because none of the events are especially time-critical. The | ||
1135 | * ones that matter most all have a delay of 1 ms, so they will be | ||
1136 | * handled after 2 ms at most, which is okay. In addition to this, we | ||
1137 | * allow for an expiration range of 1 ms. | ||
1138 | */ | ||
1139 | |||
1140 | /* | ||
1141 | * Delay lengths for the hrtimer event types. | ||
1142 | * Keep this list sorted by delay length, in the same order as | ||
1143 | * the event types indexed by enum fotg210_hrtimer_event in fotg210.h. | ||
1144 | */ | ||
1145 | static unsigned event_delays_ns[] = { | ||
1146 | 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_ASS */ | ||
1147 | 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_PSS */ | ||
1148 | 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_DEAD */ | ||
1149 | 1125 * NSEC_PER_USEC, /* FOTG210_HRTIMER_UNLINK_INTR */ | ||
1150 | 2 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_FREE_ITDS */ | ||
1151 | 6 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_ASYNC_UNLINKS */ | ||
1152 | 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IAA_WATCHDOG */ | ||
1153 | 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_PERIODIC */ | ||
1154 | 15 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_ASYNC */ | ||
1155 | 100 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IO_WATCHDOG */ | ||
1156 | }; | ||
1157 | |||
1158 | /* Enable a pending hrtimer event */ | ||
1159 | static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event, | ||
1160 | bool resched) | ||
1161 | { | ||
1162 | ktime_t *timeout = &fotg210->hr_timeouts[event]; | ||
1163 | |||
1164 | if (resched) | ||
1165 | *timeout = ktime_add(ktime_get(), | ||
1166 | ktime_set(0, event_delays_ns[event])); | ||
1167 | fotg210->enabled_hrtimer_events |= (1 << event); | ||
1168 | |||
1169 | /* Track only the lowest-numbered pending event */ | ||
1170 | if (event < fotg210->next_hrtimer_event) { | ||
1171 | fotg210->next_hrtimer_event = event; | ||
1172 | hrtimer_start_range_ns(&fotg210->hrtimer, *timeout, | ||
1173 | NSEC_PER_MSEC, HRTIMER_MODE_ABS); | ||
1174 | } | ||
1175 | } | ||
1176 | |||
1177 | |||
1178 | /* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */ | ||
1179 | static void fotg210_poll_ASS(struct fotg210_hcd *fotg210) | ||
1180 | { | ||
1181 | unsigned actual, want; | ||
1182 | |||
1183 | /* Don't enable anything if the controller isn't running (e.g., died) */ | ||
1184 | if (fotg210->rh_state != FOTG210_RH_RUNNING) | ||
1185 | return; | ||
1186 | |||
1187 | want = (fotg210->command & CMD_ASE) ? STS_ASS : 0; | ||
1188 | actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_ASS; | ||
1189 | |||
1190 | if (want != actual) { | ||
1191 | |||
1192 | /* Poll again later, but give up after about 20 ms */ | ||
1193 | if (fotg210->ASS_poll_count++ < 20) { | ||
1194 | fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS, | ||
1195 | true); | ||
1196 | return; | ||
1197 | } | ||
1198 | fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n", | ||
1199 | want, actual); | ||
1200 | } | ||
1201 | fotg210->ASS_poll_count = 0; | ||
1202 | |||
1203 | /* The status is up-to-date; restart or stop the schedule as needed */ | ||
1204 | if (want == 0) { /* Stopped */ | ||
1205 | if (fotg210->async_count > 0) | ||
1206 | fotg210_set_command_bit(fotg210, CMD_ASE); | ||
1207 | |||
1208 | } else { /* Running */ | ||
1209 | if (fotg210->async_count == 0) { | ||
1210 | |||
1211 | /* Turn off the schedule after a while */ | ||
1212 | fotg210_enable_event(fotg210, | ||
1213 | FOTG210_HRTIMER_DISABLE_ASYNC, | ||
1214 | true); | ||
1215 | } | ||
1216 | } | ||
1217 | } | ||
1218 | |||
1219 | /* Turn off the async schedule after a brief delay */ | ||
1220 | static void fotg210_disable_ASE(struct fotg210_hcd *fotg210) | ||
1221 | { | ||
1222 | fotg210_clear_command_bit(fotg210, CMD_ASE); | ||
1223 | } | ||
1224 | |||
1225 | |||
1226 | /* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */ | ||
1227 | static void fotg210_poll_PSS(struct fotg210_hcd *fotg210) | ||
1228 | { | ||
1229 | unsigned actual, want; | ||
1230 | |||
1231 | /* Don't do anything if the controller isn't running (e.g., died) */ | ||
1232 | if (fotg210->rh_state != FOTG210_RH_RUNNING) | ||
1233 | return; | ||
1234 | |||
1235 | want = (fotg210->command & CMD_PSE) ? STS_PSS : 0; | ||
1236 | actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_PSS; | ||
1237 | |||
1238 | if (want != actual) { | ||
1239 | |||
1240 | /* Poll again later, but give up after about 20 ms */ | ||
1241 | if (fotg210->PSS_poll_count++ < 20) { | ||
1242 | fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS, | ||
1243 | true); | ||
1244 | return; | ||
1245 | } | ||
1246 | fotg210_dbg(fotg210, "Waited too long for the periodic schedule status (%x/%x), giving up\n", | ||
1247 | want, actual); | ||
1248 | } | ||
1249 | fotg210->PSS_poll_count = 0; | ||
1250 | |||
1251 | /* The status is up-to-date; restart or stop the schedule as needed */ | ||
1252 | if (want == 0) { /* Stopped */ | ||
1253 | if (fotg210->periodic_count > 0) | ||
1254 | fotg210_set_command_bit(fotg210, CMD_PSE); | ||
1255 | |||
1256 | } else { /* Running */ | ||
1257 | if (fotg210->periodic_count == 0) { | ||
1258 | |||
1259 | /* Turn off the schedule after a while */ | ||
1260 | fotg210_enable_event(fotg210, | ||
1261 | FOTG210_HRTIMER_DISABLE_PERIODIC, | ||
1262 | true); | ||
1263 | } | ||
1264 | } | ||
1265 | } | ||
1266 | |||
1267 | /* Turn off the periodic schedule after a brief delay */ | ||
1268 | static void fotg210_disable_PSE(struct fotg210_hcd *fotg210) | ||
1269 | { | ||
1270 | fotg210_clear_command_bit(fotg210, CMD_PSE); | ||
1271 | } | ||
1272 | |||
1273 | |||
1274 | /* Poll the STS_HALT status bit; see when a dead controller stops */ | ||
1275 | static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210) | ||
1276 | { | ||
1277 | if (!(fotg210_readl(fotg210, &fotg210->regs->status) & STS_HALT)) { | ||
1278 | |||
1279 | /* Give up after a few milliseconds */ | ||
1280 | if (fotg210->died_poll_count++ < 5) { | ||
1281 | /* Try again later */ | ||
1282 | fotg210_enable_event(fotg210, | ||
1283 | FOTG210_HRTIMER_POLL_DEAD, true); | ||
1284 | return; | ||
1285 | } | ||
1286 | fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n"); | ||
1287 | } | ||
1288 | |||
1289 | /* Clean up the mess */ | ||
1290 | fotg210->rh_state = FOTG210_RH_HALTED; | ||
1291 | fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable); | ||
1292 | fotg210_work(fotg210); | ||
1293 | end_unlink_async(fotg210); | ||
1294 | |||
1295 | /* Not in process context, so don't try to reset the controller */ | ||
1296 | } | ||
1297 | |||
1298 | |||
1299 | /* Handle unlinked interrupt QHs once they are gone from the hardware */ | ||
1300 | static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210) | ||
1301 | { | ||
1302 | bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING); | ||
1303 | |||
1304 | /* | ||
1305 | * Process all the QHs on the intr_unlink list that were added | ||
1306 | * before the current unlink cycle began. The list is in | ||
1307 | * temporal order, so stop when we reach the first entry in the | ||
1308 | * current cycle. But if the root hub isn't running then | ||
1309 | * process all the QHs on the list. | ||
1310 | */ | ||
1311 | fotg210->intr_unlinking = true; | ||
1312 | while (fotg210->intr_unlink) { | ||
1313 | struct fotg210_qh *qh = fotg210->intr_unlink; | ||
1314 | |||
1315 | if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle) | ||
1316 | break; | ||
1317 | fotg210->intr_unlink = qh->unlink_next; | ||
1318 | qh->unlink_next = NULL; | ||
1319 | end_unlink_intr(fotg210, qh); | ||
1320 | } | ||
1321 | |||
1322 | /* Handle remaining entries later */ | ||
1323 | if (fotg210->intr_unlink) { | ||
1324 | fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR, | ||
1325 | true); | ||
1326 | ++fotg210->intr_unlink_cycle; | ||
1327 | } | ||
1328 | fotg210->intr_unlinking = false; | ||
1329 | } | ||
1330 | |||
1331 | |||
1332 | /* Start another free-iTDs/siTDs cycle */ | ||
1333 | static void start_free_itds(struct fotg210_hcd *fotg210) | ||
1334 | { | ||
1335 | if (!(fotg210->enabled_hrtimer_events & | ||
1336 | BIT(FOTG210_HRTIMER_FREE_ITDS))) { | ||
1337 | fotg210->last_itd_to_free = list_entry( | ||
1338 | fotg210->cached_itd_list.prev, | ||
1339 | struct fotg210_itd, itd_list); | ||
1340 | fotg210_enable_event(fotg210, FOTG210_HRTIMER_FREE_ITDS, true); | ||
1341 | } | ||
1342 | } | ||
1343 | |||
1344 | /* Wait for controller to stop using old iTDs and siTDs */ | ||
1345 | static void end_free_itds(struct fotg210_hcd *fotg210) | ||
1346 | { | ||
1347 | struct fotg210_itd *itd, *n; | ||
1348 | |||
1349 | if (fotg210->rh_state < FOTG210_RH_RUNNING) | ||
1350 | fotg210->last_itd_to_free = NULL; | ||
1351 | |||
1352 | list_for_each_entry_safe(itd, n, &fotg210->cached_itd_list, itd_list) { | ||
1353 | list_del(&itd->itd_list); | ||
1354 | dma_pool_free(fotg210->itd_pool, itd, itd->itd_dma); | ||
1355 | if (itd == fotg210->last_itd_to_free) | ||
1356 | break; | ||
1357 | } | ||
1358 | |||
1359 | if (!list_empty(&fotg210->cached_itd_list)) | ||
1360 | start_free_itds(fotg210); | ||
1361 | } | ||
1362 | |||
1363 | |||
1364 | /* Handle lost (or very late) IAA interrupts */ | ||
1365 | static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210) | ||
1366 | { | ||
1367 | if (fotg210->rh_state != FOTG210_RH_RUNNING) | ||
1368 | return; | ||
1369 | |||
1370 | /* | ||
1371 | * Lost IAA irqs wedge things badly; seen first with a vt8235. | ||
1372 | * So we need this watchdog, but must protect it against both | ||
1373 | * (a) SMP races against real IAA firing and retriggering, and | ||
1374 | * (b) clean HC shutdown, when IAA watchdog was pending. | ||
1375 | */ | ||
1376 | if (fotg210->async_iaa) { | ||
1377 | u32 cmd, status; | ||
1378 | |||
1379 | /* If we get here, IAA is *REALLY* late. It's barely | ||
1380 | * conceivable that the system is so busy that CMD_IAAD | ||
1381 | * is still legitimately set, so let's be sure it's | ||
1382 | * clear before we read STS_IAA. (The HC should clear | ||
1383 | * CMD_IAAD when it sets STS_IAA.) | ||
1384 | */ | ||
1385 | cmd = fotg210_readl(fotg210, &fotg210->regs->command); | ||
1386 | |||
1387 | /* | ||
1388 | * If IAA is set here it either legitimately triggered | ||
1389 | * after the watchdog timer expired (_way_ late, so we'll | ||
1390 | * still count it as lost) ... or a silicon erratum: | ||
1391 | * - VIA seems to set IAA without triggering the IRQ; | ||
1392 | * - IAAD potentially cleared without setting IAA. | ||
1393 | */ | ||
1394 | status = fotg210_readl(fotg210, &fotg210->regs->status); | ||
1395 | if ((status & STS_IAA) || !(cmd & CMD_IAAD)) { | ||
1396 | COUNT(fotg210->stats.lost_iaa); | ||
1397 | fotg210_writel(fotg210, STS_IAA, | ||
1398 | &fotg210->regs->status); | ||
1399 | } | ||
1400 | |||
1401 | fotg210_vdbg(fotg210, "IAA watchdog: status %x cmd %x\n", | ||
1402 | status, cmd); | ||
1403 | end_unlink_async(fotg210); | ||
1404 | } | ||
1405 | } | ||
1406 | |||
1407 | |||
1408 | /* Enable the I/O watchdog, if appropriate */ | ||
1409 | static void turn_on_io_watchdog(struct fotg210_hcd *fotg210) | ||
1410 | { | ||
1411 | /* Not needed if the controller isn't running or it's already enabled */ | ||
1412 | if (fotg210->rh_state != FOTG210_RH_RUNNING || | ||
1413 | (fotg210->enabled_hrtimer_events & | ||
1414 | BIT(FOTG210_HRTIMER_IO_WATCHDOG))) | ||
1415 | return; | ||
1416 | |||
1417 | /* | ||
1418 | * Isochronous transfers always need the watchdog. | ||
1419 | * For other sorts we use it only if the flag is set. | ||
1420 | */ | ||
1421 | if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog && | ||
1422 | fotg210->async_count + fotg210->intr_count > 0)) | ||
1423 | fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG, | ||
1424 | true); | ||
1425 | } | ||
1426 | |||
1427 | |||
1428 | /* | ||
1429 | * Handler functions for the hrtimer event types. | ||
1430 | * Keep this array in the same order as the event types indexed by | ||
1431 | * enum fotg210_hrtimer_event in fotg210.h. | ||
1432 | */ | ||
1433 | static void (*event_handlers[])(struct fotg210_hcd *) = { | ||
1434 | fotg210_poll_ASS, /* FOTG210_HRTIMER_POLL_ASS */ | ||
1435 | fotg210_poll_PSS, /* FOTG210_HRTIMER_POLL_PSS */ | ||
1436 | fotg210_handle_controller_death, /* FOTG210_HRTIMER_POLL_DEAD */ | ||
1437 | fotg210_handle_intr_unlinks, /* FOTG210_HRTIMER_UNLINK_INTR */ | ||
1438 | end_free_itds, /* FOTG210_HRTIMER_FREE_ITDS */ | ||
1439 | unlink_empty_async, /* FOTG210_HRTIMER_ASYNC_UNLINKS */ | ||
1440 | fotg210_iaa_watchdog, /* FOTG210_HRTIMER_IAA_WATCHDOG */ | ||
1441 | fotg210_disable_PSE, /* FOTG210_HRTIMER_DISABLE_PERIODIC */ | ||
1442 | fotg210_disable_ASE, /* FOTG210_HRTIMER_DISABLE_ASYNC */ | ||
1443 | fotg210_work, /* FOTG210_HRTIMER_IO_WATCHDOG */ | ||
1444 | }; | ||
1445 | |||
1446 | static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t) | ||
1447 | { | ||
1448 | struct fotg210_hcd *fotg210 = | ||
1449 | container_of(t, struct fotg210_hcd, hrtimer); | ||
1450 | ktime_t now; | ||
1451 | unsigned long events; | ||
1452 | unsigned long flags; | ||
1453 | unsigned e; | ||
1454 | |||
1455 | spin_lock_irqsave(&fotg210->lock, flags); | ||
1456 | |||
1457 | events = fotg210->enabled_hrtimer_events; | ||
1458 | fotg210->enabled_hrtimer_events = 0; | ||
1459 | fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT; | ||
1460 | |||
1461 | /* | ||
1462 | * Check each pending event. If its time has expired, handle | ||
1463 | * the event; otherwise re-enable it. | ||
1464 | */ | ||
1465 | now = ktime_get(); | ||
1466 | for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) { | ||
1467 | if (now.tv64 >= fotg210->hr_timeouts[e].tv64) | ||
1468 | event_handlers[e](fotg210); | ||
1469 | else | ||
1470 | fotg210_enable_event(fotg210, e, false); | ||
1471 | } | ||
1472 | |||
1473 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
1474 | return HRTIMER_NORESTART; | ||
1475 | } | ||
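The handler table above only works because it is kept in the same order as enum fotg210_hrtimer_event; the timer callback then walks the pending-event bitmask and indexes straight into it. A rough userspace sketch of that dispatch pattern (illustration only, every name below is invented for the example):

#include <stdio.h>

enum demo_event { DEMO_EV_POLL, DEMO_EV_FREE, DEMO_EV_WATCHDOG, DEMO_EV_COUNT };

static void demo_poll(void)     { printf("poll\n"); }
static void demo_free(void)     { printf("free\n"); }
static void demo_watchdog(void) { printf("watchdog\n"); }

/* Must stay in the same order as enum demo_event, just as event_handlers[]
 * must track enum fotg210_hrtimer_event. */
static void (*demo_handlers[])(void) = { demo_poll, demo_free, demo_watchdog };

int main(void)
{
	unsigned long events = (1UL << DEMO_EV_POLL) | (1UL << DEMO_EV_WATCHDOG);
	unsigned int e;

	/* open-coded equivalent of for_each_set_bit() over the pending mask */
	for (e = 0; e < DEMO_EV_COUNT; e++)
		if (events & (1UL << e))
			demo_handlers[e]();
	return 0;
}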
1476 | |||
1477 | /*-------------------------------------------------------------------------*/ | ||
1478 | |||
1479 | #define fotg210_bus_suspend NULL | ||
1480 | #define fotg210_bus_resume NULL | ||
1481 | |||
1482 | /*-------------------------------------------------------------------------*/ | ||
1483 | |||
1484 | static int check_reset_complete( | ||
1485 | struct fotg210_hcd *fotg210, | ||
1486 | int index, | ||
1487 | u32 __iomem *status_reg, | ||
1488 | int port_status | ||
1489 | ) { | ||
1490 | if (!(port_status & PORT_CONNECT)) | ||
1491 | return port_status; | ||
1492 | |||
1493 | /* if reset finished and it's still not enabled -- handoff */ | ||
1494 | if (!(port_status & PORT_PE)) { | ||
1495 | /* with integrated TT, there's nobody to hand it to! */ | ||
1496 | fotg210_dbg(fotg210, | ||
1497 | "Failed to enable port %d on root hub TT\n", | ||
1498 | index+1); | ||
1499 | return port_status; | ||
1500 | } else { | ||
1501 | fotg210_dbg(fotg210, "port %d reset complete, port enabled\n", | ||
1502 | index + 1); | ||
1503 | } | ||
1504 | |||
1505 | return port_status; | ||
1506 | } | ||
1507 | |||
1508 | /*-------------------------------------------------------------------------*/ | ||
1509 | |||
1510 | |||
1511 | /* build "status change" packet (one or two bytes) from HC registers */ | ||
1512 | |||
1513 | static int | ||
1514 | fotg210_hub_status_data(struct usb_hcd *hcd, char *buf) | ||
1515 | { | ||
1516 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
1517 | u32 temp, status; | ||
1518 | u32 mask; | ||
1519 | int retval = 1; | ||
1520 | unsigned long flags; | ||
1521 | |||
1522 | /* init status to no-changes */ | ||
1523 | buf[0] = 0; | ||
1524 | |||
1525 | /* Inform the core about resumes-in-progress by returning | ||
1526 | * a non-zero value even if there are no status changes. | ||
1527 | */ | ||
1528 | status = fotg210->resuming_ports; | ||
1529 | |||
1530 | mask = PORT_CSC | PORT_PEC; | ||
1531 | /* PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND */ | ||
1532 | |||
1533 | /* no hub change reports (bit 0) for now (power, ...) */ | ||
1534 | |||
1535 | /* port N changes (bit N)? */ | ||
1536 | spin_lock_irqsave(&fotg210->lock, flags); | ||
1537 | |||
1538 | temp = fotg210_readl(fotg210, &fotg210->regs->port_status); | ||
1539 | |||
1540 | /* | ||
1541 | * Return status information even for ports with OWNER set. | ||
1542 | * Otherwise khubd wouldn't see the disconnect event when a | ||
1543 | * high-speed device is switched over to the companion | ||
1544 | * controller by the user. | ||
1545 | */ | ||
1546 | |||
1547 | if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend) | ||
1548 | || (fotg210->reset_done[0] && time_after_eq( | ||
1549 | jiffies, fotg210->reset_done[0]))) { | ||
1550 | buf[0] |= 1 << 1; | ||
1551 | status = STS_PCD; | ||
1552 | } | ||
1553 | /* FIXME autosuspend idle root hubs */ | ||
1554 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
1555 | return status ? retval : 0; | ||
1556 | } | ||
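The buffer filled above is the hub's status-change bitmap: bit 0 reports hub-wide changes (unused here) and bit N reports port N, so with the single root-hub port a change is reported as buf[0] = 0x02 in a one-byte report. A minimal standalone illustration (assumes one port and no hub-wide changes):

#include <stdio.h>

int main(void)
{
	int ports = 1;
	int report_len = (ports + 1 + 7) / 8;	/* bit 0 = hub, bits 1..N = ports */
	unsigned char buf[1] = { 0 };

	buf[0] |= 1 << 1;			/* port 1 changed */
	printf("report: %d byte(s), buf[0] = 0x%02x\n", report_len, buf[0]);
	return 0;
}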
1557 | |||
1558 | /*-------------------------------------------------------------------------*/ | ||
1559 | |||
1560 | static void | ||
1561 | fotg210_hub_descriptor( | ||
1562 | struct fotg210_hcd *fotg210, | ||
1563 | struct usb_hub_descriptor *desc | ||
1564 | ) { | ||
1565 | int ports = HCS_N_PORTS(fotg210->hcs_params); | ||
1566 | u16 temp; | ||
1567 | |||
1568 | desc->bDescriptorType = 0x29; | ||
1569 | desc->bPwrOn2PwrGood = 10; /* fotg210 1.0, 2.3.9 says 20ms max */ | ||
1570 | desc->bHubContrCurrent = 0; | ||
1571 | |||
1572 | desc->bNbrPorts = ports; | ||
1573 | temp = 1 + (ports / 8); | ||
1574 | desc->bDescLength = 7 + 2 * temp; | ||
1575 | |||
1576 | /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */ | ||
1577 | memset(&desc->u.hs.DeviceRemovable[0], 0, temp); | ||
1578 | memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp); | ||
1579 | |||
1580 | temp = 0x0008; /* per-port overcurrent reporting */ | ||
1581 | temp |= 0x0002; /* no power switching */ | ||
1582 | desc->wHubCharacteristics = cpu_to_le16(temp); | ||
1583 | } | ||
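As a quick sanity check of the descriptor arithmetic above: for a single-port root hub (the usual FOTG210 case) each bitmap is one byte, giving a 9-byte descriptor, and the characteristics word comes out to 0x000a. A small standalone check (not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int ports = 1;				/* single-port root hub */
	uint16_t temp = 1 + (ports / 8);	/* bytes per bitmap */
	uint8_t bDescLength = 7 + 2 * temp;
	uint16_t characteristics = 0x0008 | 0x0002; /* per-port OC, no power switching */

	assert(temp == 1);
	assert(bDescLength == 9);
	assert(characteristics == 0x000a);
	/* DeviceRemovable byte is 0x00; the legacy PortPwrCtrlMask byte is 0xff */
	return 0;
}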
1584 | |||
1585 | /*-------------------------------------------------------------------------*/ | ||
1586 | |||
1587 | static int fotg210_hub_control( | ||
1588 | struct usb_hcd *hcd, | ||
1589 | u16 typeReq, | ||
1590 | u16 wValue, | ||
1591 | u16 wIndex, | ||
1592 | char *buf, | ||
1593 | u16 wLength | ||
1594 | ) { | ||
1595 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
1596 | int ports = HCS_N_PORTS(fotg210->hcs_params); | ||
1597 | u32 __iomem *status_reg = &fotg210->regs->port_status; | ||
1598 | u32 temp, temp1, status; | ||
1599 | unsigned long flags; | ||
1600 | int retval = 0; | ||
1601 | unsigned selector; | ||
1602 | |||
1603 | /* | ||
1604 | * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. | ||
1605 | * HCS_INDICATOR may say we can change LEDs to off/amber/green. | ||
1606 | * (track current state ourselves) ... blink for diagnostics, | ||
1607 | * power, "this is the one", etc. EHCI spec supports this. | ||
1608 | */ | ||
1609 | |||
1610 | spin_lock_irqsave(&fotg210->lock, flags); | ||
1611 | switch (typeReq) { | ||
1612 | case ClearHubFeature: | ||
1613 | switch (wValue) { | ||
1614 | case C_HUB_LOCAL_POWER: | ||
1615 | case C_HUB_OVER_CURRENT: | ||
1616 | /* no hub-wide feature/status flags */ | ||
1617 | break; | ||
1618 | default: | ||
1619 | goto error; | ||
1620 | } | ||
1621 | break; | ||
1622 | case ClearPortFeature: | ||
1623 | if (!wIndex || wIndex > ports) | ||
1624 | goto error; | ||
1625 | wIndex--; | ||
1626 | temp = fotg210_readl(fotg210, status_reg); | ||
1627 | temp &= ~PORT_RWC_BITS; | ||
1628 | |||
1629 | /* | ||
1630 | * Even if OWNER is set, so the port is owned by the | ||
1631 | * companion controller, khubd needs to be able to clear | ||
1632 | * the port-change status bits (especially | ||
1633 | * USB_PORT_STAT_C_CONNECTION). | ||
1634 | */ | ||
1635 | |||
1636 | switch (wValue) { | ||
1637 | case USB_PORT_FEAT_ENABLE: | ||
1638 | fotg210_writel(fotg210, temp & ~PORT_PE, status_reg); | ||
1639 | break; | ||
1640 | case USB_PORT_FEAT_C_ENABLE: | ||
1641 | fotg210_writel(fotg210, temp | PORT_PEC, status_reg); | ||
1642 | break; | ||
1643 | case USB_PORT_FEAT_SUSPEND: | ||
1644 | if (temp & PORT_RESET) | ||
1645 | goto error; | ||
1646 | if (!(temp & PORT_SUSPEND)) | ||
1647 | break; | ||
1648 | if ((temp & PORT_PE) == 0) | ||
1649 | goto error; | ||
1650 | |||
1651 | /* resume signaling for 20 msec */ | ||
1652 | fotg210_writel(fotg210, temp | PORT_RESUME, status_reg); | ||
1653 | fotg210->reset_done[wIndex] = jiffies | ||
1654 | + msecs_to_jiffies(20); | ||
1655 | break; | ||
1656 | case USB_PORT_FEAT_C_SUSPEND: | ||
1657 | clear_bit(wIndex, &fotg210->port_c_suspend); | ||
1658 | break; | ||
1659 | case USB_PORT_FEAT_C_CONNECTION: | ||
1660 | fotg210_writel(fotg210, temp | PORT_CSC, status_reg); | ||
1661 | break; | ||
1662 | case USB_PORT_FEAT_C_OVER_CURRENT: | ||
1663 | fotg210_writel(fotg210, temp | OTGISR_OVC, | ||
1664 | &fotg210->regs->otgisr); | ||
1665 | break; | ||
1666 | case USB_PORT_FEAT_C_RESET: | ||
1667 | /* GetPortStatus clears reset */ | ||
1668 | break; | ||
1669 | default: | ||
1670 | goto error; | ||
1671 | } | ||
1672 | fotg210_readl(fotg210, &fotg210->regs->command); | ||
1673 | break; | ||
1674 | case GetHubDescriptor: | ||
1675 | fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *) | ||
1676 | buf); | ||
1677 | break; | ||
1678 | case GetHubStatus: | ||
1679 | /* no hub-wide feature/status flags */ | ||
1680 | memset(buf, 0, 4); | ||
1681 | /*cpu_to_le32s ((u32 *) buf); */ | ||
1682 | break; | ||
1683 | case GetPortStatus: | ||
1684 | if (!wIndex || wIndex > ports) | ||
1685 | goto error; | ||
1686 | wIndex--; | ||
1687 | status = 0; | ||
1688 | temp = fotg210_readl(fotg210, status_reg); | ||
1689 | |||
1690 | /* wPortChange bits */ | ||
1691 | if (temp & PORT_CSC) | ||
1692 | status |= USB_PORT_STAT_C_CONNECTION << 16; | ||
1693 | if (temp & PORT_PEC) | ||
1694 | status |= USB_PORT_STAT_C_ENABLE << 16; | ||
1695 | |||
1696 | temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr); | ||
1697 | if (temp1 & OTGISR_OVC) | ||
1698 | status |= USB_PORT_STAT_C_OVERCURRENT << 16; | ||
1699 | |||
1700 | /* whoever resumes must GetPortStatus to complete it!! */ | ||
1701 | if (temp & PORT_RESUME) { | ||
1702 | |||
1703 | /* Remote Wakeup received? */ | ||
1704 | if (!fotg210->reset_done[wIndex]) { | ||
1705 | /* resume signaling for 20 msec */ | ||
1706 | fotg210->reset_done[wIndex] = jiffies | ||
1707 | + msecs_to_jiffies(20); | ||
1708 | /* check the port again */ | ||
1709 | mod_timer(&fotg210_to_hcd(fotg210)->rh_timer, | ||
1710 | fotg210->reset_done[wIndex]); | ||
1711 | } | ||
1712 | |||
1713 | /* resume completed? */ | ||
1714 | else if (time_after_eq(jiffies, | ||
1715 | fotg210->reset_done[wIndex])) { | ||
1716 | clear_bit(wIndex, &fotg210->suspended_ports); | ||
1717 | set_bit(wIndex, &fotg210->port_c_suspend); | ||
1718 | fotg210->reset_done[wIndex] = 0; | ||
1719 | |||
1720 | /* stop resume signaling */ | ||
1721 | temp = fotg210_readl(fotg210, status_reg); | ||
1722 | fotg210_writel(fotg210, | ||
1723 | temp & ~(PORT_RWC_BITS | PORT_RESUME), | ||
1724 | status_reg); | ||
1725 | clear_bit(wIndex, &fotg210->resuming_ports); | ||
1726 | retval = handshake(fotg210, status_reg, | ||
1727 | PORT_RESUME, 0, 2000 /* 2msec */); | ||
1728 | if (retval != 0) { | ||
1729 | fotg210_err(fotg210, | ||
1730 | "port %d resume error %d\n", | ||
1731 | wIndex + 1, retval); | ||
1732 | goto error; | ||
1733 | } | ||
1734 | temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10)); | ||
1735 | } | ||
1736 | } | ||
1737 | |||
1738 | /* whoever resets must GetPortStatus to complete it!! */ | ||
1739 | if ((temp & PORT_RESET) | ||
1740 | && time_after_eq(jiffies, | ||
1741 | fotg210->reset_done[wIndex])) { | ||
1742 | status |= USB_PORT_STAT_C_RESET << 16; | ||
1743 | fotg210->reset_done[wIndex] = 0; | ||
1744 | clear_bit(wIndex, &fotg210->resuming_ports); | ||
1745 | |||
1746 | /* force reset to complete */ | ||
1747 | fotg210_writel(fotg210, | ||
1748 | temp & ~(PORT_RWC_BITS | PORT_RESET), | ||
1749 | status_reg); | ||
1750 | /* REVISIT: some hardware needs 550+ usec to clear | ||
1751 | * this bit; seems too long to spin routinely... | ||
1752 | */ | ||
1753 | retval = handshake(fotg210, status_reg, | ||
1754 | PORT_RESET, 0, 1000); | ||
1755 | if (retval != 0) { | ||
1756 | fotg210_err(fotg210, "port %d reset error %d\n", | ||
1757 | wIndex + 1, retval); | ||
1758 | goto error; | ||
1759 | } | ||
1760 | |||
1761 | /* see what we found out */ | ||
1762 | temp = check_reset_complete(fotg210, wIndex, status_reg, | ||
1763 | fotg210_readl(fotg210, status_reg)); | ||
1764 | } | ||
1765 | |||
1766 | if (!(temp & (PORT_RESUME|PORT_RESET))) { | ||
1767 | fotg210->reset_done[wIndex] = 0; | ||
1768 | clear_bit(wIndex, &fotg210->resuming_ports); | ||
1769 | } | ||
1770 | |||
1771 | /* transfer dedicated ports to the companion hc */ | ||
1772 | if ((temp & PORT_CONNECT) && | ||
1773 | test_bit(wIndex, &fotg210->companion_ports)) { | ||
1774 | temp &= ~PORT_RWC_BITS; | ||
1775 | fotg210_writel(fotg210, temp, status_reg); | ||
1776 | fotg210_dbg(fotg210, "port %d --> companion\n", | ||
1777 | wIndex + 1); | ||
1778 | temp = fotg210_readl(fotg210, status_reg); | ||
1779 | } | ||
1780 | |||
1781 | /* | ||
1782 | * Even if OWNER is set, there's no harm letting khubd | ||
1783 | * see the wPortStatus values (they should all be 0 except | ||
1784 | * for PORT_POWER anyway). | ||
1785 | */ | ||
1786 | |||
1787 | if (temp & PORT_CONNECT) { | ||
1788 | status |= USB_PORT_STAT_CONNECTION; | ||
1789 | status |= fotg210_port_speed(fotg210, temp); | ||
1790 | } | ||
1791 | if (temp & PORT_PE) | ||
1792 | status |= USB_PORT_STAT_ENABLE; | ||
1793 | |||
1794 | /* maybe the port was unsuspended without our knowledge */ | ||
1795 | if (temp & (PORT_SUSPEND|PORT_RESUME)) { | ||
1796 | status |= USB_PORT_STAT_SUSPEND; | ||
1797 | } else if (test_bit(wIndex, &fotg210->suspended_ports)) { | ||
1798 | clear_bit(wIndex, &fotg210->suspended_ports); | ||
1799 | clear_bit(wIndex, &fotg210->resuming_ports); | ||
1800 | fotg210->reset_done[wIndex] = 0; | ||
1801 | if (temp & PORT_PE) | ||
1802 | set_bit(wIndex, &fotg210->port_c_suspend); | ||
1803 | } | ||
1804 | |||
1805 | temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr); | ||
1806 | if (temp1 & OTGISR_OVC) | ||
1807 | status |= USB_PORT_STAT_OVERCURRENT; | ||
1808 | if (temp & PORT_RESET) | ||
1809 | status |= USB_PORT_STAT_RESET; | ||
1810 | if (test_bit(wIndex, &fotg210->port_c_suspend)) | ||
1811 | status |= USB_PORT_STAT_C_SUSPEND << 16; | ||
1812 | |||
1813 | #ifndef VERBOSE_DEBUG | ||
1814 | if (status & ~0xffff) /* only if wPortChange is interesting */ | ||
1815 | #endif | ||
1816 | dbg_port(fotg210, "GetStatus", wIndex + 1, temp); | ||
1817 | put_unaligned_le32(status, buf); | ||
1818 | break; | ||
1819 | case SetHubFeature: | ||
1820 | switch (wValue) { | ||
1821 | case C_HUB_LOCAL_POWER: | ||
1822 | case C_HUB_OVER_CURRENT: | ||
1823 | /* no hub-wide feature/status flags */ | ||
1824 | break; | ||
1825 | default: | ||
1826 | goto error; | ||
1827 | } | ||
1828 | break; | ||
1829 | case SetPortFeature: | ||
1830 | selector = wIndex >> 8; | ||
1831 | wIndex &= 0xff; | ||
1832 | |||
1833 | if (!wIndex || wIndex > ports) | ||
1834 | goto error; | ||
1835 | wIndex--; | ||
1836 | temp = fotg210_readl(fotg210, status_reg); | ||
1837 | temp &= ~PORT_RWC_BITS; | ||
1838 | switch (wValue) { | ||
1839 | case USB_PORT_FEAT_SUSPEND: | ||
1840 | if ((temp & PORT_PE) == 0 | ||
1841 | || (temp & PORT_RESET) != 0) | ||
1842 | goto error; | ||
1843 | |||
1844 | /* After the above check the port must be connected. | ||
1845 | * Setting the appropriate bit could put the PHY into low-power | ||
1846 | * mode if the hostpc feature is available. | ||
1847 | */ | ||
1848 | fotg210_writel(fotg210, temp | PORT_SUSPEND, | ||
1849 | status_reg); | ||
1850 | set_bit(wIndex, &fotg210->suspended_ports); | ||
1851 | break; | ||
1852 | case USB_PORT_FEAT_RESET: | ||
1853 | if (temp & PORT_RESUME) | ||
1854 | goto error; | ||
1855 | /* line status bits may report this as low speed, | ||
1856 | * which can be fine if this root hub has a | ||
1857 | * transaction translator built in. | ||
1858 | */ | ||
1859 | fotg210_vdbg(fotg210, "port %d reset\n", wIndex + 1); | ||
1860 | temp |= PORT_RESET; | ||
1861 | temp &= ~PORT_PE; | ||
1862 | |||
1863 | /* | ||
1864 | * caller must wait, then call GetPortStatus | ||
1865 | * usb 2.0 spec says 50 ms resets on root | ||
1866 | */ | ||
1867 | fotg210->reset_done[wIndex] = jiffies | ||
1868 | + msecs_to_jiffies(50); | ||
1869 | fotg210_writel(fotg210, temp, status_reg); | ||
1870 | break; | ||
1871 | |||
1872 | /* For downstream facing ports (these): one hub port is put | ||
1873 | * into test mode according to USB2 11.24.2.13, then the hub | ||
1874 | * must be reset (which for root hub now means rmmod+modprobe, | ||
1875 | * or else system reboot). See EHCI 2.3.9 and 4.14 for info | ||
1876 | * about the EHCI-specific stuff. | ||
1877 | */ | ||
1878 | case USB_PORT_FEAT_TEST: | ||
1879 | if (!selector || selector > 5) | ||
1880 | goto error; | ||
1881 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
1882 | fotg210_quiesce(fotg210); | ||
1883 | spin_lock_irqsave(&fotg210->lock, flags); | ||
1884 | |||
1885 | /* Put all enabled ports into suspend */ | ||
1886 | temp = fotg210_readl(fotg210, status_reg) & | ||
1887 | ~PORT_RWC_BITS; | ||
1888 | if (temp & PORT_PE) | ||
1889 | fotg210_writel(fotg210, temp | PORT_SUSPEND, | ||
1890 | status_reg); | ||
1891 | |||
1892 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
1893 | fotg210_halt(fotg210); | ||
1894 | spin_lock_irqsave(&fotg210->lock, flags); | ||
1895 | |||
1896 | temp = fotg210_readl(fotg210, status_reg); | ||
1897 | temp |= selector << 16; | ||
1898 | fotg210_writel(fotg210, temp, status_reg); | ||
1899 | break; | ||
1900 | |||
1901 | default: | ||
1902 | goto error; | ||
1903 | } | ||
1904 | fotg210_readl(fotg210, &fotg210->regs->command); | ||
1905 | break; | ||
1906 | |||
1907 | default: | ||
1908 | error: | ||
1909 | /* "stall" on error */ | ||
1910 | retval = -EPIPE; | ||
1911 | } | ||
1912 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
1913 | return retval; | ||
1914 | } | ||
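The typeReq values in this switch are the usual (bmRequestType << 8 | bRequest) encodings of the hub class requests (the constants are assumed to come from include/linux/usb/hcd.h), wIndex carries the 1-based port number, and for SetPortFeature(PORT_TEST) the test selector travels in the high byte of wIndex. A short standalone check of that encoding:

#include <assert.h>

#define USB_REQ_GET_STATUS	0x00
#define USB_REQ_CLEAR_FEATURE	0x01
#define USB_REQ_SET_FEATURE	0x03

int main(void)
{
	/* bmRequestType 0x23 = class request addressed to "other" (a port) */
	assert((0x2300 | USB_REQ_CLEAR_FEATURE) == 0x2301);	/* ClearPortFeature */
	assert((0xa300 | USB_REQ_GET_STATUS) == 0xa300);	/* GetPortStatus */
	assert((0x2300 | USB_REQ_SET_FEATURE) == 0x2303);	/* SetPortFeature */

	/* SetPortFeature(PORT_TEST): selector 4 (Test_Packet) on port 1 */
	unsigned int wIndex = (4 << 8) | 1;
	assert((wIndex >> 8) == 4 && (wIndex & 0xff) == 1);
	return 0;
}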
1915 | |||
1916 | static void __maybe_unused fotg210_relinquish_port(struct usb_hcd *hcd, | ||
1917 | int portnum) | ||
1918 | { | ||
1919 | return; | ||
1920 | } | ||
1921 | |||
1922 | static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd, | ||
1923 | int portnum) | ||
1924 | { | ||
1925 | return 0; | ||
1926 | } | ||
1927 | /*-------------------------------------------------------------------------*/ | ||
1928 | /* | ||
1929 | * There are basically three types of memory: | ||
1930 | * - data used only by the HCD ... kmalloc is fine | ||
1931 | * - async and periodic schedules, shared by HC and HCD ... these | ||
1932 | * need to use dma_pool or dma_alloc_coherent | ||
1933 | * - driver buffers, read/written by HC ... single shot DMA mapped | ||
1934 | * | ||
1935 | * There's also "register" data (e.g. PCI or SOC), which is memory mapped. | ||
1936 | * No memory seen by this driver is pageable. | ||
1937 | */ | ||
1938 | |||
1939 | /*-------------------------------------------------------------------------*/ | ||
1940 | |||
1941 | /* Allocate the key transfer structures from the previously allocated pool */ | ||
1942 | |||
1943 | static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210, | ||
1944 | struct fotg210_qtd *qtd, dma_addr_t dma) | ||
1945 | { | ||
1946 | memset(qtd, 0, sizeof(*qtd)); | ||
1947 | qtd->qtd_dma = dma; | ||
1948 | qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT); | ||
1949 | qtd->hw_next = FOTG210_LIST_END(fotg210); | ||
1950 | qtd->hw_alt_next = FOTG210_LIST_END(fotg210); | ||
1951 | INIT_LIST_HEAD(&qtd->qtd_list); | ||
1952 | } | ||
1953 | |||
1954 | static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210, | ||
1955 | gfp_t flags) | ||
1956 | { | ||
1957 | struct fotg210_qtd *qtd; | ||
1958 | dma_addr_t dma; | ||
1959 | |||
1960 | qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma); | ||
1961 | if (qtd != NULL) | ||
1962 | fotg210_qtd_init(fotg210, qtd, dma); | ||
1963 | |||
1964 | return qtd; | ||
1965 | } | ||
1966 | |||
1967 | static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210, | ||
1968 | struct fotg210_qtd *qtd) | ||
1969 | { | ||
1970 | dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma); | ||
1971 | } | ||
1972 | |||
1973 | |||
1974 | static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) | ||
1975 | { | ||
1976 | /* clean qtds first, and know this is not linked */ | ||
1977 | if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) { | ||
1978 | fotg210_dbg(fotg210, "unused qh not empty!\n"); | ||
1979 | BUG(); | ||
1980 | } | ||
1981 | if (qh->dummy) | ||
1982 | fotg210_qtd_free(fotg210, qh->dummy); | ||
1983 | dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma); | ||
1984 | kfree(qh); | ||
1985 | } | ||
1986 | |||
1987 | static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210, | ||
1988 | gfp_t flags) | ||
1989 | { | ||
1990 | struct fotg210_qh *qh; | ||
1991 | dma_addr_t dma; | ||
1992 | |||
1993 | qh = kzalloc(sizeof(*qh), GFP_ATOMIC); | ||
1994 | if (!qh) | ||
1995 | goto done; | ||
1996 | qh->hw = (struct fotg210_qh_hw *) | ||
1997 | dma_pool_alloc(fotg210->qh_pool, flags, &dma); | ||
1998 | if (!qh->hw) | ||
1999 | goto fail; | ||
2000 | memset(qh->hw, 0, sizeof(*qh->hw)); | ||
2001 | qh->qh_dma = dma; | ||
2002 | INIT_LIST_HEAD(&qh->qtd_list); | ||
2003 | |||
2004 | /* dummy td enables safe urb queuing */ | ||
2005 | qh->dummy = fotg210_qtd_alloc(fotg210, flags); | ||
2006 | if (qh->dummy == NULL) { | ||
2007 | fotg210_dbg(fotg210, "no dummy td\n"); | ||
2008 | goto fail1; | ||
2009 | } | ||
2010 | done: | ||
2011 | return qh; | ||
2012 | fail1: | ||
2013 | dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma); | ||
2014 | fail: | ||
2015 | kfree(qh); | ||
2016 | return NULL; | ||
2017 | } | ||
2018 | |||
2019 | /*-------------------------------------------------------------------------*/ | ||
2020 | |||
2021 | /* The queue heads and transfer descriptors are managed from pools tied | ||
2022 | * to each of the "per device" structures. | ||
2023 | * This is the initialisation and cleanup code. | ||
2024 | */ | ||
2025 | |||
2026 | static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210) | ||
2027 | { | ||
2028 | if (fotg210->async) | ||
2029 | qh_destroy(fotg210, fotg210->async); | ||
2030 | fotg210->async = NULL; | ||
2031 | |||
2032 | if (fotg210->dummy) | ||
2033 | qh_destroy(fotg210, fotg210->dummy); | ||
2034 | fotg210->dummy = NULL; | ||
2035 | |||
2036 | /* DMA consistent memory and pools */ | ||
2037 | if (fotg210->qtd_pool) | ||
2038 | dma_pool_destroy(fotg210->qtd_pool); | ||
2039 | fotg210->qtd_pool = NULL; | ||
2040 | |||
2041 | if (fotg210->qh_pool) { | ||
2042 | dma_pool_destroy(fotg210->qh_pool); | ||
2043 | fotg210->qh_pool = NULL; | ||
2044 | } | ||
2045 | |||
2046 | if (fotg210->itd_pool) | ||
2047 | dma_pool_destroy(fotg210->itd_pool); | ||
2048 | fotg210->itd_pool = NULL; | ||
2049 | |||
2050 | if (fotg210->periodic) | ||
2051 | dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller, | ||
2052 | fotg210->periodic_size * sizeof(u32), | ||
2053 | fotg210->periodic, fotg210->periodic_dma); | ||
2054 | fotg210->periodic = NULL; | ||
2055 | |||
2056 | /* shadow periodic table */ | ||
2057 | kfree(fotg210->pshadow); | ||
2058 | fotg210->pshadow = NULL; | ||
2059 | } | ||
2060 | |||
2061 | /* remember to add cleanup code (above) if you add anything here */ | ||
2062 | static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags) | ||
2063 | { | ||
2064 | int i; | ||
2065 | |||
2066 | /* QTDs for control/bulk/intr transfers */ | ||
2067 | fotg210->qtd_pool = dma_pool_create("fotg210_qtd", | ||
2068 | fotg210_to_hcd(fotg210)->self.controller, | ||
2069 | sizeof(struct fotg210_qtd), | ||
2070 | 32 /* byte alignment (for hw parts) */, | ||
2071 | 4096 /* can't cross 4K */); | ||
2072 | if (!fotg210->qtd_pool) | ||
2073 | goto fail; | ||
2074 | |||
2075 | /* QHs for control/bulk/intr transfers */ | ||
2076 | fotg210->qh_pool = dma_pool_create("fotg210_qh", | ||
2077 | fotg210_to_hcd(fotg210)->self.controller, | ||
2078 | sizeof(struct fotg210_qh_hw), | ||
2079 | 32 /* byte alignment (for hw parts) */, | ||
2080 | 4096 /* can't cross 4K */); | ||
2081 | if (!fotg210->qh_pool) | ||
2082 | goto fail; | ||
2083 | |||
2084 | fotg210->async = fotg210_qh_alloc(fotg210, flags); | ||
2085 | if (!fotg210->async) | ||
2086 | goto fail; | ||
2087 | |||
2088 | /* ITD for high speed ISO transfers */ | ||
2089 | fotg210->itd_pool = dma_pool_create("fotg210_itd", | ||
2090 | fotg210_to_hcd(fotg210)->self.controller, | ||
2091 | sizeof(struct fotg210_itd), | ||
2092 | 64 /* byte alignment (for hw parts) */, | ||
2093 | 4096 /* can't cross 4K */); | ||
2094 | if (!fotg210->itd_pool) | ||
2095 | goto fail; | ||
2096 | |||
2097 | /* Hardware periodic table */ | ||
2098 | fotg210->periodic = (__le32 *) | ||
2099 | dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller, | ||
2100 | fotg210->periodic_size * sizeof(__le32), | ||
2101 | &fotg210->periodic_dma, 0); | ||
2102 | if (fotg210->periodic == NULL) | ||
2103 | goto fail; | ||
2104 | |||
2105 | for (i = 0; i < fotg210->periodic_size; i++) | ||
2106 | fotg210->periodic[i] = FOTG210_LIST_END(fotg210); | ||
2107 | |||
2108 | /* software shadow of hardware table */ | ||
2109 | fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *), | ||
2110 | flags); | ||
2111 | if (fotg210->pshadow != NULL) | ||
2112 | return 0; | ||
2113 | |||
2114 | fail: | ||
2115 | fotg210_dbg(fotg210, "couldn't init memory\n"); | ||
2116 | fotg210_mem_cleanup(fotg210); | ||
2117 | return -ENOMEM; | ||
2118 | } | ||
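Each pool above is created with a 4096-byte boundary so no descriptor the controller fetches by DMA ever straddles a 4 KiB page. A tiny check of what that guarantee means (illustration only, not driver code):

#include <assert.h>

static int crosses_4k(unsigned long addr, unsigned long size)
{
	return (addr & ~0xfffUL) != ((addr + size - 1) & ~0xfffUL);
}

int main(void)
{
	assert(!crosses_4k(0x3fc0, 0x40));	/* ends exactly at the boundary: OK */
	assert(crosses_4k(0x3fc1, 0x40));	/* would straddle two pages */
	return 0;
}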
2119 | /*-------------------------------------------------------------------------*/ | ||
2120 | /* | ||
2121 | * EHCI hardware queue manipulation ... the core. QH/QTD manipulation. | ||
2122 | * | ||
2123 | * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd" | ||
2124 | * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned | ||
2125 | * buffers needed for the larger number). We use one QH per endpoint, queue | ||
2126 | * multiple urbs (all three types) per endpoint. URBs may need several qtds. | ||
2127 | * | ||
2128 | * ISO traffic uses "ISO TD" (itd) records, and (along with | ||
2129 | * interrupts) needs careful scheduling. Performance improvements can be | ||
2130 | * an ongoing challenge. That's in "ehci-sched.c". | ||
2131 | * | ||
2132 | * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs, | ||
2133 | * or otherwise through transaction translators (TTs) in USB 2.0 hubs using | ||
2134 | * (b) special fields in qh entries or (c) split iso entries. TTs will | ||
2135 | * buffer low/full speed data so the host collects it at high speed. | ||
2136 | */ | ||
2137 | |||
2138 | /*-------------------------------------------------------------------------*/ | ||
2139 | |||
2140 | /* fill a qtd, returning how much of the buffer we were able to queue up */ | ||
2141 | |||
2142 | static int | ||
2143 | qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd, dma_addr_t buf, | ||
2144 | size_t len, int token, int maxpacket) | ||
2145 | { | ||
2146 | int i, count; | ||
2147 | u64 addr = buf; | ||
2148 | |||
2149 | /* one buffer entry per 4K ... first might be short or unaligned */ | ||
2150 | qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr); | ||
2151 | qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32)); | ||
2152 | count = 0x1000 - (buf & 0x0fff); /* rest of that page */ | ||
2153 | if (likely(len < count)) /* ... iff needed */ | ||
2154 | count = len; | ||
2155 | else { | ||
2156 | buf += 0x1000; | ||
2157 | buf &= ~0x0fff; | ||
2158 | |||
2159 | /* per-qtd limit: from 16K to 20K (best alignment) */ | ||
2160 | for (i = 1; count < len && i < 5; i++) { | ||
2161 | addr = buf; | ||
2162 | qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr); | ||
2163 | qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210, | ||
2164 | (u32)(addr >> 32)); | ||
2165 | buf += 0x1000; | ||
2166 | if ((count + 0x1000) < len) | ||
2167 | count += 0x1000; | ||
2168 | else | ||
2169 | count = len; | ||
2170 | } | ||
2171 | |||
2172 | /* short packets may only terminate transfers */ | ||
2173 | if (count != len) | ||
2174 | count -= (count % maxpacket); | ||
2175 | } | ||
2176 | qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token); | ||
2177 | qtd->length = count; | ||
2178 | |||
2179 | return count; | ||
2180 | } | ||
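A worked example of the buffer math above, re-implemented stand-alone (the constants mirror the code rather than any header): a 20 KB transfer whose buffer starts 0xe00 bytes into a page gets 0x200 + 4 * 0x1000 = 16896 bytes into this qtd, and the trailing 3584 bytes spill into the next one.

#include <stdio.h>
#include <stddef.h>

/* Same length math as qtd_fill(): 5 page pointers, first page possibly
 * partial, and a short tail trimmed down to a maxpacket multiple. */
static size_t qtd_capacity(unsigned long buf, size_t len, int maxpacket)
{
	size_t count = 0x1000 - (buf & 0x0fff);
	int i;

	if (len < count)
		return len;
	for (i = 1; count < len && i < 5; i++)
		count = (count + 0x1000 < len) ? count + 0x1000 : len;
	if (count != len)
		count -= count % maxpacket;
	return count;
}

int main(void)
{
	/* 20 KB starting 0xe00 into a page, 512-byte maxpacket -> 16896 */
	printf("%zu\n", qtd_capacity(0x10000e00UL, 20480, 512));
	return 0;
}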
2181 | |||
2182 | /*-------------------------------------------------------------------------*/ | ||
2183 | |||
2184 | static inline void | ||
2185 | qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh, | ||
2186 | struct fotg210_qtd *qtd) | ||
2187 | { | ||
2188 | struct fotg210_qh_hw *hw = qh->hw; | ||
2189 | |||
2190 | /* writes to an active overlay are unsafe */ | ||
2191 | BUG_ON(qh->qh_state != QH_STATE_IDLE); | ||
2192 | |||
2193 | hw->hw_qtd_next = QTD_NEXT(fotg210, qtd->qtd_dma); | ||
2194 | hw->hw_alt_next = FOTG210_LIST_END(fotg210); | ||
2195 | |||
2196 | /* Except for control endpoints, we make hardware maintain data | ||
2197 | * toggle (like OHCI) ... here (re)initialize the toggle in the QH, | ||
2198 | * and set the pseudo-toggle in udev. Only usb_clear_halt() will | ||
2199 | * ever clear it. | ||
2200 | */ | ||
2201 | if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) { | ||
2202 | unsigned is_out, epnum; | ||
2203 | |||
2204 | is_out = qh->is_out; | ||
2205 | epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f; | ||
2206 | if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) { | ||
2207 | hw->hw_token &= ~cpu_to_hc32(fotg210, QTD_TOGGLE); | ||
2208 | usb_settoggle(qh->dev, epnum, is_out, 1); | ||
2209 | } | ||
2210 | } | ||
2211 | |||
2212 | hw->hw_token &= cpu_to_hc32(fotg210, QTD_TOGGLE | QTD_STS_PING); | ||
2213 | } | ||
2214 | |||
2215 | /* if it weren't for a common silicon quirk (writing the dummy into the qh | ||
2216 | * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault | ||
2217 | * recovery (including urb dequeue) would need software changes to a QH... | ||
2218 | */ | ||
2219 | static void | ||
2220 | qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) | ||
2221 | { | ||
2222 | struct fotg210_qtd *qtd; | ||
2223 | |||
2224 | if (list_empty(&qh->qtd_list)) | ||
2225 | qtd = qh->dummy; | ||
2226 | else { | ||
2227 | qtd = list_entry(qh->qtd_list.next, | ||
2228 | struct fotg210_qtd, qtd_list); | ||
2229 | /* | ||
2230 | * first qtd may already be partially processed. | ||
2231 | * If we come here during unlink, the QH overlay region | ||
2232 | * might have reference to the just unlinked qtd. The | ||
2233 | * qtd is updated in qh_completions(). Update the QH | ||
2234 | * overlay here. | ||
2235 | */ | ||
2236 | if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) { | ||
2237 | qh->hw->hw_qtd_next = qtd->hw_next; | ||
2238 | qtd = NULL; | ||
2239 | } | ||
2240 | } | ||
2241 | |||
2242 | if (qtd) | ||
2243 | qh_update(fotg210, qh, qtd); | ||
2244 | } | ||
2245 | |||
2246 | /*-------------------------------------------------------------------------*/ | ||
2247 | |||
2248 | static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh); | ||
2249 | |||
2250 | static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd, | ||
2251 | struct usb_host_endpoint *ep) | ||
2252 | { | ||
2253 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
2254 | struct fotg210_qh *qh = ep->hcpriv; | ||
2255 | unsigned long flags; | ||
2256 | |||
2257 | spin_lock_irqsave(&fotg210->lock, flags); | ||
2258 | qh->clearing_tt = 0; | ||
2259 | if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list) | ||
2260 | && fotg210->rh_state == FOTG210_RH_RUNNING) | ||
2261 | qh_link_async(fotg210, qh); | ||
2262 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
2263 | } | ||
2264 | |||
2265 | static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210, | ||
2266 | struct fotg210_qh *qh, | ||
2267 | struct urb *urb, u32 token) | ||
2268 | { | ||
2269 | |||
2270 | /* If an async split transaction gets an error or is unlinked, | ||
2271 | * the TT buffer may be left in an indeterminate state. We | ||
2272 | * have to clear the TT buffer. | ||
2273 | * | ||
2274 | * Note: this routine is never called for Isochronous transfers. | ||
2275 | */ | ||
2276 | if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { | ||
2277 | #ifdef DEBUG | ||
2278 | struct usb_device *tt = urb->dev->tt->hub; | ||
2279 | dev_dbg(&tt->dev, | ||
2280 | "clear tt buffer port %d, a%d ep%d t%08x\n", | ||
2281 | urb->dev->ttport, urb->dev->devnum, | ||
2282 | usb_pipeendpoint(urb->pipe), token); | ||
2283 | #endif /* DEBUG */ | ||
2284 | if (urb->dev->tt->hub != | ||
2285 | fotg210_to_hcd(fotg210)->self.root_hub) { | ||
2286 | if (usb_hub_clear_tt_buffer(urb) == 0) | ||
2287 | qh->clearing_tt = 1; | ||
2288 | } | ||
2289 | } | ||
2290 | } | ||
2291 | |||
2292 | static int qtd_copy_status( | ||
2293 | struct fotg210_hcd *fotg210, | ||
2294 | struct urb *urb, | ||
2295 | size_t length, | ||
2296 | u32 token | ||
2297 | ) | ||
2298 | { | ||
2299 | int status = -EINPROGRESS; | ||
2300 | |||
2301 | /* count IN/OUT bytes, not SETUP (even short packets) */ | ||
2302 | if (likely(QTD_PID(token) != 2)) | ||
2303 | urb->actual_length += length - QTD_LENGTH(token); | ||
2304 | |||
2305 | /* don't modify error codes */ | ||
2306 | if (unlikely(urb->unlinked)) | ||
2307 | return status; | ||
2308 | |||
2309 | /* force cleanup after short read; not always an error */ | ||
2310 | if (unlikely(IS_SHORT_READ(token))) | ||
2311 | status = -EREMOTEIO; | ||
2312 | |||
2313 | /* serious "can't proceed" faults reported by the hardware */ | ||
2314 | if (token & QTD_STS_HALT) { | ||
2315 | if (token & QTD_STS_BABBLE) { | ||
2316 | /* FIXME "must" disable babbling device's port too */ | ||
2317 | status = -EOVERFLOW; | ||
2318 | /* CERR nonzero + halt --> stall */ | ||
2319 | } else if (QTD_CERR(token)) { | ||
2320 | status = -EPIPE; | ||
2321 | |||
2322 | /* In theory, more than one of the following bits can be set | ||
2323 | * since they are sticky and the transaction is retried. | ||
2324 | * Which to test first is rather arbitrary. | ||
2325 | */ | ||
2326 | } else if (token & QTD_STS_MMF) { | ||
2327 | /* fs/ls interrupt xfer missed the complete-split */ | ||
2328 | status = -EPROTO; | ||
2329 | } else if (token & QTD_STS_DBE) { | ||
2330 | status = (QTD_PID(token) == 1) /* IN ? */ | ||
2331 | ? -ENOSR /* hc couldn't read data */ | ||
2332 | : -ECOMM; /* hc couldn't write data */ | ||
2333 | } else if (token & QTD_STS_XACT) { | ||
2334 | /* timeout, bad CRC, wrong PID, etc */ | ||
2335 | fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n", | ||
2336 | urb->dev->devpath, | ||
2337 | usb_pipeendpoint(urb->pipe), | ||
2338 | usb_pipein(urb->pipe) ? "in" : "out"); | ||
2339 | status = -EPROTO; | ||
2340 | } else { /* unknown */ | ||
2341 | status = -EPROTO; | ||
2342 | } | ||
2343 | |||
2344 | fotg210_vdbg(fotg210, | ||
2345 | "dev%d ep%d%s qtd token %08x --> status %d\n", | ||
2346 | usb_pipedevice(urb->pipe), | ||
2347 | usb_pipeendpoint(urb->pipe), | ||
2348 | usb_pipein(urb->pipe) ? "in" : "out", | ||
2349 | token, status); | ||
2350 | } | ||
2351 | |||
2352 | return status; | ||
2353 | } | ||
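A quick decode of a typical failing token, using the qTD token layout these checks assume (bit values taken to match the EHCI-style definitions in fotg210.h): a stalled IN transaction has HALT set, no babble, and a non-zero CERR, which the function above maps to -EPIPE.

#include <assert.h>

#define QTD_STS_HALT	(1 << 6)
#define QTD_STS_BABBLE	(1 << 4)
#define QTD_CERR(tok)	(((tok) >> 10) & 0x3)
#define QTD_PID(tok)	(((tok) >> 8) & 0x3)

int main(void)
{
	/* HALT + CERR=3 + PID=IN: the stall case */
	unsigned int token = QTD_STS_HALT | (3 << 10) | (1 << 8);

	assert(token & QTD_STS_HALT);
	assert(!(token & QTD_STS_BABBLE));
	assert(QTD_CERR(token) == 3);
	assert(QTD_PID(token) == 1);	/* IN */
	return 0;
}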
2354 | |||
2355 | static void | ||
2356 | fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb, int status) | ||
2357 | __releases(fotg210->lock) | ||
2358 | __acquires(fotg210->lock) | ||
2359 | { | ||
2360 | if (likely(urb->hcpriv != NULL)) { | ||
2361 | struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv; | ||
2362 | |||
2363 | /* S-mask in a QH means it's an interrupt urb */ | ||
2364 | if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) { | ||
2365 | |||
2366 | /* ... update hc-wide periodic stats (for usbfs) */ | ||
2367 | fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs--; | ||
2368 | } | ||
2369 | } | ||
2370 | |||
2371 | if (unlikely(urb->unlinked)) { | ||
2372 | COUNT(fotg210->stats.unlink); | ||
2373 | } else { | ||
2374 | /* report non-error and short read status as zero */ | ||
2375 | if (status == -EINPROGRESS || status == -EREMOTEIO) | ||
2376 | status = 0; | ||
2377 | COUNT(fotg210->stats.complete); | ||
2378 | } | ||
2379 | |||
2380 | #ifdef FOTG210_URB_TRACE | ||
2381 | fotg210_dbg(fotg210, | ||
2382 | "%s %s urb %p ep%d%s status %d len %d/%d\n", | ||
2383 | __func__, urb->dev->devpath, urb, | ||
2384 | usb_pipeendpoint(urb->pipe), | ||
2385 | usb_pipein(urb->pipe) ? "in" : "out", | ||
2386 | status, | ||
2387 | urb->actual_length, urb->transfer_buffer_length); | ||
2388 | #endif | ||
2389 | |||
2390 | /* complete() can reenter this HCD */ | ||
2391 | usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb); | ||
2392 | spin_unlock(&fotg210->lock); | ||
2393 | usb_hcd_giveback_urb(fotg210_to_hcd(fotg210), urb, status); | ||
2394 | spin_lock(&fotg210->lock); | ||
2395 | } | ||
2396 | |||
2397 | static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh); | ||
2398 | |||
2399 | /* | ||
2400 | * Process and free completed qtds for a qh, returning URBs to drivers. | ||
2401 | * Chases up to qh->hw_current. Returns number of completions called, | ||
2402 | * indicating how much "real" work we did. | ||
2403 | */ | ||
2404 | static unsigned | ||
2405 | qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) | ||
2406 | { | ||
2407 | struct fotg210_qtd *last, *end = qh->dummy; | ||
2408 | struct list_head *entry, *tmp; | ||
2409 | int last_status; | ||
2410 | int stopped; | ||
2411 | unsigned count = 0; | ||
2412 | u8 state; | ||
2413 | struct fotg210_qh_hw *hw = qh->hw; | ||
2414 | |||
2415 | if (unlikely(list_empty(&qh->qtd_list))) | ||
2416 | return count; | ||
2417 | |||
2418 | /* completions (or tasks on other cpus) must never clobber HALT | ||
2419 | * till we've gone through and cleaned everything up, even when | ||
2420 | * they add urbs to this qh's queue or mark them for unlinking. | ||
2421 | * | ||
2422 | * NOTE: unlinking expects to be done in queue order. | ||
2423 | * | ||
2424 | * It's a bug for qh->qh_state to be anything other than | ||
2425 | * QH_STATE_IDLE, unless our caller is scan_async() or | ||
2426 | * scan_intr(). | ||
2427 | */ | ||
2428 | state = qh->qh_state; | ||
2429 | qh->qh_state = QH_STATE_COMPLETING; | ||
2430 | stopped = (state == QH_STATE_IDLE); | ||
2431 | |||
2432 | rescan: | ||
2433 | last = NULL; | ||
2434 | last_status = -EINPROGRESS; | ||
2435 | qh->needs_rescan = 0; | ||
2436 | |||
2437 | /* remove de-activated QTDs from front of queue. | ||
2438 | * after faults (including short reads), cleanup this urb | ||
2439 | * then let the queue advance. | ||
2440 | * if queue is stopped, handles unlinks. | ||
2441 | */ | ||
2442 | list_for_each_safe(entry, tmp, &qh->qtd_list) { | ||
2443 | struct fotg210_qtd *qtd; | ||
2444 | struct urb *urb; | ||
2445 | u32 token = 0; | ||
2446 | |||
2447 | qtd = list_entry(entry, struct fotg210_qtd, qtd_list); | ||
2448 | urb = qtd->urb; | ||
2449 | |||
2450 | /* clean up any state from previous QTD ...*/ | ||
2451 | if (last) { | ||
2452 | if (likely(last->urb != urb)) { | ||
2453 | fotg210_urb_done(fotg210, last->urb, | ||
2454 | last_status); | ||
2455 | count++; | ||
2456 | last_status = -EINPROGRESS; | ||
2457 | } | ||
2458 | fotg210_qtd_free(fotg210, last); | ||
2459 | last = NULL; | ||
2460 | } | ||
2461 | |||
2462 | /* ignore urbs submitted during completions we reported */ | ||
2463 | if (qtd == end) | ||
2464 | break; | ||
2465 | |||
2466 | /* hardware copies qtd out of qh overlay */ | ||
2467 | rmb(); | ||
2468 | token = hc32_to_cpu(fotg210, qtd->hw_token); | ||
2469 | |||
2470 | /* always clean up qtds the hc de-activated */ | ||
2471 | retry_xacterr: | ||
2472 | if ((token & QTD_STS_ACTIVE) == 0) { | ||
2473 | |||
2474 | /* Report Data Buffer Error: non-fatal but useful */ | ||
2475 | if (token & QTD_STS_DBE) | ||
2476 | fotg210_dbg(fotg210, | ||
2477 | "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n", | ||
2478 | urb, | ||
2479 | usb_endpoint_num(&urb->ep->desc), | ||
2480 | usb_endpoint_dir_in(&urb->ep->desc) | ||
2481 | ? "in" : "out", | ||
2482 | urb->transfer_buffer_length, | ||
2483 | qtd, | ||
2484 | qh); | ||
2485 | |||
2486 | /* on STALL, error, and short reads this urb must | ||
2487 | * complete and all its qtds must be recycled. | ||
2488 | */ | ||
2489 | if ((token & QTD_STS_HALT) != 0) { | ||
2490 | |||
2491 | /* retry transaction errors until we | ||
2492 | * reach the software xacterr limit | ||
2493 | */ | ||
2494 | if ((token & QTD_STS_XACT) && | ||
2495 | QTD_CERR(token) == 0 && | ||
2496 | ++qh->xacterrs < QH_XACTERR_MAX && | ||
2497 | !urb->unlinked) { | ||
2498 | fotg210_dbg(fotg210, | ||
2499 | "detected XactErr len %zu/%zu retry %d\n", | ||
2500 | qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); | ||
2501 | |||
2502 | /* reset the token in the qtd and the | ||
2503 | * qh overlay (which still contains | ||
2504 | * the qtd) so that we pick up from | ||
2505 | * where we left off | ||
2506 | */ | ||
2507 | token &= ~QTD_STS_HALT; | ||
2508 | token |= QTD_STS_ACTIVE | | ||
2509 | (FOTG210_TUNE_CERR << 10); | ||
2510 | qtd->hw_token = cpu_to_hc32(fotg210, | ||
2511 | token); | ||
2512 | wmb(); | ||
2513 | hw->hw_token = cpu_to_hc32(fotg210, | ||
2514 | token); | ||
2515 | goto retry_xacterr; | ||
2516 | } | ||
2517 | stopped = 1; | ||
2518 | |||
2519 | /* magic dummy for some short reads; qh won't advance. | ||
2520 | * that silicon quirk can kick in with this dummy too. | ||
2521 | * | ||
2522 | * other short reads won't stop the queue, including | ||
2523 | * control transfers (status stage handles that) or | ||
2524 | * most other single-qtd reads ... the queue stops if | ||
2525 | * URB_SHORT_NOT_OK was set so the driver submitting | ||
2526 | * the urbs could clean it up. | ||
2527 | */ | ||
2528 | } else if (IS_SHORT_READ(token) | ||
2529 | && !(qtd->hw_alt_next | ||
2530 | & FOTG210_LIST_END(fotg210))) { | ||
2531 | stopped = 1; | ||
2532 | } | ||
2533 | |||
2534 | /* stop scanning when we reach qtds the hc is using */ | ||
2535 | } else if (likely(!stopped | ||
2536 | && fotg210->rh_state >= FOTG210_RH_RUNNING)) { | ||
2537 | break; | ||
2538 | |||
2539 | /* scan the whole queue for unlinks whenever it stops */ | ||
2540 | } else { | ||
2541 | stopped = 1; | ||
2542 | |||
2543 | /* cancel everything if we halt, suspend, etc */ | ||
2544 | if (fotg210->rh_state < FOTG210_RH_RUNNING) | ||
2545 | last_status = -ESHUTDOWN; | ||
2546 | |||
2547 | /* this qtd is active; skip it unless a previous qtd | ||
2548 | * for its urb faulted, or its urb was canceled. | ||
2549 | */ | ||
2550 | else if (last_status == -EINPROGRESS && !urb->unlinked) | ||
2551 | continue; | ||
2552 | |||
2553 | /* qh unlinked; token in overlay may be most current */ | ||
2554 | if (state == QH_STATE_IDLE | ||
2555 | && cpu_to_hc32(fotg210, qtd->qtd_dma) | ||
2556 | == hw->hw_current) { | ||
2557 | token = hc32_to_cpu(fotg210, hw->hw_token); | ||
2558 | |||
2559 | /* An unlink may leave an incomplete | ||
2560 | * async transaction in the TT buffer. | ||
2561 | * We have to clear it. | ||
2562 | */ | ||
2563 | fotg210_clear_tt_buffer(fotg210, qh, urb, | ||
2564 | token); | ||
2565 | } | ||
2566 | } | ||
2567 | |||
2568 | /* unless we already know the urb's status, collect qtd status | ||
2569 | * and update count of bytes transferred. in common short read | ||
2570 | * cases with only one data qtd (including control transfers), | ||
2571 | * queue processing won't halt. but with two or more qtds (for | ||
2572 | * example, with a 32 KB transfer), when the first qtd gets a | ||
2573 | * short read the second must be removed by hand. | ||
2574 | */ | ||
2575 | if (last_status == -EINPROGRESS) { | ||
2576 | last_status = qtd_copy_status(fotg210, urb, | ||
2577 | qtd->length, token); | ||
2578 | if (last_status == -EREMOTEIO | ||
2579 | && (qtd->hw_alt_next | ||
2580 | & FOTG210_LIST_END(fotg210))) | ||
2581 | last_status = -EINPROGRESS; | ||
2582 | |||
2583 | /* As part of low/full-speed endpoint-halt processing | ||
2584 | * we must clear the TT buffer (11.17.5). | ||
2585 | */ | ||
2586 | if (unlikely(last_status != -EINPROGRESS && | ||
2587 | last_status != -EREMOTEIO)) { | ||
2588 | /* The TT's in some hubs malfunction when they | ||
2589 | * receive this request following a STALL (they | ||
2590 | * stop sending isochronous packets). Since a | ||
2591 | * STALL can't leave the TT buffer in a busy | ||
2592 | * state (if you believe Figures 11-48 - 11-51 | ||
2593 | * in the USB 2.0 spec), we won't clear the TT | ||
2594 | * buffer in this case. Strictly speaking this | ||
2595 | * is a violation of the spec. | ||
2596 | */ | ||
2597 | if (last_status != -EPIPE) | ||
2598 | fotg210_clear_tt_buffer(fotg210, qh, | ||
2599 | urb, token); | ||
2600 | } | ||
2601 | } | ||
2602 | |||
2603 | /* if we're removing something not at the queue head, | ||
2604 | * patch the hardware queue pointer. | ||
2605 | */ | ||
2606 | if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { | ||
2607 | last = list_entry(qtd->qtd_list.prev, | ||
2608 | struct fotg210_qtd, qtd_list); | ||
2609 | last->hw_next = qtd->hw_next; | ||
2610 | } | ||
2611 | |||
2612 | /* remove qtd; it's recycled after possible urb completion */ | ||
2613 | list_del(&qtd->qtd_list); | ||
2614 | last = qtd; | ||
2615 | |||
2616 | /* reinit the xacterr counter for the next qtd */ | ||
2617 | qh->xacterrs = 0; | ||
2618 | } | ||
2619 | |||
2620 | /* last urb's completion might still need calling */ | ||
2621 | if (likely(last != NULL)) { | ||
2622 | fotg210_urb_done(fotg210, last->urb, last_status); | ||
2623 | count++; | ||
2624 | fotg210_qtd_free(fotg210, last); | ||
2625 | } | ||
2626 | |||
2627 | /* Do we need to rescan for URBs dequeued during a giveback? */ | ||
2628 | if (unlikely(qh->needs_rescan)) { | ||
2629 | /* If the QH is already unlinked, do the rescan now. */ | ||
2630 | if (state == QH_STATE_IDLE) | ||
2631 | goto rescan; | ||
2632 | |||
2633 | /* Otherwise we have to wait until the QH is fully unlinked. | ||
2634 | * Our caller will start an unlink if qh->needs_rescan is | ||
2635 | * set. But if an unlink has already started, nothing needs | ||
2636 | * to be done. | ||
2637 | */ | ||
2638 | if (state != QH_STATE_LINKED) | ||
2639 | qh->needs_rescan = 0; | ||
2640 | } | ||
2641 | |||
2642 | /* restore original state; caller must unlink or relink */ | ||
2643 | qh->qh_state = state; | ||
2644 | |||
2645 | /* be sure the hardware's done with the qh before refreshing | ||
2646 | * it after fault cleanup, or recovering from silicon wrongly | ||
2647 | * overlaying the dummy qtd (which reduces DMA chatter). | ||
2648 | */ | ||
2649 | if (stopped != 0 || hw->hw_qtd_next == FOTG210_LIST_END(fotg210)) { | ||
2650 | switch (state) { | ||
2651 | case QH_STATE_IDLE: | ||
2652 | qh_refresh(fotg210, qh); | ||
2653 | break; | ||
2654 | case QH_STATE_LINKED: | ||
2655 | /* We won't refresh a QH that's linked (after the HC | ||
2656 | * stopped the queue). That avoids a race: | ||
2657 | * - HC reads first part of QH; | ||
2658 | * - CPU updates that first part and the token; | ||
2659 | * - HC reads rest of that QH, including token | ||
2660 | * Result: HC gets an inconsistent image, and then | ||
2661 | * DMAs to/from the wrong memory (corrupting it). | ||
2662 | * | ||
2663 | * That should be rare for interrupt transfers, | ||
2664 | * except maybe high bandwidth ... | ||
2665 | */ | ||
2666 | |||
2667 | /* Tell the caller to start an unlink */ | ||
2668 | qh->needs_rescan = 1; | ||
2669 | break; | ||
2670 | /* otherwise, unlink already started */ | ||
2671 | } | ||
2672 | } | ||
2673 | |||
2674 | return count; | ||
2675 | } | ||
2676 | |||
2677 | /*-------------------------------------------------------------------------*/ | ||
2678 | |||
2679 | /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ | ||
2680 | #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) | ||
2681 | /* ... and packet size, for any kind of endpoint descriptor */ | ||
2682 | #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) | ||
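For example, a high-bandwidth endpoint descriptor with wMaxPacketSize = 0x1400 decodes to three 1024-byte transactions per microframe, i.e. up to 3072 bytes:

#include <assert.h>

#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)

int main(void)
{
	unsigned int wmax = 0x1400;

	assert(hb_mult(wmax) == 3);
	assert(max_packet(wmax) == 1024);
	assert(hb_mult(wmax) * max_packet(wmax) == 3072);
	return 0;
}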
2683 | |||
2684 | /* | ||
2685 | * reverse of qh_urb_transaction: free a list of TDs. | ||
2686 | * used for cleanup after errors, before HC sees an URB's TDs. | ||
2687 | */ | ||
2688 | static void qtd_list_free( | ||
2689 | struct fotg210_hcd *fotg210, | ||
2690 | struct urb *urb, | ||
2691 | struct list_head *qtd_list | ||
2692 | ) { | ||
2693 | struct list_head *entry, *temp; | ||
2694 | |||
2695 | list_for_each_safe(entry, temp, qtd_list) { | ||
2696 | struct fotg210_qtd *qtd; | ||
2697 | |||
2698 | qtd = list_entry(entry, struct fotg210_qtd, qtd_list); | ||
2699 | list_del(&qtd->qtd_list); | ||
2700 | fotg210_qtd_free(fotg210, qtd); | ||
2701 | } | ||
2702 | } | ||
2703 | |||
2704 | /* | ||
2705 | * create a list of filled qtds for this URB; won't link into qh. | ||
2706 | */ | ||
2707 | static struct list_head * | ||
2708 | qh_urb_transaction( | ||
2709 | struct fotg210_hcd *fotg210, | ||
2710 | struct urb *urb, | ||
2711 | struct list_head *head, | ||
2712 | gfp_t flags | ||
2713 | ) { | ||
2714 | struct fotg210_qtd *qtd, *qtd_prev; | ||
2715 | dma_addr_t buf; | ||
2716 | int len, this_sg_len, maxpacket; | ||
2717 | int is_input; | ||
2718 | u32 token; | ||
2719 | int i; | ||
2720 | struct scatterlist *sg; | ||
2721 | |||
2722 | /* | ||
2723 | * URBs map to sequences of QTDs: one logical transaction | ||
2724 | */ | ||
2725 | qtd = fotg210_qtd_alloc(fotg210, flags); | ||
2726 | if (unlikely(!qtd)) | ||
2727 | return NULL; | ||
2728 | list_add_tail(&qtd->qtd_list, head); | ||
2729 | qtd->urb = urb; | ||
2730 | |||
2731 | token = QTD_STS_ACTIVE; | ||
2732 | token |= (FOTG210_TUNE_CERR << 10); | ||
2733 | /* for split transactions, SplitXState initialized to zero */ | ||
2734 | |||
2735 | len = urb->transfer_buffer_length; | ||
2736 | is_input = usb_pipein(urb->pipe); | ||
2737 | if (usb_pipecontrol(urb->pipe)) { | ||
2738 | /* SETUP pid */ | ||
2739 | qtd_fill(fotg210, qtd, urb->setup_dma, | ||
2740 | sizeof(struct usb_ctrlrequest), | ||
2741 | token | (2 /* "setup" */ << 8), 8); | ||
2742 | |||
2743 | /* ... and always at least one more pid */ | ||
2744 | token ^= QTD_TOGGLE; | ||
2745 | qtd_prev = qtd; | ||
2746 | qtd = fotg210_qtd_alloc(fotg210, flags); | ||
2747 | if (unlikely(!qtd)) | ||
2748 | goto cleanup; | ||
2749 | qtd->urb = urb; | ||
2750 | qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma); | ||
2751 | list_add_tail(&qtd->qtd_list, head); | ||
2752 | |||
2753 | /* for zero length DATA stages, STATUS is always IN */ | ||
2754 | if (len == 0) | ||
2755 | token |= (1 /* "in" */ << 8); | ||
2756 | } | ||
2757 | |||
2758 | /* | ||
2759 | * data transfer stage: buffer setup | ||
2760 | */ | ||
2761 | i = urb->num_mapped_sgs; | ||
2762 | if (len > 0 && i > 0) { | ||
2763 | sg = urb->sg; | ||
2764 | buf = sg_dma_address(sg); | ||
2765 | |||
2766 | /* urb->transfer_buffer_length may be smaller than the | ||
2767 | * size of the scatterlist (or vice versa) | ||
2768 | */ | ||
2769 | this_sg_len = min_t(int, sg_dma_len(sg), len); | ||
2770 | } else { | ||
2771 | sg = NULL; | ||
2772 | buf = urb->transfer_dma; | ||
2773 | this_sg_len = len; | ||
2774 | } | ||
2775 | |||
2776 | if (is_input) | ||
2777 | token |= (1 /* "in" */ << 8); | ||
2778 | /* else it's already initted to "out" pid (0 << 8) */ | ||
2779 | |||
2780 | maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); | ||
2781 | |||
2782 | /* | ||
2783 | * buffer gets wrapped in one or more qtds; | ||
2784 | * last one may be "short" (including zero len) | ||
2785 | * and may serve as a control status ack | ||
2786 | */ | ||
2787 | for (;;) { | ||
2788 | int this_qtd_len; | ||
2789 | |||
2790 | this_qtd_len = qtd_fill(fotg210, qtd, buf, this_sg_len, token, | ||
2791 | maxpacket); | ||
2792 | this_sg_len -= this_qtd_len; | ||
2793 | len -= this_qtd_len; | ||
2794 | buf += this_qtd_len; | ||
2795 | |||
2796 | /* | ||
2797 | * short reads advance to a "magic" dummy instead of the next | ||
2798 | * qtd ... that forces the queue to stop, for manual cleanup. | ||
2799 | * (this will usually be overridden later.) | ||
2800 | */ | ||
2801 | if (is_input) | ||
2802 | qtd->hw_alt_next = fotg210->async->hw->hw_alt_next; | ||
2803 | |||
2804 | /* qh makes control packets use qtd toggle; maybe switch it */ | ||
2805 | if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) | ||
2806 | token ^= QTD_TOGGLE; | ||
2807 | |||
2808 | if (likely(this_sg_len <= 0)) { | ||
2809 | if (--i <= 0 || len <= 0) | ||
2810 | break; | ||
2811 | sg = sg_next(sg); | ||
2812 | buf = sg_dma_address(sg); | ||
2813 | this_sg_len = min_t(int, sg_dma_len(sg), len); | ||
2814 | } | ||
2815 | |||
2816 | qtd_prev = qtd; | ||
2817 | qtd = fotg210_qtd_alloc(fotg210, flags); | ||
2818 | if (unlikely(!qtd)) | ||
2819 | goto cleanup; | ||
2820 | qtd->urb = urb; | ||
2821 | qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma); | ||
2822 | list_add_tail(&qtd->qtd_list, head); | ||
2823 | } | ||
2824 | |||
2825 | /* | ||
2826 | * unless the caller requires manual cleanup after short reads, | ||
2827 | * have the alt_next mechanism keep the queue running after the | ||
2828 | * last data qtd (the only one, for control and most other cases). | ||
2829 | */ | ||
2830 | if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 | ||
2831 | || usb_pipecontrol(urb->pipe))) | ||
2832 | qtd->hw_alt_next = FOTG210_LIST_END(fotg210); | ||
2833 | |||
2834 | /* | ||
2835 | * control requests may need a terminating data "status" ack; | ||
2836 | * other OUT ones may need a terminating short packet | ||
2837 | * (zero length). | ||
2838 | */ | ||
2839 | if (likely(urb->transfer_buffer_length != 0)) { | ||
2840 | int one_more = 0; | ||
2841 | |||
2842 | if (usb_pipecontrol(urb->pipe)) { | ||
2843 | one_more = 1; | ||
2844 | token ^= 0x0100; /* "in" <--> "out" */ | ||
2845 | token |= QTD_TOGGLE; /* force DATA1 */ | ||
2846 | } else if (usb_pipeout(urb->pipe) | ||
2847 | && (urb->transfer_flags & URB_ZERO_PACKET) | ||
2848 | && !(urb->transfer_buffer_length % maxpacket)) { | ||
2849 | one_more = 1; | ||
2850 | } | ||
2851 | if (one_more) { | ||
2852 | qtd_prev = qtd; | ||
2853 | qtd = fotg210_qtd_alloc(fotg210, flags); | ||
2854 | if (unlikely(!qtd)) | ||
2855 | goto cleanup; | ||
2856 | qtd->urb = urb; | ||
2857 | qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma); | ||
2858 | list_add_tail(&qtd->qtd_list, head); | ||
2859 | |||
2860 | /* never any data in such packets */ | ||
2861 | qtd_fill(fotg210, qtd, 0, 0, token, 0); | ||
2862 | } | ||
2863 | } | ||
2864 | |||
2865 | /* by default, enable interrupt on urb completion */ | ||
2866 | if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) | ||
2867 | qtd->hw_token |= cpu_to_hc32(fotg210, QTD_IOC); | ||
2868 | return head; | ||
2869 | |||
2870 | cleanup: | ||
2871 | qtd_list_free(fotg210, urb, head); | ||
2872 | return NULL; | ||
2873 | } | ||
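To make the control-transfer path above concrete: a control-IN request ends up as three qtds, the 8-byte SETUP (DATA0), one or more IN data qtds starting on DATA1, and a zero-length OUT status qtd forced to DATA1. A stripped-down sketch of that token bookkeeping (ACTIVE/CERR bits and the mid-transfer toggle flips are omitted; the bit positions mirror the EHCI-style token):

#include <assert.h>

#define QTD_TOGGLE	(1u << 31)
#define PID_OUT		(0 << 8)
#define PID_IN		(1 << 8)
#define PID_SETUP	(2 << 8)

int main(void)
{
	unsigned int token = 0;

	unsigned int setup = token | PID_SETUP;	/* SETUP stage, DATA0 */
	token ^= QTD_TOGGLE;			/* data stage starts on DATA1 */
	token |= PID_IN;
	unsigned int data = token;		/* IN data stage, DATA1 */
	token ^= 0x0100;			/* "in" <--> "out" for status */
	token |= QTD_TOGGLE;			/* status stage forces DATA1 */
	unsigned int status = token;		/* zero-length OUT status, DATA1 */

	assert((setup & 0x300) == PID_SETUP);
	assert((data & 0x300) == PID_IN && (data & QTD_TOGGLE));
	assert((status & 0x300) == PID_OUT && (status & QTD_TOGGLE));
	return 0;
}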
2874 | |||
2875 | /*-------------------------------------------------------------------------*/ | ||
2876 | /* | ||
2877 | * Would be best to create all qh's from config descriptors, | ||
2878 | * when each interface/altsetting is established. Unlink | ||
2879 | * any previous qh and cancel its urbs first; endpoints are | ||
2880 | * implicitly reset then (data toggle too). | ||
2881 | * That'd mean updating how usbcore talks to HCDs. (2.7?) | ||
2882 | */ | ||
2883 | |||
2884 | |||
2885 | /* | ||
2886 | * Each QH holds a qtd list; a QH is used for everything except iso. | ||
2887 | * | ||
2888 | * For interrupt urbs, the scheduler must set the microframe scheduling | ||
2889 | * mask(s) each time the QH gets scheduled. For highspeed, that's | ||
2890 | * just one microframe in the s-mask. For split interrupt transactions | ||
2891 | * there are additional complications: c-mask, maybe FSTNs. | ||
2892 | */ | ||
2893 | static struct fotg210_qh * | ||
2894 | qh_make( | ||
2895 | struct fotg210_hcd *fotg210, | ||
2896 | struct urb *urb, | ||
2897 | gfp_t flags | ||
2898 | ) { | ||
2899 | struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags); | ||
2900 | u32 info1 = 0, info2 = 0; | ||
2901 | int is_input, type; | ||
2902 | int maxp = 0; | ||
2903 | struct usb_tt *tt = urb->dev->tt; | ||
2904 | struct fotg210_qh_hw *hw; | ||
2905 | |||
2906 | if (!qh) | ||
2907 | return qh; | ||
2908 | |||
2909 | /* | ||
2910 | * init endpoint/device data for this QH | ||
2911 | */ | ||
2912 | info1 |= usb_pipeendpoint(urb->pipe) << 8; | ||
2913 | info1 |= usb_pipedevice(urb->pipe) << 0; | ||
2914 | |||
2915 | is_input = usb_pipein(urb->pipe); | ||
2916 | type = usb_pipetype(urb->pipe); | ||
2917 | maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input); | ||
2918 | |||
2919 | /* 1024 byte maxpacket is a hardware ceiling. High bandwidth | ||
2920 | * acts like up to 3KB, but is built from smaller packets. | ||
2921 | */ | ||
2922 | if (max_packet(maxp) > 1024) { | ||
2923 | fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", | ||
2924 | max_packet(maxp)); | ||
2925 | goto done; | ||
2926 | } | ||
2927 | |||
2928 | /* Compute interrupt scheduling parameters just once, and save. | ||
2929 | * - allowing for high bandwidth, how many nsec/uframe are used? | ||
2930 | * - split transactions need a second CSPLIT uframe; same question | ||
2931 | * - splits also need a schedule gap (for full/low speed I/O) | ||
2932 | * - qh has a polling interval | ||
2933 | * | ||
2934 | * For control/bulk requests, the HC or TT handles these. | ||
2935 | */ | ||
2936 | if (type == PIPE_INTERRUPT) { | ||
2937 | qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, | ||
2938 | is_input, 0, | ||
2939 | hb_mult(maxp) * max_packet(maxp))); | ||
2940 | qh->start = NO_FRAME; | ||
2941 | |||
2942 | if (urb->dev->speed == USB_SPEED_HIGH) { | ||
2943 | qh->c_usecs = 0; | ||
2944 | qh->gap_uf = 0; | ||
2945 | |||
2946 | qh->period = urb->interval >> 3; | ||
2947 | if (qh->period == 0 && urb->interval != 1) { | ||
2948 | /* NOTE interval 2 or 4 uframes could work. | ||
2949 | * But interval 1 scheduling is simpler, and | ||
2950 | * includes high bandwidth. | ||
2951 | */ | ||
2952 | urb->interval = 1; | ||
2953 | } else if (qh->period > fotg210->periodic_size) { | ||
2954 | qh->period = fotg210->periodic_size; | ||
2955 | urb->interval = qh->period << 3; | ||
2956 | } | ||
2957 | } else { | ||
2958 | int think_time; | ||
2959 | |||
2960 | /* gap is f(FS/LS transfer times) */ | ||
2961 | qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed, | ||
2962 | is_input, 0, maxp) / (125 * 1000); | ||
2963 | |||
2964 | /* FIXME this just approximates SPLIT/CSPLIT times */ | ||
2965 | if (is_input) { /* SPLIT, gap, CSPLIT+DATA */ | ||
2966 | qh->c_usecs = qh->usecs + HS_USECS(0); | ||
2967 | qh->usecs = HS_USECS(1); | ||
2968 | } else { /* SPLIT+DATA, gap, CSPLIT */ | ||
2969 | qh->usecs += HS_USECS(1); | ||
2970 | qh->c_usecs = HS_USECS(0); | ||
2971 | } | ||
2972 | |||
2973 | think_time = tt ? tt->think_time : 0; | ||
2974 | qh->tt_usecs = NS_TO_US(think_time + | ||
2975 | usb_calc_bus_time(urb->dev->speed, | ||
2976 | is_input, 0, max_packet(maxp))); | ||
2977 | qh->period = urb->interval; | ||
2978 | if (qh->period > fotg210->periodic_size) { | ||
2979 | qh->period = fotg210->periodic_size; | ||
2980 | urb->interval = qh->period; | ||
2981 | } | ||
2982 | } | ||
2983 | } | ||
2984 | |||
2985 | /* support for tt scheduling, and access to toggles */ | ||
2986 | qh->dev = urb->dev; | ||
2987 | |||
2988 | /* using TT? */ | ||
2989 | switch (urb->dev->speed) { | ||
2990 | case USB_SPEED_LOW: | ||
2991 | info1 |= QH_LOW_SPEED; | ||
2992 | /* FALL THROUGH */ | ||
2993 | |||
2994 | case USB_SPEED_FULL: | ||
2995 | /* EPS 0 means "full" */ | ||
2996 | if (type != PIPE_INTERRUPT) | ||
2997 | info1 |= (FOTG210_TUNE_RL_TT << 28); | ||
2998 | if (type == PIPE_CONTROL) { | ||
2999 | info1 |= QH_CONTROL_EP; /* for TT */ | ||
3000 | info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ | ||
3001 | } | ||
3002 | info1 |= maxp << 16; | ||
3003 | |||
3004 | info2 |= (FOTG210_TUNE_MULT_TT << 30); | ||
3005 | |||
3006 | /* Some Freescale processors have an erratum in which the | ||
3007 | * port number in the queue head was 0..N-1 instead of 1..N. | ||
3008 | */ | ||
3009 | if (fotg210_has_fsl_portno_bug(fotg210)) | ||
3010 | info2 |= (urb->dev->ttport-1) << 23; | ||
3011 | else | ||
3012 | info2 |= urb->dev->ttport << 23; | ||
3013 | |||
3014 | /* set the address of the TT; for TDI's integrated | ||
3015 | * root hub tt, leave it zeroed. | ||
3016 | */ | ||
3017 | if (tt && tt->hub != fotg210_to_hcd(fotg210)->self.root_hub) | ||
3018 | info2 |= tt->hub->devnum << 16; | ||
3019 | |||
3020 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ | ||
3021 | |||
3022 | break; | ||
3023 | |||
3024 | case USB_SPEED_HIGH: /* no TT involved */ | ||
3025 | info1 |= QH_HIGH_SPEED; | ||
3026 | if (type == PIPE_CONTROL) { | ||
3027 | info1 |= (FOTG210_TUNE_RL_HS << 28); | ||
3028 | info1 |= 64 << 16; /* usb2 fixed maxpacket */ | ||
3029 | info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ | ||
3030 | info2 |= (FOTG210_TUNE_MULT_HS << 30); | ||
3031 | } else if (type == PIPE_BULK) { | ||
3032 | info1 |= (FOTG210_TUNE_RL_HS << 28); | ||
3033 | /* The USB spec says that high speed bulk endpoints | ||
3034 | * always use 512 byte maxpacket. But some device | ||
3035 | * vendors decided to ignore that, and MSFT is happy | ||
3036 | * to help them do so. So now people expect to use | ||
3037 | * such nonconformant devices with Linux too; sigh. | ||
3038 | */ | ||
3039 | info1 |= max_packet(maxp) << 16; | ||
3040 | info2 |= (FOTG210_TUNE_MULT_HS << 30); | ||
3041 | } else { /* PIPE_INTERRUPT */ | ||
3042 | info1 |= max_packet(maxp) << 16; | ||
3043 | info2 |= hb_mult(maxp) << 30; | ||
3044 | } | ||
3045 | break; | ||
3046 | default: | ||
3047 | fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev, | ||
3048 | urb->dev->speed); | ||
3049 | done: | ||
3050 | qh_destroy(fotg210, qh); | ||
3051 | return NULL; | ||
3052 | } | ||
3053 | |||
3054 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ | ||
3055 | |||
3056 | /* init as live, toggle clear, advance to dummy */ | ||
3057 | qh->qh_state = QH_STATE_IDLE; | ||
3058 | hw = qh->hw; | ||
3059 | hw->hw_info1 = cpu_to_hc32(fotg210, info1); | ||
3060 | hw->hw_info2 = cpu_to_hc32(fotg210, info2); | ||
3061 | qh->is_out = !is_input; | ||
3062 | usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1); | ||
3063 | qh_refresh(fotg210, qh); | ||
3064 | return qh; | ||
3065 | } | ||
3066 | |||
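To make the interrupt-interval handling in qh_make() above concrete, here is a minimal standalone sketch (not part of the driver; the 256-frame periodic_size and the sample intervals are assumptions) of how a high-speed URB's microframe interval is clamped into qh->period:

	#include <stdio.h>

	int main(void)
	{
		unsigned periodic_size = 256;			/* frames in the periodic list */
		unsigned intervals[] = { 1, 4, 8, 16, 4096 };	/* urb->interval, in uframes */

		for (unsigned i = 0; i < 5; i++) {
			unsigned interval = intervals[i];
			unsigned period = interval >> 3;	/* uframes -> frames */

			if (period == 0 && interval != 1) {
				interval = 1;			/* fall back to every-uframe scheduling */
			} else if (period > periodic_size) {
				period = periodic_size;		/* clamp to the schedule length */
				interval = period << 3;
			}
			printf("interval %u uframes -> period %u frames (interval now %u)\n",
					intervals[i], period, interval);
		}
		return 0;
	}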
3067 | /*-------------------------------------------------------------------------*/ | ||
3068 | |||
3069 | static void enable_async(struct fotg210_hcd *fotg210) | ||
3070 | { | ||
3071 | if (fotg210->async_count++) | ||
3072 | return; | ||
3073 | |||
3074 | /* Stop waiting to turn off the async schedule */ | ||
3075 | fotg210->enabled_hrtimer_events &= ~BIT(FOTG210_HRTIMER_DISABLE_ASYNC); | ||
3076 | |||
3077 | /* Don't start the schedule until ASS is 0 */ | ||
3078 | fotg210_poll_ASS(fotg210); | ||
3079 | turn_on_io_watchdog(fotg210); | ||
3080 | } | ||
3081 | |||
3082 | static void disable_async(struct fotg210_hcd *fotg210) | ||
3083 | { | ||
3084 | if (--fotg210->async_count) | ||
3085 | return; | ||
3086 | |||
3087 | /* The async schedule and async_unlink list are supposed to be empty */ | ||
3088 | WARN_ON(fotg210->async->qh_next.qh || fotg210->async_unlink); | ||
3089 | |||
3090 | /* Don't turn off the schedule until ASS is 1 */ | ||
3091 | fotg210_poll_ASS(fotg210); | ||
3092 | } | ||
3093 | |||
3094 | /* move qh (and its qtds) onto async queue; maybe enable queue. */ | ||
3095 | |||
3096 | static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) | ||
3097 | { | ||
3098 | __hc32 dma = QH_NEXT(fotg210, qh->qh_dma); | ||
3099 | struct fotg210_qh *head; | ||
3100 | |||
3101 | /* Don't link a QH if there's a Clear-TT-Buffer pending */ | ||
3102 | if (unlikely(qh->clearing_tt)) | ||
3103 | return; | ||
3104 | |||
3105 | WARN_ON(qh->qh_state != QH_STATE_IDLE); | ||
3106 | |||
3107 | /* clear halt and/or toggle; and maybe recover from silicon quirk */ | ||
3108 | qh_refresh(fotg210, qh); | ||
3109 | |||
3110 | /* splice right after start */ | ||
3111 | head = fotg210->async; | ||
3112 | qh->qh_next = head->qh_next; | ||
3113 | qh->hw->hw_next = head->hw->hw_next; | ||
3114 | wmb(); | ||
3115 | |||
3116 | head->qh_next.qh = qh; | ||
3117 | head->hw->hw_next = dma; | ||
3118 | |||
3119 | qh->xacterrs = 0; | ||
3120 | qh->qh_state = QH_STATE_LINKED; | ||
3121 | /* qtd completions reported later by interrupt */ | ||
3122 | |||
3123 | enable_async(fotg210); | ||
3124 | } | ||
3125 | |||
3126 | /*-------------------------------------------------------------------------*/ | ||
3127 | |||
3128 | /* | ||
3129 | * For control/bulk/interrupt, return QH with these TDs appended. | ||
3130 | * Allocates and initializes the QH if necessary. | ||
3131 | * Returns NULL if it can't allocate the QH it needs. | ||
3132 | * If the QH has TDs (urbs) already, that's great. | ||
3133 | */ | ||
3134 | static struct fotg210_qh *qh_append_tds( | ||
3135 | struct fotg210_hcd *fotg210, | ||
3136 | struct urb *urb, | ||
3137 | struct list_head *qtd_list, | ||
3138 | int epnum, | ||
3139 | void **ptr | ||
3140 | ) | ||
3141 | { | ||
3142 | struct fotg210_qh *qh = NULL; | ||
3143 | __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f); | ||
3144 | |||
3145 | qh = (struct fotg210_qh *) *ptr; | ||
3146 | if (unlikely(qh == NULL)) { | ||
3147 | /* can't sleep here, we have fotg210->lock... */ | ||
3148 | qh = qh_make(fotg210, urb, GFP_ATOMIC); | ||
3149 | *ptr = qh; | ||
3150 | } | ||
3151 | if (likely(qh != NULL)) { | ||
3152 | struct fotg210_qtd *qtd; | ||
3153 | |||
3154 | if (unlikely(list_empty(qtd_list))) | ||
3155 | qtd = NULL; | ||
3156 | else | ||
3157 | qtd = list_entry(qtd_list->next, struct fotg210_qtd, | ||
3158 | qtd_list); | ||
3159 | |||
3160 | /* control qh may need patching ... */ | ||
3161 | if (unlikely(epnum == 0)) { | ||
3162 | /* usb_reset_device() briefly reverts to address 0 */ | ||
3163 | if (usb_pipedevice(urb->pipe) == 0) | ||
3164 | qh->hw->hw_info1 &= ~qh_addr_mask; | ||
3165 | } | ||
3166 | |||
3167 | /* just one way to queue requests: swap with the dummy qtd. | ||
3168 | * only hc or qh_refresh() ever modify the overlay. | ||
3169 | */ | ||
3170 | if (likely(qtd != NULL)) { | ||
3171 | struct fotg210_qtd *dummy; | ||
3172 | dma_addr_t dma; | ||
3173 | __hc32 token; | ||
3174 | |||
3175 | /* to avoid racing the HC, use the dummy td instead of | ||
3176 | * the first td of our list (becomes new dummy). both | ||
3177 | * tds stay deactivated until we're done, when the | ||
3178 | * HC is allowed to fetch the old dummy (4.10.2). | ||
3179 | */ | ||
3180 | token = qtd->hw_token; | ||
3181 | qtd->hw_token = HALT_BIT(fotg210); | ||
3182 | |||
3183 | dummy = qh->dummy; | ||
3184 | |||
3185 | dma = dummy->qtd_dma; | ||
3186 | *dummy = *qtd; | ||
3187 | dummy->qtd_dma = dma; | ||
3188 | |||
3189 | list_del(&qtd->qtd_list); | ||
3190 | list_add(&dummy->qtd_list, qtd_list); | ||
3191 | list_splice_tail(qtd_list, &qh->qtd_list); | ||
3192 | |||
3193 | fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma); | ||
3194 | qh->dummy = qtd; | ||
3195 | |||
3196 | /* hc must see the new dummy at list end */ | ||
3197 | dma = qtd->qtd_dma; | ||
3198 | qtd = list_entry(qh->qtd_list.prev, | ||
3199 | struct fotg210_qtd, qtd_list); | ||
3200 | qtd->hw_next = QTD_NEXT(fotg210, dma); | ||
3201 | |||
3202 | /* let the hc process these next qtds */ | ||
3203 | wmb(); | ||
3204 | dummy->hw_token = token; | ||
3205 | |||
3206 | urb->hcpriv = qh; | ||
3207 | } | ||
3208 | } | ||
3209 | return qh; | ||
3210 | } | ||
3211 | |||
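The dummy-qtd swap in qh_append_tds() is the usual EHCI trick for appending work without racing the controller. Below is a heavily simplified standalone sketch (plain structs and arbitrary token/DMA values, not the driver's types) of the same sequence: halt the new head, copy it over the old dummy while keeping the dummy's DMA address, recycle the old head as the new dummy, then release the saved token so the HC sees it go active:

	#include <stdio.h>
	#include <string.h>

	struct qtd { unsigned token; unsigned dma; };

	int main(void)
	{
		struct qtd first = { .token = 0x80, .dma = 0x1000 };	/* head of the new list */
		struct qtd dummy = { .token = 0x40, .dma = 0x2000 };	/* QH's linked, inactive dummy */
		unsigned saved_token = first.token;

		first.token = 0x40;			/* keep the new head halted while splicing */

		unsigned keep_dma = dummy.dma;
		dummy = first;				/* old dummy takes the new head's contents */
		dummy.dma = keep_dma;			/* ... but stays at its own DMA address */

		memset(&first, 0, sizeof(first));	/* old head is re-initialized as the new dummy */
		first.dma = 0x1000;

		dummy.token = saved_token;		/* only now may the HC treat it as active */
		printf("active qtd at dma 0x%x, new dummy at dma 0x%x\n", dummy.dma, first.dma);
		return 0;
	}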
3212 | /*-------------------------------------------------------------------------*/ | ||
3213 | |||
3214 | static int | ||
3215 | submit_async( | ||
3216 | struct fotg210_hcd *fotg210, | ||
3217 | struct urb *urb, | ||
3218 | struct list_head *qtd_list, | ||
3219 | gfp_t mem_flags | ||
3220 | ) { | ||
3221 | int epnum; | ||
3222 | unsigned long flags; | ||
3223 | struct fotg210_qh *qh = NULL; | ||
3224 | int rc; | ||
3225 | |||
3226 | epnum = urb->ep->desc.bEndpointAddress; | ||
3227 | |||
3228 | #ifdef FOTG210_URB_TRACE | ||
3229 | { | ||
3230 | struct fotg210_qtd *qtd; | ||
3231 | qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list); | ||
3232 | fotg210_dbg(fotg210, | ||
3233 | "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", | ||
3234 | __func__, urb->dev->devpath, urb, | ||
3235 | epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", | ||
3236 | urb->transfer_buffer_length, | ||
3237 | qtd, urb->ep->hcpriv); | ||
3238 | } | ||
3239 | #endif | ||
3240 | |||
3241 | spin_lock_irqsave(&fotg210->lock, flags); | ||
3242 | if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) { | ||
3243 | rc = -ESHUTDOWN; | ||
3244 | goto done; | ||
3245 | } | ||
3246 | rc = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb); | ||
3247 | if (unlikely(rc)) | ||
3248 | goto done; | ||
3249 | |||
3250 | qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv); | ||
3251 | if (unlikely(qh == NULL)) { | ||
3252 | usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb); | ||
3253 | rc = -ENOMEM; | ||
3254 | goto done; | ||
3255 | } | ||
3256 | |||
3257 | /* Control/bulk operations through TTs don't need scheduling, | ||
3258 | * the HC and TT handle it when the TT has a buffer ready. | ||
3259 | */ | ||
3260 | if (likely(qh->qh_state == QH_STATE_IDLE)) | ||
3261 | qh_link_async(fotg210, qh); | ||
3262 | done: | ||
3263 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
3264 | if (unlikely(qh == NULL)) | ||
3265 | qtd_list_free(fotg210, urb, qtd_list); | ||
3266 | return rc; | ||
3267 | } | ||
3268 | |||
3269 | /*-------------------------------------------------------------------------*/ | ||
3270 | |||
3271 | static void single_unlink_async(struct fotg210_hcd *fotg210, | ||
3272 | struct fotg210_qh *qh) | ||
3273 | { | ||
3274 | struct fotg210_qh *prev; | ||
3275 | |||
3276 | /* Add to the end of the list of QHs waiting for the next IAAD */ | ||
3277 | qh->qh_state = QH_STATE_UNLINK; | ||
3278 | if (fotg210->async_unlink) | ||
3279 | fotg210->async_unlink_last->unlink_next = qh; | ||
3280 | else | ||
3281 | fotg210->async_unlink = qh; | ||
3282 | fotg210->async_unlink_last = qh; | ||
3283 | |||
3284 | /* Unlink it from the schedule */ | ||
3285 | prev = fotg210->async; | ||
3286 | while (prev->qh_next.qh != qh) | ||
3287 | prev = prev->qh_next.qh; | ||
3288 | |||
3289 | prev->hw->hw_next = qh->hw->hw_next; | ||
3290 | prev->qh_next = qh->qh_next; | ||
3291 | if (fotg210->qh_scan_next == qh) | ||
3292 | fotg210->qh_scan_next = qh->qh_next.qh; | ||
3293 | } | ||
3294 | |||
3295 | static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested) | ||
3296 | { | ||
3297 | /* | ||
3298 | * Do nothing if an IAA cycle is already running or | ||
3299 | * if one will be started shortly. | ||
3300 | */ | ||
3301 | if (fotg210->async_iaa || fotg210->async_unlinking) | ||
3302 | return; | ||
3303 | |||
3304 | /* Do all the waiting QHs at once */ | ||
3305 | fotg210->async_iaa = fotg210->async_unlink; | ||
3306 | fotg210->async_unlink = NULL; | ||
3307 | |||
3308 | /* If the controller isn't running, we don't have to wait for it */ | ||
3309 | if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) { | ||
3310 | if (!nested) /* Avoid recursion */ | ||
3311 | end_unlink_async(fotg210); | ||
3312 | |||
3313 | /* Otherwise start a new IAA cycle */ | ||
3314 | } else if (likely(fotg210->rh_state == FOTG210_RH_RUNNING)) { | ||
3315 | /* Make sure the unlinks are all visible to the hardware */ | ||
3316 | wmb(); | ||
3317 | |||
3318 | fotg210_writel(fotg210, fotg210->command | CMD_IAAD, | ||
3319 | &fotg210->regs->command); | ||
3320 | fotg210_readl(fotg210, &fotg210->regs->command); | ||
3321 | fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG, | ||
3322 | true); | ||
3323 | } | ||
3324 | } | ||
3325 | |||
3326 | /* the async qh for the qtds being unlinked is now gone from the HC */ | ||
3327 | |||
3328 | static void end_unlink_async(struct fotg210_hcd *fotg210) | ||
3329 | { | ||
3330 | struct fotg210_qh *qh; | ||
3331 | |||
3332 | /* Process the idle QHs */ | ||
3333 | restart: | ||
3334 | fotg210->async_unlinking = true; | ||
3335 | while (fotg210->async_iaa) { | ||
3336 | qh = fotg210->async_iaa; | ||
3337 | fotg210->async_iaa = qh->unlink_next; | ||
3338 | qh->unlink_next = NULL; | ||
3339 | |||
3340 | qh->qh_state = QH_STATE_IDLE; | ||
3341 | qh->qh_next.qh = NULL; | ||
3342 | |||
3343 | qh_completions(fotg210, qh); | ||
3344 | if (!list_empty(&qh->qtd_list) && | ||
3345 | fotg210->rh_state == FOTG210_RH_RUNNING) | ||
3346 | qh_link_async(fotg210, qh); | ||
3347 | disable_async(fotg210); | ||
3348 | } | ||
3349 | fotg210->async_unlinking = false; | ||
3350 | |||
3351 | /* Start a new IAA cycle if any QHs are waiting for it */ | ||
3352 | if (fotg210->async_unlink) { | ||
3353 | start_iaa_cycle(fotg210, true); | ||
3354 | if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) | ||
3355 | goto restart; | ||
3356 | } | ||
3357 | } | ||
3358 | |||
3359 | static void unlink_empty_async(struct fotg210_hcd *fotg210) | ||
3360 | { | ||
3361 | struct fotg210_qh *qh, *next; | ||
3362 | bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING); | ||
3363 | bool check_unlinks_later = false; | ||
3364 | |||
3365 | /* Unlink all the async QHs that have been empty for a timer cycle */ | ||
3366 | next = fotg210->async->qh_next.qh; | ||
3367 | while (next) { | ||
3368 | qh = next; | ||
3369 | next = qh->qh_next.qh; | ||
3370 | |||
3371 | if (list_empty(&qh->qtd_list) && | ||
3372 | qh->qh_state == QH_STATE_LINKED) { | ||
3373 | if (!stopped && qh->unlink_cycle == | ||
3374 | fotg210->async_unlink_cycle) | ||
3375 | check_unlinks_later = true; | ||
3376 | else | ||
3377 | single_unlink_async(fotg210, qh); | ||
3378 | } | ||
3379 | } | ||
3380 | |||
3381 | /* Start a new IAA cycle if any QHs are waiting for it */ | ||
3382 | if (fotg210->async_unlink) | ||
3383 | start_iaa_cycle(fotg210, false); | ||
3384 | |||
3385 | /* QHs that haven't been empty for long enough will be handled later */ | ||
3386 | if (check_unlinks_later) { | ||
3387 | fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS, | ||
3388 | true); | ||
3389 | ++fotg210->async_unlink_cycle; | ||
3390 | } | ||
3391 | } | ||
3392 | |||
3393 | /* makes sure the async qh will become idle */ | ||
3394 | /* caller must own fotg210->lock */ | ||
3395 | |||
3396 | static void start_unlink_async(struct fotg210_hcd *fotg210, | ||
3397 | struct fotg210_qh *qh) | ||
3398 | { | ||
3399 | /* | ||
3400 | * If the QH isn't linked then there's nothing we can do | ||
3401 | * unless we were called during a giveback, in which case | ||
3402 | * qh_completions() has to deal with it. | ||
3403 | */ | ||
3404 | if (qh->qh_state != QH_STATE_LINKED) { | ||
3405 | if (qh->qh_state == QH_STATE_COMPLETING) | ||
3406 | qh->needs_rescan = 1; | ||
3407 | return; | ||
3408 | } | ||
3409 | |||
3410 | single_unlink_async(fotg210, qh); | ||
3411 | start_iaa_cycle(fotg210, false); | ||
3412 | } | ||
3413 | |||
3414 | /*-------------------------------------------------------------------------*/ | ||
3415 | |||
3416 | static void scan_async(struct fotg210_hcd *fotg210) | ||
3417 | { | ||
3418 | struct fotg210_qh *qh; | ||
3419 | bool check_unlinks_later = false; | ||
3420 | |||
3421 | fotg210->qh_scan_next = fotg210->async->qh_next.qh; | ||
3422 | while (fotg210->qh_scan_next) { | ||
3423 | qh = fotg210->qh_scan_next; | ||
3424 | fotg210->qh_scan_next = qh->qh_next.qh; | ||
3425 | rescan: | ||
3426 | /* clean any finished work for this qh */ | ||
3427 | if (!list_empty(&qh->qtd_list)) { | ||
3428 | int temp; | ||
3429 | |||
3430 | /* | ||
3431 | * Unlinks could happen here; completion reporting | ||
3432 | * drops the lock. That's why fotg210->qh_scan_next | ||
3433 | * always holds the next qh to scan; if the next qh | ||
3434 | * gets unlinked then fotg210->qh_scan_next is adjusted | ||
3435 | * in single_unlink_async(). | ||
3436 | */ | ||
3437 | temp = qh_completions(fotg210, qh); | ||
3438 | if (qh->needs_rescan) { | ||
3439 | start_unlink_async(fotg210, qh); | ||
3440 | } else if (list_empty(&qh->qtd_list) | ||
3441 | && qh->qh_state == QH_STATE_LINKED) { | ||
3442 | qh->unlink_cycle = fotg210->async_unlink_cycle; | ||
3443 | check_unlinks_later = true; | ||
3444 | } else if (temp != 0) | ||
3445 | goto rescan; | ||
3446 | } | ||
3447 | } | ||
3448 | |||
3449 | /* | ||
3450 | * Unlink empty entries, reducing DMA usage as well | ||
3451 | * as HCD schedule-scanning costs. Delay for any qh | ||
3452 | * we just scanned, there's a not-unusual case that it | ||
3453 | * doesn't stay idle for long. | ||
3454 | */ | ||
3455 | if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING && | ||
3456 | !(fotg210->enabled_hrtimer_events & | ||
3457 | BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) { | ||
3458 | fotg210_enable_event(fotg210, | ||
3459 | FOTG210_HRTIMER_ASYNC_UNLINKS, true); | ||
3460 | ++fotg210->async_unlink_cycle; | ||
3461 | } | ||
3462 | } | ||
3463 | /*-------------------------------------------------------------------------*/ | ||
3464 | /* | ||
3465 | * EHCI scheduled transaction support: interrupt, iso, split iso | ||
3466 | * These are called "periodic" transactions in the EHCI spec. | ||
3467 | * | ||
3468 | * Note that for interrupt transfers, the QH/QTD manipulation is shared | ||
3469 | * with the "asynchronous" transaction support (control/bulk transfers). | ||
3470 | * The only real difference is in how interrupt transfers are scheduled. | ||
3471 | * | ||
3472 | * For ISO, we make an "iso_stream" head to serve the same role as a QH. | ||
3473 | * It keeps track of every ITD (or SITD) that's linked, and holds enough | ||
3474 | * pre-calculated schedule data to make appending to the queue be quick. | ||
3475 | */ | ||
3476 | |||
3477 | static int fotg210_get_frame(struct usb_hcd *hcd); | ||
3478 | |||
3479 | /*-------------------------------------------------------------------------*/ | ||
3480 | |||
3481 | /* | ||
3482 | * periodic_next_shadow - return "next" pointer on shadow list | ||
3483 | * @periodic: host pointer to qh/itd | ||
3484 | * @tag: hardware tag for type of this record | ||
3485 | */ | ||
3486 | static union fotg210_shadow * | ||
3487 | periodic_next_shadow(struct fotg210_hcd *fotg210, | ||
3488 | union fotg210_shadow *periodic, __hc32 tag) | ||
3489 | { | ||
3490 | switch (hc32_to_cpu(fotg210, tag)) { | ||
3491 | case Q_TYPE_QH: | ||
3492 | return &periodic->qh->qh_next; | ||
3493 | case Q_TYPE_FSTN: | ||
3494 | return &periodic->fstn->fstn_next; | ||
3495 | default: | ||
3496 | return &periodic->itd->itd_next; | ||
3497 | } | ||
3498 | } | ||
3499 | |||
3500 | static __hc32 * | ||
3501 | shadow_next_periodic(struct fotg210_hcd *fotg210, | ||
3502 | union fotg210_shadow *periodic, __hc32 tag) | ||
3503 | { | ||
3504 | switch (hc32_to_cpu(fotg210, tag)) { | ||
3505 | /* our fotg210_shadow.qh is actually the software part */ | ||
3506 | case Q_TYPE_QH: | ||
3507 | return &periodic->qh->hw->hw_next; | ||
3508 | /* others are hw parts */ | ||
3509 | default: | ||
3510 | return periodic->hw_next; | ||
3511 | } | ||
3512 | } | ||
3513 | |||
3514 | /* caller must hold fotg210->lock */ | ||
3515 | static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame, | ||
3516 | void *ptr) | ||
3517 | { | ||
3518 | union fotg210_shadow *prev_p = &fotg210->pshadow[frame]; | ||
3519 | __hc32 *hw_p = &fotg210->periodic[frame]; | ||
3520 | union fotg210_shadow here = *prev_p; | ||
3521 | |||
3522 | /* find predecessor of "ptr"; hw and shadow lists are in sync */ | ||
3523 | while (here.ptr && here.ptr != ptr) { | ||
3524 | prev_p = periodic_next_shadow(fotg210, prev_p, | ||
3525 | Q_NEXT_TYPE(fotg210, *hw_p)); | ||
3526 | hw_p = shadow_next_periodic(fotg210, &here, | ||
3527 | Q_NEXT_TYPE(fotg210, *hw_p)); | ||
3528 | here = *prev_p; | ||
3529 | } | ||
3530 | /* an interrupt entry (at list end) could have been shared */ | ||
3531 | if (!here.ptr) | ||
3532 | return; | ||
3533 | |||
3534 | /* update shadow and hardware lists ... the old "next" pointers | ||
3535 | * from ptr may still be in use, the caller updates them. | ||
3536 | */ | ||
3537 | *prev_p = *periodic_next_shadow(fotg210, &here, | ||
3538 | Q_NEXT_TYPE(fotg210, *hw_p)); | ||
3539 | |||
3540 | *hw_p = *shadow_next_periodic(fotg210, &here, | ||
3541 | Q_NEXT_TYPE(fotg210, *hw_p)); | ||
3542 | } | ||
3543 | |||
3544 | /* how many of the uframe's 125 usecs are allocated? */ | ||
3545 | static unsigned short | ||
3546 | periodic_usecs(struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe) | ||
3547 | { | ||
3548 | __hc32 *hw_p = &fotg210->periodic[frame]; | ||
3549 | union fotg210_shadow *q = &fotg210->pshadow[frame]; | ||
3550 | unsigned usecs = 0; | ||
3551 | struct fotg210_qh_hw *hw; | ||
3552 | |||
3553 | while (q->ptr) { | ||
3554 | switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) { | ||
3555 | case Q_TYPE_QH: | ||
3556 | hw = q->qh->hw; | ||
3557 | /* is it in the S-mask? */ | ||
3558 | if (hw->hw_info2 & cpu_to_hc32(fotg210, 1 << uframe)) | ||
3559 | usecs += q->qh->usecs; | ||
3560 | /* ... or C-mask? */ | ||
3561 | if (hw->hw_info2 & cpu_to_hc32(fotg210, | ||
3562 | 1 << (8 + uframe))) | ||
3563 | usecs += q->qh->c_usecs; | ||
3564 | hw_p = &hw->hw_next; | ||
3565 | q = &q->qh->qh_next; | ||
3566 | break; | ||
3567 | /* case Q_TYPE_FSTN: */ | ||
3568 | default: | ||
3569 | /* for "save place" FSTNs, count the relevant INTR | ||
3570 | * bandwidth from the previous frame | ||
3571 | */ | ||
3572 | if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210)) | ||
3573 | fotg210_dbg(fotg210, "ignoring FSTN cost ...\n"); | ||
3574 | |||
3575 | hw_p = &q->fstn->hw_next; | ||
3576 | q = &q->fstn->fstn_next; | ||
3577 | break; | ||
3578 | case Q_TYPE_ITD: | ||
3579 | if (q->itd->hw_transaction[uframe]) | ||
3580 | usecs += q->itd->stream->usecs; | ||
3581 | hw_p = &q->itd->hw_next; | ||
3582 | q = &q->itd->itd_next; | ||
3583 | break; | ||
3584 | } | ||
3585 | } | ||
3586 | #ifdef DEBUG | ||
3587 | if (usecs > fotg210->uframe_periodic_max) | ||
3588 | fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n", | ||
3589 | frame * 8 + uframe, usecs); | ||
3590 | #endif | ||
3591 | return usecs; | ||
3592 | } | ||
3593 | |||
3594 | /*-------------------------------------------------------------------------*/ | ||
3595 | |||
3596 | static int same_tt(struct usb_device *dev1, struct usb_device *dev2) | ||
3597 | { | ||
3598 | if (!dev1->tt || !dev2->tt) | ||
3599 | return 0; | ||
3600 | if (dev1->tt != dev2->tt) | ||
3601 | return 0; | ||
3602 | if (dev1->tt->multi) | ||
3603 | return dev1->ttport == dev2->ttport; | ||
3604 | else | ||
3605 | return 1; | ||
3606 | } | ||
3607 | |||
3608 | /* return true iff the device's transaction translator is available | ||
3609 | * for a periodic transfer starting at the specified frame, using | ||
3610 | * all the uframes in the mask. | ||
3611 | */ | ||
3612 | static int tt_no_collision( | ||
3613 | struct fotg210_hcd *fotg210, | ||
3614 | unsigned period, | ||
3615 | struct usb_device *dev, | ||
3616 | unsigned frame, | ||
3617 | u32 uf_mask | ||
3618 | ) | ||
3619 | { | ||
3620 | if (period == 0) /* error */ | ||
3621 | return 0; | ||
3622 | |||
3623 | /* note bandwidth wastage: split never follows csplit | ||
3624 | * (different dev or endpoint) until the next uframe. | ||
3625 | * calling convention doesn't make that distinction. | ||
3626 | */ | ||
3627 | for (; frame < fotg210->periodic_size; frame += period) { | ||
3628 | union fotg210_shadow here; | ||
3629 | __hc32 type; | ||
3630 | struct fotg210_qh_hw *hw; | ||
3631 | |||
3632 | here = fotg210->pshadow[frame]; | ||
3633 | type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]); | ||
3634 | while (here.ptr) { | ||
3635 | switch (hc32_to_cpu(fotg210, type)) { | ||
3636 | case Q_TYPE_ITD: | ||
3637 | type = Q_NEXT_TYPE(fotg210, here.itd->hw_next); | ||
3638 | here = here.itd->itd_next; | ||
3639 | continue; | ||
3640 | case Q_TYPE_QH: | ||
3641 | hw = here.qh->hw; | ||
3642 | if (same_tt(dev, here.qh->dev)) { | ||
3643 | u32 mask; | ||
3644 | |||
3645 | mask = hc32_to_cpu(fotg210, | ||
3646 | hw->hw_info2); | ||
3647 | /* "knows" no gap is needed */ | ||
3648 | mask |= mask >> 8; | ||
3649 | if (mask & uf_mask) | ||
3650 | break; | ||
3651 | } | ||
3652 | type = Q_NEXT_TYPE(fotg210, hw->hw_next); | ||
3653 | here = here.qh->qh_next; | ||
3654 | continue; | ||
3655 | /* case Q_TYPE_FSTN: */ | ||
3656 | default: | ||
3657 | fotg210_dbg(fotg210, | ||
3658 | "periodic frame %d bogus type %d\n", | ||
3659 | frame, type); | ||
3660 | } | ||
3661 | |||
3662 | /* collision or error */ | ||
3663 | return 0; | ||
3664 | } | ||
3665 | } | ||
3666 | |||
3667 | /* no collision */ | ||
3668 | return 1; | ||
3669 | } | ||
3670 | |||
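The mask folding in tt_no_collision() is compact enough to miss; the standalone bit sketch below (example mask values only) shows how a linked qh's C-mask byte is folded onto its S-mask byte so a single AND against uf_mask flags TT activity in any of the candidate uframes:

	#include <stdio.h>

	int main(void)
	{
		unsigned hw_info2 = 0x1805;	/* S-mask uframes 0,2; C-mask uframes 3,4 */
		unsigned uf_mask = 0x18;	/* candidate SPLIT/CSPLIT uframes 3,4 */

		unsigned mask = hw_info2 & 0xffff;
		mask |= mask >> 8;		/* fold the C-mask down beside the S-mask */
		printf("collision: %s\n", (mask & uf_mask) ? "yes" : "no");	/* "yes" */
		return 0;
	}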
3671 | /*-------------------------------------------------------------------------*/ | ||
3672 | |||
3673 | static void enable_periodic(struct fotg210_hcd *fotg210) | ||
3674 | { | ||
3675 | if (fotg210->periodic_count++) | ||
3676 | return; | ||
3677 | |||
3678 | /* Stop waiting to turn off the periodic schedule */ | ||
3679 | fotg210->enabled_hrtimer_events &= | ||
3680 | ~BIT(FOTG210_HRTIMER_DISABLE_PERIODIC); | ||
3681 | |||
3682 | /* Don't start the schedule until PSS is 0 */ | ||
3683 | fotg210_poll_PSS(fotg210); | ||
3684 | turn_on_io_watchdog(fotg210); | ||
3685 | } | ||
3686 | |||
3687 | static void disable_periodic(struct fotg210_hcd *fotg210) | ||
3688 | { | ||
3689 | if (--fotg210->periodic_count) | ||
3690 | return; | ||
3691 | |||
3692 | /* Don't turn off the schedule until PSS is 1 */ | ||
3693 | fotg210_poll_PSS(fotg210); | ||
3694 | } | ||
3695 | |||
3696 | /*-------------------------------------------------------------------------*/ | ||
3697 | |||
3698 | /* periodic schedule slots have iso tds (normal or split) first, then a | ||
3699 | * sparse tree for active interrupt transfers. | ||
3700 | * | ||
3701 | * this just links in a qh; caller guarantees uframe masks are set right. | ||
3702 | * no FSTN support (yet; fotg210 0.96+) | ||
3703 | */ | ||
3704 | static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) | ||
3705 | { | ||
3706 | unsigned i; | ||
3707 | unsigned period = qh->period; | ||
3708 | |||
3709 | dev_dbg(&qh->dev->dev, | ||
3710 | "link qh%d-%04x/%p start %d [%d/%d us]\n", | ||
3711 | period, hc32_to_cpup(fotg210, &qh->hw->hw_info2) | ||
3712 | & (QH_CMASK | QH_SMASK), | ||
3713 | qh, qh->start, qh->usecs, qh->c_usecs); | ||
3714 | |||
3715 | /* high bandwidth, or otherwise every microframe */ | ||
3716 | if (period == 0) | ||
3717 | period = 1; | ||
3718 | |||
3719 | for (i = qh->start; i < fotg210->periodic_size; i += period) { | ||
3720 | union fotg210_shadow *prev = &fotg210->pshadow[i]; | ||
3721 | __hc32 *hw_p = &fotg210->periodic[i]; | ||
3722 | union fotg210_shadow here = *prev; | ||
3723 | __hc32 type = 0; | ||
3724 | |||
3725 | /* skip the iso nodes at list head */ | ||
3726 | while (here.ptr) { | ||
3727 | type = Q_NEXT_TYPE(fotg210, *hw_p); | ||
3728 | if (type == cpu_to_hc32(fotg210, Q_TYPE_QH)) | ||
3729 | break; | ||
3730 | prev = periodic_next_shadow(fotg210, prev, type); | ||
3731 | hw_p = shadow_next_periodic(fotg210, &here, type); | ||
3732 | here = *prev; | ||
3733 | } | ||
3734 | |||
3735 | /* sorting each branch by period (slow-->fast) | ||
3736 | * enables sharing interior tree nodes | ||
3737 | */ | ||
3738 | while (here.ptr && qh != here.qh) { | ||
3739 | if (qh->period > here.qh->period) | ||
3740 | break; | ||
3741 | prev = &here.qh->qh_next; | ||
3742 | hw_p = &here.qh->hw->hw_next; | ||
3743 | here = *prev; | ||
3744 | } | ||
3745 | /* link in this qh, unless some earlier pass did that */ | ||
3746 | if (qh != here.qh) { | ||
3747 | qh->qh_next = here; | ||
3748 | if (here.qh) | ||
3749 | qh->hw->hw_next = *hw_p; | ||
3750 | wmb(); | ||
3751 | prev->qh = qh; | ||
3752 | *hw_p = QH_NEXT(fotg210, qh->qh_dma); | ||
3753 | } | ||
3754 | } | ||
3755 | qh->qh_state = QH_STATE_LINKED; | ||
3756 | qh->xacterrs = 0; | ||
3757 | |||
3758 | /* update per-qh bandwidth for usbfs */ | ||
3759 | fotg210_to_hcd(fotg210)->self.bandwidth_allocated += qh->period | ||
3760 | ? ((qh->usecs + qh->c_usecs) / qh->period) | ||
3761 | : (qh->usecs * 8); | ||
3762 | |||
3763 | list_add(&qh->intr_node, &fotg210->intr_qh_list); | ||
3764 | |||
3765 | /* maybe enable periodic schedule processing */ | ||
3766 | ++fotg210->intr_count; | ||
3767 | enable_periodic(fotg210); | ||
3768 | } | ||
3769 | |||
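The usbfs accounting at the end of qh_link_periodic() charges an average per-frame cost; a tiny standalone sketch (made-up usec and period values) of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned usecs = 25, c_usecs = 10;	/* SPLIT + CSPLIT budget per transfer */
		unsigned period = 4;			/* frames between transfers */

		unsigned per_frame = period ? (usecs + c_usecs) / period
					    : usecs * 8;	/* period 0 means every uframe */
		printf("bandwidth_allocated += %u us/frame\n", per_frame);	/* 8 */
		return 0;
	}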
3770 | static void qh_unlink_periodic(struct fotg210_hcd *fotg210, | ||
3771 | struct fotg210_qh *qh) | ||
3772 | { | ||
3773 | unsigned i; | ||
3774 | unsigned period; | ||
3775 | |||
3776 | /* | ||
3777 | * If qh is for a low/full-speed device, simply unlinking it | ||
3778 | * could interfere with an ongoing split transaction. To unlink | ||
3779 | * it safely would require setting the QH_INACTIVATE bit and | ||
3780 | * waiting at least one frame, as described in EHCI 4.12.2.5. | ||
3781 | * | ||
3782 | * We won't bother with any of this. Instead, we assume that the | ||
3783 | * only reason for unlinking an interrupt QH while the current URB | ||
3784 | * is still active is to dequeue all the URBs (flush the whole | ||
3785 | * endpoint queue). | ||
3786 | * | ||
3787 | * If rebalancing the periodic schedule is ever implemented, this | ||
3788 | * approach will no longer be valid. | ||
3789 | */ | ||
3790 | |||
3791 | /* high bandwidth, or otherwise part of every microframe */ | ||
3792 | period = qh->period; | ||
3793 | if (!period) | ||
3794 | period = 1; | ||
3795 | |||
3796 | for (i = qh->start; i < fotg210->periodic_size; i += period) | ||
3797 | periodic_unlink(fotg210, i, qh); | ||
3798 | |||
3799 | /* update per-qh bandwidth for usbfs */ | ||
3800 | fotg210_to_hcd(fotg210)->self.bandwidth_allocated -= qh->period | ||
3801 | ? ((qh->usecs + qh->c_usecs) / qh->period) | ||
3802 | : (qh->usecs * 8); | ||
3803 | |||
3804 | dev_dbg(&qh->dev->dev, | ||
3805 | "unlink qh%d-%04x/%p start %d [%d/%d us]\n", | ||
3806 | qh->period, | ||
3807 | hc32_to_cpup(fotg210, &qh->hw->hw_info2) & | ||
3808 | (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, qh->c_usecs); | ||
3809 | |||
3810 | /* qh->qh_next still "live" to HC */ | ||
3811 | qh->qh_state = QH_STATE_UNLINK; | ||
3812 | qh->qh_next.ptr = NULL; | ||
3813 | |||
3814 | if (fotg210->qh_scan_next == qh) | ||
3815 | fotg210->qh_scan_next = list_entry(qh->intr_node.next, | ||
3816 | struct fotg210_qh, intr_node); | ||
3817 | list_del(&qh->intr_node); | ||
3818 | } | ||
3819 | |||
3820 | static void start_unlink_intr(struct fotg210_hcd *fotg210, | ||
3821 | struct fotg210_qh *qh) | ||
3822 | { | ||
3823 | /* If the QH isn't linked then there's nothing we can do | ||
3824 | * unless we were called during a giveback, in which case | ||
3825 | * qh_completions() has to deal with it. | ||
3826 | */ | ||
3827 | if (qh->qh_state != QH_STATE_LINKED) { | ||
3828 | if (qh->qh_state == QH_STATE_COMPLETING) | ||
3829 | qh->needs_rescan = 1; | ||
3830 | return; | ||
3831 | } | ||
3832 | |||
3833 | qh_unlink_periodic(fotg210, qh); | ||
3834 | |||
3835 | /* Make sure the unlinks are visible before starting the timer */ | ||
3836 | wmb(); | ||
3837 | |||
3838 | /* | ||
3839 | * The EHCI spec doesn't say how long it takes the controller to | ||
3840 | * stop accessing an unlinked interrupt QH. The timer delay is | ||
3841 | * 9 uframes; presumably that will be long enough. | ||
3842 | */ | ||
3843 | qh->unlink_cycle = fotg210->intr_unlink_cycle; | ||
3844 | |||
3845 | /* New entries go at the end of the intr_unlink list */ | ||
3846 | if (fotg210->intr_unlink) | ||
3847 | fotg210->intr_unlink_last->unlink_next = qh; | ||
3848 | else | ||
3849 | fotg210->intr_unlink = qh; | ||
3850 | fotg210->intr_unlink_last = qh; | ||
3851 | |||
3852 | if (fotg210->intr_unlinking) | ||
3853 | ; /* Avoid recursive calls */ | ||
3854 | else if (fotg210->rh_state < FOTG210_RH_RUNNING) | ||
3855 | fotg210_handle_intr_unlinks(fotg210); | ||
3856 | else if (fotg210->intr_unlink == qh) { | ||
3857 | fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR, | ||
3858 | true); | ||
3859 | ++fotg210->intr_unlink_cycle; | ||
3860 | } | ||
3861 | } | ||
3862 | |||
3863 | static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) | ||
3864 | { | ||
3865 | struct fotg210_qh_hw *hw = qh->hw; | ||
3866 | int rc; | ||
3867 | |||
3868 | qh->qh_state = QH_STATE_IDLE; | ||
3869 | hw->hw_next = FOTG210_LIST_END(fotg210); | ||
3870 | |||
3871 | qh_completions(fotg210, qh); | ||
3872 | |||
3873 | /* reschedule QH iff another request is queued */ | ||
3874 | if (!list_empty(&qh->qtd_list) && | ||
3875 | fotg210->rh_state == FOTG210_RH_RUNNING) { | ||
3876 | rc = qh_schedule(fotg210, qh); | ||
3877 | |||
3878 | /* An error here likely indicates handshake failure | ||
3879 | * or no space left in the schedule. Neither fault | ||
3880 | * should happen often ... | ||
3881 | * | ||
3882 | * FIXME kill the now-dysfunctional queued urbs | ||
3883 | */ | ||
3884 | if (rc != 0) | ||
3885 | fotg210_err(fotg210, "can't reschedule qh %p, err %d\n", | ||
3886 | qh, rc); | ||
3887 | } | ||
3888 | |||
3889 | /* maybe turn off periodic schedule */ | ||
3890 | --fotg210->intr_count; | ||
3891 | disable_periodic(fotg210); | ||
3892 | } | ||
3893 | |||
3894 | /*-------------------------------------------------------------------------*/ | ||
3895 | |||
3896 | static int check_period( | ||
3897 | struct fotg210_hcd *fotg210, | ||
3898 | unsigned frame, | ||
3899 | unsigned uframe, | ||
3900 | unsigned period, | ||
3901 | unsigned usecs | ||
3902 | ) { | ||
3903 | int claimed; | ||
3904 | |||
3905 | /* complete split running into next frame? | ||
3906 | * given FSTN support, we could sometimes check... | ||
3907 | */ | ||
3908 | if (uframe >= 8) | ||
3909 | return 0; | ||
3910 | |||
3911 | /* convert "usecs we need" to "max already claimed" */ | ||
3912 | usecs = fotg210->uframe_periodic_max - usecs; | ||
3913 | |||
3914 | /* we "know" 2 and 4 uframe intervals were rejected; so | ||
3915 | * for period 0, check _every_ microframe in the schedule. | ||
3916 | */ | ||
3917 | if (unlikely(period == 0)) { | ||
3918 | do { | ||
3919 | for (uframe = 0; uframe < 7; uframe++) { | ||
3920 | claimed = periodic_usecs(fotg210, frame, | ||
3921 | uframe); | ||
3922 | if (claimed > usecs) | ||
3923 | return 0; | ||
3924 | } | ||
3925 | } while ((frame += 1) < fotg210->periodic_size); | ||
3926 | |||
3927 | /* just check the specified uframe, at that period */ | ||
3928 | } else { | ||
3929 | do { | ||
3930 | claimed = periodic_usecs(fotg210, frame, uframe); | ||
3931 | if (claimed > usecs) | ||
3932 | return 0; | ||
3933 | } while ((frame += period) < fotg210->periodic_size); | ||
3934 | } | ||
3935 | |||
3936 | /* success! */ | ||
3937 | return 1; | ||
3938 | } | ||
3939 | |||
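check_period() inverts the comparison: instead of asking "do we fit?", it converts the usecs this qh needs into the most that may already be claimed in a uframe. A tiny standalone sketch with illustrative numbers (100 us stands in for the usual 80%-of-125 us uframe_periodic_max default):

	#include <stdio.h>

	int main(void)
	{
		unsigned uframe_periodic_max = 100;	/* usecs available per uframe */
		unsigned need = 30;			/* usecs this qh wants per uframe */
		unsigned limit = uframe_periodic_max - need;	/* 70: max already claimable */
		unsigned claimed = 75;			/* usecs already booked in that uframe */

		printf("fits: %s\n", claimed > limit ? "no" : "yes");	/* "no" */
		return 0;
	}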
3940 | static int check_intr_schedule( | ||
3941 | struct fotg210_hcd *fotg210, | ||
3942 | unsigned frame, | ||
3943 | unsigned uframe, | ||
3944 | const struct fotg210_qh *qh, | ||
3945 | __hc32 *c_maskp | ||
3946 | ) | ||
3947 | { | ||
3948 | int retval = -ENOSPC; | ||
3949 | u8 mask = 0; | ||
3950 | |||
3951 | if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ | ||
3952 | goto done; | ||
3953 | |||
3954 | if (!check_period(fotg210, frame, uframe, qh->period, qh->usecs)) | ||
3955 | goto done; | ||
3956 | if (!qh->c_usecs) { | ||
3957 | retval = 0; | ||
3958 | *c_maskp = 0; | ||
3959 | goto done; | ||
3960 | } | ||
3961 | |||
3962 | /* Make sure this tt's buffer is also available for CSPLITs. | ||
3963 | * We pessimize a bit; probably the typical full speed case | ||
3964 | * doesn't need the second CSPLIT. | ||
3965 | * | ||
3966 | * NOTE: both SPLIT and CSPLIT could be checked in just | ||
3967 | * one smart pass... | ||
3968 | */ | ||
3969 | mask = 0x03 << (uframe + qh->gap_uf); | ||
3970 | *c_maskp = cpu_to_hc32(fotg210, mask << 8); | ||
3971 | |||
3972 | mask |= 1 << uframe; | ||
3973 | if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) { | ||
3974 | if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1, | ||
3975 | qh->period, qh->c_usecs)) | ||
3976 | goto done; | ||
3977 | if (!check_period(fotg210, frame, uframe + qh->gap_uf, | ||
3978 | qh->period, qh->c_usecs)) | ||
3979 | goto done; | ||
3980 | retval = 0; | ||
3981 | } | ||
3982 | done: | ||
3983 | return retval; | ||
3984 | } | ||
3985 | |||
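The SPLIT/CSPLIT mask construction above is worth seeing with numbers; this standalone sketch (example uframe and gap values) mirrors it: two complete-split candidates follow the start-split by gap_uf, the pair is shifted into the C-mask byte, and the start-split uframe is added for the TT collision check:

	#include <stdio.h>

	int main(void)
	{
		unsigned uframe = 1;		/* start-split uframe */
		unsigned gap_uf = 2;		/* full/low-speed turnaround gap */

		unsigned mask = 0x03 << (uframe + gap_uf);	/* CSPLIT candidates: uframes 3 and 4 */
		unsigned c_mask = mask << 8;			/* lands in hw_info2's C-mask byte */
		mask |= 1 << uframe;				/* include the SPLIT uframe itself */

		printf("uf_mask 0x%02x, c-mask field 0x%04x\n", mask, c_mask);	/* 0x1a, 0x1800 */
		return 0;
	}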
3986 | /* "first fit" scheduling policy used the first time through, | ||
3987 | * or when the previous schedule slot can't be re-used. | ||
3988 | */ | ||
3989 | static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) | ||
3990 | { | ||
3991 | int status; | ||
3992 | unsigned uframe; | ||
3993 | __hc32 c_mask; | ||
3994 | unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ | ||
3995 | struct fotg210_qh_hw *hw = qh->hw; | ||
3996 | |||
3997 | qh_refresh(fotg210, qh); | ||
3998 | hw->hw_next = FOTG210_LIST_END(fotg210); | ||
3999 | frame = qh->start; | ||
4000 | |||
4001 | /* reuse the previous schedule slots, if we can */ | ||
4002 | if (frame < qh->period) { | ||
4003 | uframe = ffs(hc32_to_cpup(fotg210, &hw->hw_info2) & QH_SMASK); | ||
4004 | status = check_intr_schedule(fotg210, frame, --uframe, | ||
4005 | qh, &c_mask); | ||
4006 | } else { | ||
4007 | uframe = 0; | ||
4008 | c_mask = 0; | ||
4009 | status = -ENOSPC; | ||
4010 | } | ||
4011 | |||
4012 | /* else scan the schedule to find a group of slots such that all | ||
4013 | * uframes have enough periodic bandwidth available. | ||
4014 | */ | ||
4015 | if (status) { | ||
4016 | /* "normal" case, uframing flexible except with splits */ | ||
4017 | if (qh->period) { | ||
4018 | int i; | ||
4019 | |||
4020 | for (i = qh->period; status && i > 0; --i) { | ||
4021 | frame = ++fotg210->random_frame % qh->period; | ||
4022 | for (uframe = 0; uframe < 8; uframe++) { | ||
4023 | status = check_intr_schedule(fotg210, | ||
4024 | frame, uframe, qh, | ||
4025 | &c_mask); | ||
4026 | if (status == 0) | ||
4027 | break; | ||
4028 | } | ||
4029 | } | ||
4030 | |||
4031 | /* qh->period == 0 means every uframe */ | ||
4032 | } else { | ||
4033 | frame = 0; | ||
4034 | status = check_intr_schedule(fotg210, 0, 0, qh, | ||
4035 | &c_mask); | ||
4036 | } | ||
4037 | if (status) | ||
4038 | goto done; | ||
4039 | qh->start = frame; | ||
4040 | |||
4041 | /* reset S-frame and (maybe) C-frame masks */ | ||
4042 | hw->hw_info2 &= cpu_to_hc32(fotg210, ~(QH_CMASK | QH_SMASK)); | ||
4043 | hw->hw_info2 |= qh->period | ||
4044 | ? cpu_to_hc32(fotg210, 1 << uframe) | ||
4045 | : cpu_to_hc32(fotg210, QH_SMASK); | ||
4046 | hw->hw_info2 |= c_mask; | ||
4047 | } else | ||
4048 | fotg210_dbg(fotg210, "reused qh %p schedule\n", qh); | ||
4049 | |||
4050 | /* stuff into the periodic schedule */ | ||
4051 | qh_link_periodic(fotg210, qh); | ||
4052 | done: | ||
4053 | return status; | ||
4054 | } | ||
4055 | |||
4056 | static int intr_submit( | ||
4057 | struct fotg210_hcd *fotg210, | ||
4058 | struct urb *urb, | ||
4059 | struct list_head *qtd_list, | ||
4060 | gfp_t mem_flags | ||
4061 | ) { | ||
4062 | unsigned epnum; | ||
4063 | unsigned long flags; | ||
4064 | struct fotg210_qh *qh; | ||
4065 | int status; | ||
4066 | struct list_head empty; | ||
4067 | |||
4068 | /* get endpoint and transfer/schedule data */ | ||
4069 | epnum = urb->ep->desc.bEndpointAddress; | ||
4070 | |||
4071 | spin_lock_irqsave(&fotg210->lock, flags); | ||
4072 | |||
4073 | if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) { | ||
4074 | status = -ESHUTDOWN; | ||
4075 | goto done_not_linked; | ||
4076 | } | ||
4077 | status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb); | ||
4078 | if (unlikely(status)) | ||
4079 | goto done_not_linked; | ||
4080 | |||
4081 | /* get qh and force any scheduling errors */ | ||
4082 | INIT_LIST_HEAD(&empty); | ||
4083 | qh = qh_append_tds(fotg210, urb, &empty, epnum, &urb->ep->hcpriv); | ||
4084 | if (qh == NULL) { | ||
4085 | status = -ENOMEM; | ||
4086 | goto done; | ||
4087 | } | ||
4088 | if (qh->qh_state == QH_STATE_IDLE) { | ||
4089 | status = qh_schedule(fotg210, qh); | ||
4090 | if (status) | ||
4091 | goto done; | ||
4092 | } | ||
4093 | |||
4094 | /* then queue the urb's tds to the qh */ | ||
4095 | qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv); | ||
4096 | BUG_ON(qh == NULL); | ||
4097 | |||
4098 | /* ... update usbfs periodic stats */ | ||
4099 | fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs++; | ||
4100 | |||
4101 | done: | ||
4102 | if (unlikely(status)) | ||
4103 | usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb); | ||
4104 | done_not_linked: | ||
4105 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
4106 | if (status) | ||
4107 | qtd_list_free(fotg210, urb, qtd_list); | ||
4108 | |||
4109 | return status; | ||
4110 | } | ||
4111 | |||
4112 | static void scan_intr(struct fotg210_hcd *fotg210) | ||
4113 | { | ||
4114 | struct fotg210_qh *qh; | ||
4115 | |||
4116 | list_for_each_entry_safe(qh, fotg210->qh_scan_next, | ||
4117 | &fotg210->intr_qh_list, intr_node) { | ||
4118 | rescan: | ||
4119 | /* clean any finished work for this qh */ | ||
4120 | if (!list_empty(&qh->qtd_list)) { | ||
4121 | int temp; | ||
4122 | |||
4123 | /* | ||
4124 | * Unlinks could happen here; completion reporting | ||
4125 | * drops the lock. That's why fotg210->qh_scan_next | ||
4126 | * always holds the next qh to scan; if the next qh | ||
4127 | * gets unlinked then fotg210->qh_scan_next is adjusted | ||
4128 | * in qh_unlink_periodic(). | ||
4129 | */ | ||
4130 | temp = qh_completions(fotg210, qh); | ||
4131 | if (unlikely(qh->needs_rescan || | ||
4132 | (list_empty(&qh->qtd_list) && | ||
4133 | qh->qh_state == QH_STATE_LINKED))) | ||
4134 | start_unlink_intr(fotg210, qh); | ||
4135 | else if (temp != 0) | ||
4136 | goto rescan; | ||
4137 | } | ||
4138 | } | ||
4139 | } | ||
4140 | |||
4141 | /*-------------------------------------------------------------------------*/ | ||
4142 | |||
4143 | /* fotg210_iso_stream ops work with both ITD and SITD */ | ||
4144 | |||
4145 | static struct fotg210_iso_stream * | ||
4146 | iso_stream_alloc(gfp_t mem_flags) | ||
4147 | { | ||
4148 | struct fotg210_iso_stream *stream; | ||
4149 | |||
4150 | stream = kzalloc(sizeof(*stream), mem_flags); | ||
4151 | if (likely(stream != NULL)) { | ||
4152 | INIT_LIST_HEAD(&stream->td_list); | ||
4153 | INIT_LIST_HEAD(&stream->free_list); | ||
4154 | stream->next_uframe = -1; | ||
4155 | } | ||
4156 | return stream; | ||
4157 | } | ||
4158 | |||
4159 | static void | ||
4160 | iso_stream_init( | ||
4161 | struct fotg210_hcd *fotg210, | ||
4162 | struct fotg210_iso_stream *stream, | ||
4163 | struct usb_device *dev, | ||
4164 | int pipe, | ||
4165 | unsigned interval | ||
4166 | ) | ||
4167 | { | ||
4168 | u32 buf1; | ||
4169 | unsigned epnum, maxp; | ||
4170 | int is_input; | ||
4171 | long bandwidth; | ||
4172 | unsigned multi; | ||
4173 | |||
4174 | /* | ||
4175 | * this might be a "high bandwidth" highspeed endpoint, | ||
4176 | * as encoded in the ep descriptor's wMaxPacket field | ||
4177 | */ | ||
4178 | epnum = usb_pipeendpoint(pipe); | ||
4179 | is_input = usb_pipein(pipe) ? USB_DIR_IN : 0; | ||
4180 | maxp = usb_maxpacket(dev, pipe, !is_input); | ||
4181 | if (is_input) | ||
4182 | buf1 = (1 << 11); | ||
4183 | else | ||
4184 | buf1 = 0; | ||
4185 | |||
4186 | maxp = max_packet(maxp); | ||
4187 | multi = hb_mult(maxp); | ||
4188 | buf1 |= maxp; | ||
4189 | maxp *= multi; | ||
4190 | |||
4191 | stream->buf0 = cpu_to_hc32(fotg210, (epnum << 8) | dev->devnum); | ||
4192 | stream->buf1 = cpu_to_hc32(fotg210, buf1); | ||
4193 | stream->buf2 = cpu_to_hc32(fotg210, multi); | ||
4194 | |||
4195 | /* usbfs wants to report the average usecs per frame tied up | ||
4196 | * when transfers on this endpoint are scheduled ... | ||
4197 | */ | ||
4198 | if (dev->speed == USB_SPEED_FULL) { | ||
4199 | interval <<= 3; | ||
4200 | stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed, | ||
4201 | is_input, 1, maxp)); | ||
4202 | stream->usecs /= 8; | ||
4203 | } else { | ||
4204 | stream->highspeed = 1; | ||
4205 | stream->usecs = HS_USECS_ISO(maxp); | ||
4206 | } | ||
4207 | bandwidth = stream->usecs * 8; | ||
4208 | bandwidth /= interval; | ||
4209 | |||
4210 | stream->bandwidth = bandwidth; | ||
4211 | stream->udev = dev; | ||
4212 | stream->bEndpointAddress = is_input | epnum; | ||
4213 | stream->interval = interval; | ||
4214 | stream->maxp = maxp; | ||
4215 | } | ||
4216 | |||
4217 | static struct fotg210_iso_stream * | ||
4218 | iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb) | ||
4219 | { | ||
4220 | unsigned epnum; | ||
4221 | struct fotg210_iso_stream *stream; | ||
4222 | struct usb_host_endpoint *ep; | ||
4223 | unsigned long flags; | ||
4224 | |||
4225 | epnum = usb_pipeendpoint(urb->pipe); | ||
4226 | if (usb_pipein(urb->pipe)) | ||
4227 | ep = urb->dev->ep_in[epnum]; | ||
4228 | else | ||
4229 | ep = urb->dev->ep_out[epnum]; | ||
4230 | |||
4231 | spin_lock_irqsave(&fotg210->lock, flags); | ||
4232 | stream = ep->hcpriv; | ||
4233 | |||
4234 | if (unlikely(stream == NULL)) { | ||
4235 | stream = iso_stream_alloc(GFP_ATOMIC); | ||
4236 | if (likely(stream != NULL)) { | ||
4237 | ep->hcpriv = stream; | ||
4238 | stream->ep = ep; | ||
4239 | iso_stream_init(fotg210, stream, urb->dev, urb->pipe, | ||
4240 | urb->interval); | ||
4241 | } | ||
4242 | |||
4243 | /* if dev->ep[epnum] is a QH, hw is set */ | ||
4244 | } else if (unlikely(stream->hw != NULL)) { | ||
4245 | fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n", | ||
4246 | urb->dev->devpath, epnum, | ||
4247 | usb_pipein(urb->pipe) ? "in" : "out"); | ||
4248 | stream = NULL; | ||
4249 | } | ||
4250 | |||
4251 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
4252 | return stream; | ||
4253 | } | ||
4254 | |||
4255 | /*-------------------------------------------------------------------------*/ | ||
4256 | |||
4257 | /* fotg210_iso_sched ops can be ITD-only or SITD-only */ | ||
4258 | |||
4259 | static struct fotg210_iso_sched * | ||
4260 | iso_sched_alloc(unsigned packets, gfp_t mem_flags) | ||
4261 | { | ||
4262 | struct fotg210_iso_sched *iso_sched; | ||
4263 | int size = sizeof(*iso_sched); | ||
4264 | |||
4265 | size += packets * sizeof(struct fotg210_iso_packet); | ||
4266 | iso_sched = kzalloc(size, mem_flags); | ||
4267 | if (likely(iso_sched != NULL)) | ||
4268 | INIT_LIST_HEAD(&iso_sched->td_list); | ||
4269 | |||
4270 | return iso_sched; | ||
4271 | } | ||
4272 | |||
4273 | static inline void | ||
4274 | itd_sched_init( | ||
4275 | struct fotg210_hcd *fotg210, | ||
4276 | struct fotg210_iso_sched *iso_sched, | ||
4277 | struct fotg210_iso_stream *stream, | ||
4278 | struct urb *urb | ||
4279 | ) | ||
4280 | { | ||
4281 | unsigned i; | ||
4282 | dma_addr_t dma = urb->transfer_dma; | ||
4283 | |||
4284 | /* how many uframes are needed for these transfers */ | ||
4285 | iso_sched->span = urb->number_of_packets * stream->interval; | ||
4286 | |||
4287 | /* figure out per-uframe itd fields that we'll need later | ||
4288 | * when we fit new itds into the schedule. | ||
4289 | */ | ||
4290 | for (i = 0; i < urb->number_of_packets; i++) { | ||
4291 | struct fotg210_iso_packet *uframe = &iso_sched->packet[i]; | ||
4292 | unsigned length; | ||
4293 | dma_addr_t buf; | ||
4294 | u32 trans; | ||
4295 | |||
4296 | length = urb->iso_frame_desc[i].length; | ||
4297 | buf = dma + urb->iso_frame_desc[i].offset; | ||
4298 | |||
4299 | trans = FOTG210_ISOC_ACTIVE; | ||
4300 | trans |= buf & 0x0fff; | ||
4301 | if (unlikely(((i + 1) == urb->number_of_packets)) | ||
4302 | && !(urb->transfer_flags & URB_NO_INTERRUPT)) | ||
4303 | trans |= FOTG210_ITD_IOC; | ||
4304 | trans |= length << 16; | ||
4305 | uframe->transaction = cpu_to_hc32(fotg210, trans); | ||
4306 | |||
4307 | /* might need to cross a buffer page within a uframe */ | ||
4308 | uframe->bufp = (buf & ~(u64)0x0fff); | ||
4309 | buf += length; | ||
4310 | if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff)))) | ||
4311 | uframe->cross = 1; | ||
4312 | } | ||
4313 | } | ||
4314 | |||
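A standalone sketch of the per-packet transaction word packed in itd_sched_init(): page offset in the low 12 bits, length above bit 16, plus ACTIVE and (for the last packet) IOC. The flag bit positions here follow the EHCI iTD layout and are assumptions for illustration, not definitions taken from fotg210.h:

	#include <stdio.h>
	#include <stdint.h>

	#define ISOC_ACTIVE	(1u << 31)	/* assumed, EHCI-style */
	#define ITD_IOC		(1u << 15)	/* assumed, EHCI-style */

	int main(void)
	{
		uint64_t buf = 0x12345678;	/* DMA address of this packet's data */
		unsigned length = 192;		/* bytes in this iso packet */
		int last_packet = 1;

		uint32_t trans = ISOC_ACTIVE;
		trans |= buf & 0x0fff;		/* offset within the 4 KB page */
		trans |= length << 16;
		if (last_packet)
			trans |= ITD_IOC;	/* interrupt when the URB completes */

		printf("hw_transaction 0x%08x, page base 0x%llx\n",
				trans, (unsigned long long)(buf & ~(uint64_t)0x0fff));
		return 0;
	}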
4315 | static void | ||
4316 | iso_sched_free( | ||
4317 | struct fotg210_iso_stream *stream, | ||
4318 | struct fotg210_iso_sched *iso_sched | ||
4319 | ) | ||
4320 | { | ||
4321 | if (!iso_sched) | ||
4322 | return; | ||
4323 | /* caller must hold fotg210->lock! */ | ||
4324 | list_splice(&iso_sched->td_list, &stream->free_list); | ||
4325 | kfree(iso_sched); | ||
4326 | } | ||
4327 | |||
4328 | static int | ||
4329 | itd_urb_transaction( | ||
4330 | struct fotg210_iso_stream *stream, | ||
4331 | struct fotg210_hcd *fotg210, | ||
4332 | struct urb *urb, | ||
4333 | gfp_t mem_flags | ||
4334 | ) | ||
4335 | { | ||
4336 | struct fotg210_itd *itd; | ||
4337 | dma_addr_t itd_dma; | ||
4338 | int i; | ||
4339 | unsigned num_itds; | ||
4340 | struct fotg210_iso_sched *sched; | ||
4341 | unsigned long flags; | ||
4342 | |||
4343 | sched = iso_sched_alloc(urb->number_of_packets, mem_flags); | ||
4344 | if (unlikely(sched == NULL)) | ||
4345 | return -ENOMEM; | ||
4346 | |||
4347 | itd_sched_init(fotg210, sched, stream, urb); | ||
4348 | |||
4349 | if (urb->interval < 8) | ||
4350 | num_itds = 1 + (sched->span + 7) / 8; | ||
4351 | else | ||
4352 | num_itds = urb->number_of_packets; | ||
4353 | |||
4354 | /* allocate/init ITDs */ | ||
4355 | spin_lock_irqsave(&fotg210->lock, flags); | ||
4356 | for (i = 0; i < num_itds; i++) { | ||
4357 | |||
4358 | /* | ||
4359 | * Use iTDs from the free list, but not iTDs that may | ||
4360 | * still be in use by the hardware. | ||
4361 | */ | ||
4362 | if (likely(!list_empty(&stream->free_list))) { | ||
4363 | itd = list_first_entry(&stream->free_list, | ||
4364 | struct fotg210_itd, itd_list); | ||
4365 | if (itd->frame == fotg210->now_frame) | ||
4366 | goto alloc_itd; | ||
4367 | list_del(&itd->itd_list); | ||
4368 | itd_dma = itd->itd_dma; | ||
4369 | } else { | ||
4370 | alloc_itd: | ||
4371 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
4372 | itd = dma_pool_alloc(fotg210->itd_pool, mem_flags, | ||
4373 | &itd_dma); | ||
4374 | spin_lock_irqsave(&fotg210->lock, flags); | ||
4375 | if (!itd) { | ||
4376 | iso_sched_free(stream, sched); | ||
4377 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
4378 | return -ENOMEM; | ||
4379 | } | ||
4380 | } | ||
4381 | |||
4382 | memset(itd, 0, sizeof(*itd)); | ||
4383 | itd->itd_dma = itd_dma; | ||
4384 | list_add(&itd->itd_list, &sched->td_list); | ||
4385 | } | ||
4386 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
4387 | |||
4388 | /* temporarily store schedule info in hcpriv */ | ||
4389 | urb->hcpriv = sched; | ||
4390 | urb->error_count = 0; | ||
4391 | return 0; | ||
4392 | } | ||
4393 | |||
4394 | /*-------------------------------------------------------------------------*/ | ||
4395 | |||
4396 | static inline int | ||
4397 | itd_slot_ok( | ||
4398 | struct fotg210_hcd *fotg210, | ||
4399 | u32 mod, | ||
4400 | u32 uframe, | ||
4401 | u8 usecs, | ||
4402 | u32 period | ||
4403 | ) | ||
4404 | { | ||
4405 | uframe %= period; | ||
4406 | do { | ||
4407 | /* can't commit more than uframe_periodic_max usec */ | ||
4408 | if (periodic_usecs(fotg210, uframe >> 3, uframe & 0x7) | ||
4409 | > (fotg210->uframe_periodic_max - usecs)) | ||
4410 | return 0; | ||
4411 | |||
4412 | /* we know urb->interval is 2^N uframes */ | ||
4413 | uframe += period; | ||
4414 | } while (uframe < mod); | ||
4415 | return 1; | ||
4416 | } | ||
4417 | |||
4418 | /* | ||
4419 | * This scheduler plans almost as far into the future as it has actual | ||
4420 | * periodic schedule slots. (Affected by TUNE_FLS, which defaults to | ||
4421 | * "as small as possible" to be cache-friendlier.) That limits the size | ||
4422 | * transfers you can stream reliably; avoid more than 64 msec per urb. | ||
4423 | * Also avoid queue depths of less than fotg210's worst irq latency (affected | ||
4424 | * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter, | ||
4425 | * and other factors); or more than about 230 msec total (for portability, | ||
4426 | * given FOTG210_TUNE_FLS and the slop). Or, write a smarter scheduler! | ||
4427 | */ | ||
4428 | |||
4429 | #define SCHEDULE_SLOP 80 /* microframes */ | ||
4430 | |||
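To put numbers on the window the comment above describes, here is a standalone arithmetic sketch (assuming a 256-frame periodic list and an every-uframe stream) of the first length check in iso_stream_schedule():

	#include <stdio.h>

	#define SCHEDULE_SLOP 80	/* microframes, matching the define above */

	int main(void)
	{
		unsigned periodic_size = 256;		/* frames */
		unsigned mod = periodic_size << 3;	/* 2048 uframes of schedule */
		unsigned interval = 1;			/* one packet per uframe */
		unsigned packets = 2000;
		unsigned span = packets * interval;	/* uframes covered by this URB */

		if (span > mod - SCHEDULE_SLOP)
			printf("span %u > %u: too long, -EFBIG\n", span, mod - SCHEDULE_SLOP);
		else
			printf("span %u fits the %u-uframe window\n", span, mod);
		return 0;
	}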
4431 | static int | ||
4432 | iso_stream_schedule( | ||
4433 | struct fotg210_hcd *fotg210, | ||
4434 | struct urb *urb, | ||
4435 | struct fotg210_iso_stream *stream | ||
4436 | ) | ||
4437 | { | ||
4438 | u32 now, next, start, period, span; | ||
4439 | int status; | ||
4440 | unsigned mod = fotg210->periodic_size << 3; | ||
4441 | struct fotg210_iso_sched *sched = urb->hcpriv; | ||
4442 | |||
4443 | period = urb->interval; | ||
4444 | span = sched->span; | ||
4445 | |||
4446 | if (span > mod - SCHEDULE_SLOP) { | ||
4447 | fotg210_dbg(fotg210, "iso request %p too long\n", urb); | ||
4448 | status = -EFBIG; | ||
4449 | goto fail; | ||
4450 | } | ||
4451 | |||
4452 | now = fotg210_read_frame_index(fotg210) & (mod - 1); | ||
4453 | |||
4454 | /* Typical case: reuse current schedule, stream is still active. | ||
4455 | * Hopefully there are no gaps from the host falling behind | ||
4456 | * (irq delays etc), but if there are we'll take the next | ||
4457 | * slot in the schedule, implicitly assuming URB_ISO_ASAP. | ||
4458 | */ | ||
4459 | if (likely(!list_empty(&stream->td_list))) { | ||
4460 | u32 excess; | ||
4461 | |||
4462 | /* For high speed devices, allow scheduling within the | ||
4463 | * isochronous scheduling threshold. For full speed devices | ||
4464 | * and Intel PCI-based controllers, don't (work around for | ||
4465 | * Intel ICH9 bug). | ||
4466 | */ | ||
4467 | if (!stream->highspeed && fotg210->fs_i_thresh) | ||
4468 | next = now + fotg210->i_thresh; | ||
4469 | else | ||
4470 | next = now; | ||
4471 | |||
4472 | /* Fell behind (by up to twice the slop amount)? | ||
4473 | * We decide based on the time of the last currently-scheduled | ||
4474 | * slot, not the time of the next available slot. | ||
4475 | */ | ||
4476 | excess = (stream->next_uframe - period - next) & (mod - 1); | ||
4477 | if (excess >= mod - 2 * SCHEDULE_SLOP) | ||
4478 | start = next + excess - mod + period * | ||
4479 | DIV_ROUND_UP(mod - excess, period); | ||
4480 | else | ||
4481 | start = next + excess + period; | ||
4482 | if (start - now >= mod) { | ||
4483 | fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n", | ||
4484 | urb, start - now - period, period, | ||
4485 | mod); | ||
4486 | status = -EFBIG; | ||
4487 | goto fail; | ||
4488 | } | ||
4489 | } | ||
4490 | |||
4491 | /* need to schedule; when's the next (u)frame we could start? | ||
4492 | * this is bigger than fotg210->i_thresh allows; scheduling itself | ||
4493 | * isn't free, the slop should handle reasonably slow cpus. it | ||
4494 | * can also help high bandwidth if the dma and irq loads don't | ||
4495 | * jump until after the queue is primed. | ||
4496 | */ | ||
4497 | else { | ||
4498 | int done = 0; | ||
4499 | start = SCHEDULE_SLOP + (now & ~0x07); | ||
4500 | |||
4501 | /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ | ||
4502 | |||
4503 | /* find a uframe slot with enough bandwidth. | ||
4504 | * Early uframes are more precious because full-speed | ||
4505 | * iso IN transfers can't use late uframes, | ||
4506 | * and therefore they should be allocated last. | ||
4507 | */ | ||
4508 | next = start; | ||
4509 | start += period; | ||
4510 | do { | ||
4511 | start--; | ||
4512 | /* check schedule: enough space? */ | ||
4513 | if (itd_slot_ok(fotg210, mod, start, | ||
4514 | stream->usecs, period)) | ||
4515 | done = 1; | ||
4516 | } while (start > next && !done); | ||
4517 | |||
4518 | /* no room in the schedule */ | ||
4519 | if (!done) { | ||
4520 | fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n", | ||
4521 | urb, now, now + mod); | ||
4522 | status = -ENOSPC; | ||
4523 | goto fail; | ||
4524 | } | ||
4525 | } | ||
4526 | |||
4527 | /* Tried to schedule too far into the future? */ | ||
4528 | if (unlikely(start - now + span - period | ||
4529 | >= mod - 2 * SCHEDULE_SLOP)) { | ||
4530 | fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n", | ||
4531 | urb, start - now, span - period, | ||
4532 | mod - 2 * SCHEDULE_SLOP); | ||
4533 | status = -EFBIG; | ||
4534 | goto fail; | ||
4535 | } | ||
4536 | |||
4537 | stream->next_uframe = start & (mod - 1); | ||
4538 | |||
4539 | /* report high speed start in uframes; full speed, in frames */ | ||
4540 | urb->start_frame = stream->next_uframe; | ||
4541 | if (!stream->highspeed) | ||
4542 | urb->start_frame >>= 3; | ||
4543 | |||
4544 | /* Make sure scan_isoc() sees these */ | ||
4545 | if (fotg210->isoc_count == 0) | ||
4546 | fotg210->next_frame = now >> 3; | ||
4547 | return 0; | ||
4548 | |||
4549 | fail: | ||
4550 | iso_sched_free(stream, sched); | ||
4551 | urb->hcpriv = NULL; | ||
4552 | return status; | ||
4553 | } | ||
4554 | |||
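The reuse path in iso_stream_schedule() leans on mod being a power of two (periodic_size << 3), so "& (mod - 1)" performs the modulo and unsigned subtraction wraps naturally; "excess" measures how far past the earliest usable uframe the last already-scheduled slot sits, and a value close to mod means the stream has fallen behind. A small sketch of that arithmetic with plain unsigned values follows; the numbers are made up and DIV_ROUND_UP is spelled out explicitly.

/*
 * Sketch of the modulo-'mod' arithmetic used when reusing an active
 * stream's schedule.  'mod' must be a power of two for the masking to
 * work; the numbers below are illustrative only.
 */
#include <stdio.h>

#define SLOP	80		/* SCHEDULE_SLOP, in uframes */

int main(void)
{
	unsigned mod = 1024 << 3;		/* periodic_size << 3 */
	unsigned period = 8;			/* urb->interval, in uframes */
	unsigned next = 8100;			/* earliest usable uframe */
	unsigned next_uframe = 8032;		/* stream->next_uframe */
	unsigned excess, start;

	/* distance from 'next' to the last scheduled slot, mod 'mod' */
	excess = (next_uframe - period - next) & (mod - 1);

	if (excess >= mod - 2 * SLOP)
		/* fell behind: jump forward whole periods past 'next' */
		start = next + excess - mod +
			period * ((mod - excess + period - 1) / period);
	else
		start = next + excess + period;

	printf("excess=%u start=%u (masked: %u)\n",
	       excess, start, start & (mod - 1));
	return 0;
}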
4555 | /*-------------------------------------------------------------------------*/ | ||
4556 | |||
4557 | static inline void | ||
4558 | itd_init(struct fotg210_hcd *fotg210, struct fotg210_iso_stream *stream, | ||
4559 | struct fotg210_itd *itd) | ||
4560 | { | ||
4561 | int i; | ||
4562 | |||
4563 | /* it's been recently zeroed */ | ||
4564 | itd->hw_next = FOTG210_LIST_END(fotg210); | ||
4565 | itd->hw_bufp[0] = stream->buf0; | ||
4566 | itd->hw_bufp[1] = stream->buf1; | ||
4567 | itd->hw_bufp[2] = stream->buf2; | ||
4568 | |||
4569 | for (i = 0; i < 8; i++) | ||
4570 | itd->index[i] = -1; | ||
4571 | |||
4572 | /* All other fields are filled when scheduling */ | ||
4573 | } | ||
4574 | |||
4575 | static inline void | ||
4576 | itd_patch( | ||
4577 | struct fotg210_hcd *fotg210, | ||
4578 | struct fotg210_itd *itd, | ||
4579 | struct fotg210_iso_sched *iso_sched, | ||
4580 | unsigned index, | ||
4581 | u16 uframe | ||
4582 | ) | ||
4583 | { | ||
4584 | struct fotg210_iso_packet *uf = &iso_sched->packet[index]; | ||
4585 | unsigned pg = itd->pg; | ||
4586 | |||
4587 | uframe &= 0x07; | ||
4588 | itd->index[uframe] = index; | ||
4589 | |||
4590 | itd->hw_transaction[uframe] = uf->transaction; | ||
4591 | itd->hw_transaction[uframe] |= cpu_to_hc32(fotg210, pg << 12); | ||
4592 | itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, uf->bufp & ~(u32)0); | ||
4593 | itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(uf->bufp >> 32)); | ||
4594 | |||
4595 | /* iso_frame_desc[].offset must be strictly increasing */ | ||
4596 | if (unlikely(uf->cross)) { | ||
4597 | u64 bufp = uf->bufp + 4096; | ||
4598 | |||
4599 | itd->pg = ++pg; | ||
4600 | itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0); | ||
4601 | itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(bufp >> 32)); | ||
4602 | } | ||
4603 | } | ||
4604 | |||
4605 | static inline void | ||
4606 | itd_link(struct fotg210_hcd *fotg210, unsigned frame, struct fotg210_itd *itd) | ||
4607 | { | ||
4608 | union fotg210_shadow *prev = &fotg210->pshadow[frame]; | ||
4609 | __hc32 *hw_p = &fotg210->periodic[frame]; | ||
4610 | union fotg210_shadow here = *prev; | ||
4611 | __hc32 type = 0; | ||
4612 | |||
4613 | /* skip any iso nodes which might belong to previous microframes */ | ||
4614 | while (here.ptr) { | ||
4615 | type = Q_NEXT_TYPE(fotg210, *hw_p); | ||
4616 | if (type == cpu_to_hc32(fotg210, Q_TYPE_QH)) | ||
4617 | break; | ||
4618 | prev = periodic_next_shadow(fotg210, prev, type); | ||
4619 | hw_p = shadow_next_periodic(fotg210, &here, type); | ||
4620 | here = *prev; | ||
4621 | } | ||
4622 | |||
4623 | itd->itd_next = here; | ||
4624 | itd->hw_next = *hw_p; | ||
4625 | prev->itd = itd; | ||
4626 | itd->frame = frame; | ||
4627 | wmb(); | ||
4628 | *hw_p = cpu_to_hc32(fotg210, itd->itd_dma | Q_TYPE_ITD); | ||
4629 | } | ||
4630 | |||
4631 | /* fit urb's itds into the selected schedule slot; activate as needed */ | ||
4632 | static void itd_link_urb( | ||
4633 | struct fotg210_hcd *fotg210, | ||
4634 | struct urb *urb, | ||
4635 | unsigned mod, | ||
4636 | struct fotg210_iso_stream *stream | ||
4637 | ) | ||
4638 | { | ||
4639 | int packet; | ||
4640 | unsigned next_uframe, uframe, frame; | ||
4641 | struct fotg210_iso_sched *iso_sched = urb->hcpriv; | ||
4642 | struct fotg210_itd *itd; | ||
4643 | |||
4644 | next_uframe = stream->next_uframe & (mod - 1); | ||
4645 | |||
4646 | if (unlikely(list_empty(&stream->td_list))) { | ||
4647 | fotg210_to_hcd(fotg210)->self.bandwidth_allocated | ||
4648 | += stream->bandwidth; | ||
4649 | fotg210_vdbg(fotg210, | ||
4650 | "schedule devp %s ep%d%s-iso period %d start %d.%d\n", | ||
4651 | urb->dev->devpath, stream->bEndpointAddress & 0x0f, | ||
4652 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out", | ||
4653 | urb->interval, | ||
4654 | next_uframe >> 3, next_uframe & 0x7); | ||
4655 | } | ||
4656 | |||
4657 | /* fill iTDs uframe by uframe */ | ||
4658 | for (packet = 0, itd = NULL; packet < urb->number_of_packets;) { | ||
4659 | if (itd == NULL) { | ||
4660 | /* ASSERT: we have all necessary itds */ | ||
4661 | |||
4662 | /* ASSERT: no itds for this endpoint in this uframe */ | ||
4663 | |||
4664 | itd = list_entry(iso_sched->td_list.next, | ||
4665 | struct fotg210_itd, itd_list); | ||
4666 | list_move_tail(&itd->itd_list, &stream->td_list); | ||
4667 | itd->stream = stream; | ||
4668 | itd->urb = urb; | ||
4669 | itd_init(fotg210, stream, itd); | ||
4670 | } | ||
4671 | |||
4672 | uframe = next_uframe & 0x07; | ||
4673 | frame = next_uframe >> 3; | ||
4674 | |||
4675 | itd_patch(fotg210, itd, iso_sched, packet, uframe); | ||
4676 | |||
4677 | next_uframe += stream->interval; | ||
4678 | next_uframe &= mod - 1; | ||
4679 | packet++; | ||
4680 | |||
4681 | /* link completed itds into the schedule */ | ||
4682 | if (((next_uframe >> 3) != frame) | ||
4683 | || packet == urb->number_of_packets) { | ||
4684 | itd_link(fotg210, frame & (fotg210->periodic_size - 1), | ||
4685 | itd); | ||
4686 | itd = NULL; | ||
4687 | } | ||
4688 | } | ||
4689 | stream->next_uframe = next_uframe; | ||
4690 | |||
4691 | /* don't need that schedule data any more */ | ||
4692 | iso_sched_free(stream, iso_sched); | ||
4693 | urb->hcpriv = NULL; | ||
4694 | |||
4695 | ++fotg210->isoc_count; | ||
4696 | enable_periodic(fotg210); | ||
4697 | } | ||
4698 | |||
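itd_link_urb() above turns each packet index into a (frame, uframe) pair by stepping next_uframe by stream->interval, and flushes the current iTD into the frame list whenever the frame number changes, since one iTD covers at most the 8 microframes of a single frame. A stand-alone sketch of that walk, with made-up interval and packet counts:

/*
 * Sketch of how packets land in (frame, uframe) slots and when a new
 * iTD would be linked.  Interval, start uframe and packet count are
 * example values only.
 */
#include <stdio.h>

int main(void)
{
	unsigned mod = 1024 << 3;	/* schedule length in uframes */
	unsigned next_uframe = 42;	/* stream->next_uframe (example) */
	unsigned interval = 2;		/* stream->interval, in uframes */
	unsigned packets = 12;		/* urb->number_of_packets */
	unsigned packet, frame, uframe;

	for (packet = 0; packet < packets; packet++) {
		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;
		printf("packet %2u -> frame %u uframe %u\n",
		       packet, frame, uframe);

		next_uframe = (next_uframe + interval) & (mod - 1);
		if ((next_uframe >> 3) != frame || packet + 1 == packets)
			printf("  -- link iTD into frame %u\n", frame);
	}
	return 0;
}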
4699 | #define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\ | ||
4700 | FOTG210_ISOC_XACTERR) | ||
4701 | |||
4702 | /* Process and recycle a completed ITD. Return true iff its urb completed, | ||
4703 | * and hence its completion callback probably added things to the hardware | ||
4704 | * schedule. | ||
4705 | * | ||
4706 | * Note that we carefully avoid recycling this descriptor until after any | ||
4707 | * completion callback runs, so that it won't be reused quickly. That is, | ||
4708 | * assuming (a) no more than two urbs per frame on this endpoint, and also | ||
4709 | * (b) only this endpoint's completions submit URBs. It seems some silicon | ||
4710 | * corrupts things if you reuse completed descriptors very quickly... | ||
4711 | */ | ||
4712 | static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd) | ||
4713 | { | ||
4714 | struct urb *urb = itd->urb; | ||
4715 | struct usb_iso_packet_descriptor *desc; | ||
4716 | u32 t; | ||
4717 | unsigned uframe; | ||
4718 | int urb_index = -1; | ||
4719 | struct fotg210_iso_stream *stream = itd->stream; | ||
4720 | struct usb_device *dev; | ||
4721 | bool retval = false; | ||
4722 | |||
4723 | /* for each uframe with a packet */ | ||
4724 | for (uframe = 0; uframe < 8; uframe++) { | ||
4725 | if (likely(itd->index[uframe] == -1)) | ||
4726 | continue; | ||
4727 | urb_index = itd->index[uframe]; | ||
4728 | desc = &urb->iso_frame_desc[urb_index]; | ||
4729 | |||
4730 | t = hc32_to_cpup(fotg210, &itd->hw_transaction[uframe]); | ||
4731 | itd->hw_transaction[uframe] = 0; | ||
4732 | |||
4733 | /* report transfer status */ | ||
4734 | if (unlikely(t & ISO_ERRS)) { | ||
4735 | urb->error_count++; | ||
4736 | if (t & FOTG210_ISOC_BUF_ERR) | ||
4737 | desc->status = usb_pipein(urb->pipe) | ||
4738 | ? -ENOSR /* hc couldn't read */ | ||
4739 | : -ECOMM; /* hc couldn't write */ | ||
4740 | else if (t & FOTG210_ISOC_BABBLE) | ||
4741 | desc->status = -EOVERFLOW; | ||
4742 | else /* (t & FOTG210_ISOC_XACTERR) */ | ||
4743 | desc->status = -EPROTO; | ||
4744 | |||
4745 | /* HC need not update length with this error */ | ||
4746 | if (!(t & FOTG210_ISOC_BABBLE)) { | ||
4747 | desc->actual_length = | ||
4748 | fotg210_itdlen(urb, desc, t); | ||
4749 | urb->actual_length += desc->actual_length; | ||
4750 | } | ||
4751 | } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) { | ||
4752 | desc->status = 0; | ||
4753 | desc->actual_length = fotg210_itdlen(urb, desc, t); | ||
4754 | urb->actual_length += desc->actual_length; | ||
4755 | } else { | ||
4756 | /* URB was too late */ | ||
4757 | desc->status = -EXDEV; | ||
4758 | } | ||
4759 | } | ||
4760 | |||
4761 | /* handle completion now? */ | ||
4762 | if (likely((urb_index + 1) != urb->number_of_packets)) | ||
4763 | goto done; | ||
4764 | |||
4765 | /* ASSERT: it's really the last itd for this urb | ||
4766 | list_for_each_entry (itd, &stream->td_list, itd_list) | ||
4767 | BUG_ON (itd->urb == urb); | ||
4768 | */ | ||
4769 | |||
4770 | /* give urb back to the driver; completion often (re)submits */ | ||
4771 | dev = urb->dev; | ||
4772 | fotg210_urb_done(fotg210, urb, 0); | ||
4773 | retval = true; | ||
4774 | urb = NULL; | ||
4775 | |||
4776 | --fotg210->isoc_count; | ||
4777 | disable_periodic(fotg210); | ||
4778 | |||
4779 | if (unlikely(list_is_singular(&stream->td_list))) { | ||
4780 | fotg210_to_hcd(fotg210)->self.bandwidth_allocated | ||
4781 | -= stream->bandwidth; | ||
4782 | fotg210_vdbg(fotg210, | ||
4783 | "deschedule devp %s ep%d%s-iso\n", | ||
4784 | dev->devpath, stream->bEndpointAddress & 0x0f, | ||
4785 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); | ||
4786 | } | ||
4787 | |||
4788 | done: | ||
4789 | itd->urb = NULL; | ||
4790 | |||
4791 | /* Add to the end of the free list for later reuse */ | ||
4792 | list_move_tail(&itd->itd_list, &stream->free_list); | ||
4793 | |||
4794 | /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */ | ||
4795 | if (list_empty(&stream->td_list)) { | ||
4796 | list_splice_tail_init(&stream->free_list, | ||
4797 | &fotg210->cached_itd_list); | ||
4798 | start_free_itds(fotg210); | ||
4799 | } | ||
4800 | |||
4801 | return retval; | ||
4802 | } | ||
4803 | |||
4804 | /*-------------------------------------------------------------------------*/ | ||
4805 | |||
4806 | static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb, | ||
4807 | gfp_t mem_flags) | ||
4808 | { | ||
4809 | int status = -EINVAL; | ||
4810 | unsigned long flags; | ||
4811 | struct fotg210_iso_stream *stream; | ||
4812 | |||
4813 | /* Get iso_stream head */ | ||
4814 | stream = iso_stream_find(fotg210, urb); | ||
4815 | if (unlikely(stream == NULL)) { | ||
4816 | fotg210_dbg(fotg210, "can't get iso stream\n"); | ||
4817 | return -ENOMEM; | ||
4818 | } | ||
4819 | if (unlikely(urb->interval != stream->interval && | ||
4820 | fotg210_port_speed(fotg210, 0) == | ||
4821 | USB_PORT_STAT_HIGH_SPEED)) { | ||
4822 | fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n", | ||
4823 | stream->interval, urb->interval); | ||
4824 | goto done; | ||
4825 | } | ||
4826 | |||
4827 | #ifdef FOTG210_URB_TRACE | ||
4828 | fotg210_dbg(fotg210, | ||
4829 | "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n", | ||
4830 | __func__, urb->dev->devpath, urb, | ||
4831 | usb_pipeendpoint(urb->pipe), | ||
4832 | usb_pipein(urb->pipe) ? "in" : "out", | ||
4833 | urb->transfer_buffer_length, | ||
4834 | urb->number_of_packets, urb->interval, | ||
4835 | stream); | ||
4836 | #endif | ||
4837 | |||
4838 | /* allocate ITDs w/o locking anything */ | ||
4839 | status = itd_urb_transaction(stream, fotg210, urb, mem_flags); | ||
4840 | if (unlikely(status < 0)) { | ||
4841 | fotg210_dbg(fotg210, "can't init itds\n"); | ||
4842 | goto done; | ||
4843 | } | ||
4844 | |||
4845 | /* schedule ... need to lock */ | ||
4846 | spin_lock_irqsave(&fotg210->lock, flags); | ||
4847 | if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) { | ||
4848 | status = -ESHUTDOWN; | ||
4849 | goto done_not_linked; | ||
4850 | } | ||
4851 | status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb); | ||
4852 | if (unlikely(status)) | ||
4853 | goto done_not_linked; | ||
4854 | status = iso_stream_schedule(fotg210, urb, stream); | ||
4855 | if (likely(status == 0)) | ||
4856 | itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream); | ||
4857 | else | ||
4858 | usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb); | ||
4859 | done_not_linked: | ||
4860 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
4861 | done: | ||
4862 | return status; | ||
4863 | } | ||
4864 | |||
4865 | /*-------------------------------------------------------------------------*/ | ||
4866 | |||
4867 | static void scan_isoc(struct fotg210_hcd *fotg210) | ||
4868 | { | ||
4869 | unsigned uf, now_frame, frame; | ||
4870 | unsigned fmask = fotg210->periodic_size - 1; | ||
4871 | bool modified, live; | ||
4872 | |||
4873 | /* | ||
4874 | * When running, scan from last scan point up to "now" | ||
4875 | * else clean up by scanning everything that's left. | ||
4876 | * Touches as few pages as possible: cache-friendly. | ||
4877 | */ | ||
4878 | if (fotg210->rh_state >= FOTG210_RH_RUNNING) { | ||
4879 | uf = fotg210_read_frame_index(fotg210); | ||
4880 | now_frame = (uf >> 3) & fmask; | ||
4881 | live = true; | ||
4882 | } else { | ||
4883 | now_frame = (fotg210->next_frame - 1) & fmask; | ||
4884 | live = false; | ||
4885 | } | ||
4886 | fotg210->now_frame = now_frame; | ||
4887 | |||
4888 | frame = fotg210->next_frame; | ||
4889 | for (;;) { | ||
4890 | union fotg210_shadow q, *q_p; | ||
4891 | __hc32 type, *hw_p; | ||
4892 | |||
4893 | restart: | ||
4894 | /* scan each element in frame's queue for completions */ | ||
4895 | q_p = &fotg210->pshadow[frame]; | ||
4896 | hw_p = &fotg210->periodic[frame]; | ||
4897 | q.ptr = q_p->ptr; | ||
4898 | type = Q_NEXT_TYPE(fotg210, *hw_p); | ||
4899 | modified = false; | ||
4900 | |||
4901 | while (q.ptr != NULL) { | ||
4902 | switch (hc32_to_cpu(fotg210, type)) { | ||
4903 | case Q_TYPE_ITD: | ||
4904 | /* If this ITD is still active, leave it for | ||
4905 | * later processing ... check the next entry. | ||
4906 | * No need to check for activity unless the | ||
4907 | * frame is current. | ||
4908 | */ | ||
4909 | if (frame == now_frame && live) { | ||
4910 | rmb(); | ||
4911 | for (uf = 0; uf < 8; uf++) { | ||
4912 | if (q.itd->hw_transaction[uf] & | ||
4913 | ITD_ACTIVE(fotg210)) | ||
4914 | break; | ||
4915 | } | ||
4916 | if (uf < 8) { | ||
4917 | q_p = &q.itd->itd_next; | ||
4918 | hw_p = &q.itd->hw_next; | ||
4919 | type = Q_NEXT_TYPE(fotg210, | ||
4920 | q.itd->hw_next); | ||
4921 | q = *q_p; | ||
4922 | break; | ||
4923 | } | ||
4924 | } | ||
4925 | |||
4926 | /* Take finished ITDs out of the schedule | ||
4927 | * and process them: recycle, maybe report | ||
4928 | * URB completion. HC won't cache the | ||
4929 | * pointer for much longer, if at all. | ||
4930 | */ | ||
4931 | *q_p = q.itd->itd_next; | ||
4932 | *hw_p = q.itd->hw_next; | ||
4933 | type = Q_NEXT_TYPE(fotg210, q.itd->hw_next); | ||
4934 | wmb(); | ||
4935 | modified = itd_complete(fotg210, q.itd); | ||
4936 | q = *q_p; | ||
4937 | break; | ||
4938 | default: | ||
4939 | fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n", | ||
4940 | type, frame, q.ptr); | ||
4941 | /* FALL THROUGH */ | ||
4942 | case Q_TYPE_QH: | ||
4943 | case Q_TYPE_FSTN: | ||
4944 | /* End of the iTDs and siTDs */ | ||
4945 | q.ptr = NULL; | ||
4946 | break; | ||
4947 | } | ||
4948 | |||
4949 | /* assume completion callbacks modify the queue */ | ||
4950 | if (unlikely(modified && fotg210->isoc_count > 0)) | ||
4951 | goto restart; | ||
4952 | } | ||
4953 | |||
4954 | /* Stop when we have reached the current frame */ | ||
4955 | if (frame == now_frame) | ||
4956 | break; | ||
4957 | frame = (frame + 1) & fmask; | ||
4958 | } | ||
4959 | fotg210->next_frame = now_frame; | ||
4960 | } | ||
4961 | /*-------------------------------------------------------------------------*/ | ||
4962 | /* | ||
4963 | * Display / Set uframe_periodic_max | ||
4964 | */ | ||
4965 | static ssize_t show_uframe_periodic_max(struct device *dev, | ||
4966 | struct device_attribute *attr, | ||
4967 | char *buf) | ||
4968 | { | ||
4969 | struct fotg210_hcd *fotg210; | ||
4970 | int n; | ||
4971 | |||
4972 | fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev))); | ||
4973 | n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max); | ||
4974 | return n; | ||
4975 | } | ||
4976 | |||
4977 | |||
4978 | static ssize_t store_uframe_periodic_max(struct device *dev, | ||
4979 | struct device_attribute *attr, | ||
4980 | const char *buf, size_t count) | ||
4981 | { | ||
4982 | struct fotg210_hcd *fotg210; | ||
4983 | unsigned uframe_periodic_max; | ||
4984 | unsigned frame, uframe; | ||
4985 | unsigned short allocated_max; | ||
4986 | unsigned long flags; | ||
4987 | ssize_t ret; | ||
4988 | |||
4989 | fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev))); | ||
4990 | if (kstrtouint(buf, 0, &uframe_periodic_max) < 0) | ||
4991 | return -EINVAL; | ||
4992 | |||
4993 | if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) { | ||
4994 | fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n", | ||
4995 | uframe_periodic_max); | ||
4996 | return -EINVAL; | ||
4997 | } | ||
4998 | |||
4999 | ret = -EINVAL; | ||
5000 | |||
5001 | /* | ||
5002 | * lock, so that our checking does not race with possible periodic | ||
5003 | * bandwidth allocation through submitting new urbs. | ||
5004 | */ | ||
5005 | spin_lock_irqsave(&fotg210->lock, flags); | ||
5006 | |||
5007 | /* | ||
5008 | * for a request to decrease the max periodic bandwidth, we have to check | ||
5009 | * every microframe in the schedule to see whether the decrease is | ||
5010 | * possible. | ||
5011 | */ | ||
5012 | if (uframe_periodic_max < fotg210->uframe_periodic_max) { | ||
5013 | allocated_max = 0; | ||
5014 | |||
5015 | for (frame = 0; frame < fotg210->periodic_size; ++frame) | ||
5016 | for (uframe = 0; uframe < 7; ++uframe) | ||
5017 | allocated_max = max(allocated_max, | ||
5018 | periodic_usecs(fotg210, frame, uframe)); | ||
5019 | |||
5020 | if (allocated_max > uframe_periodic_max) { | ||
5021 | fotg210_info(fotg210, | ||
5022 | "cannot decrease uframe_periodic_max becase " | ||
5023 | "periodic bandwidth is already allocated " | ||
5024 | "(%u > %u)\n", | ||
5025 | allocated_max, uframe_periodic_max); | ||
5026 | goto out_unlock; | ||
5027 | } | ||
5028 | } | ||
5029 | |||
5030 | /* increasing is always ok */ | ||
5031 | |||
5032 | fotg210_info(fotg210, "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n", | ||
5033 | 100 * uframe_periodic_max/125, uframe_periodic_max); | ||
5034 | |||
5035 | if (uframe_periodic_max != 100) | ||
5036 | fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n"); | ||
5037 | |||
5038 | fotg210->uframe_periodic_max = uframe_periodic_max; | ||
5039 | ret = count; | ||
5040 | |||
5041 | out_unlock: | ||
5042 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
5043 | return ret; | ||
5044 | } | ||
5045 | |||
5046 | static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, | ||
5047 | store_uframe_periodic_max); | ||
5048 | |||
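The pair of handlers above backs a writable sysfs attribute, so the 80% default can be inspected and raised from user space; values from 100 to 124 usec per uframe (80% up to roughly 99%) are accepted. Here is a user-space sketch of the read/modify pattern; the sysfs path is an assumption and depends on how the platform device is named.

/*
 * User-space sketch for poking the attribute above.  The sysfs path is
 * an assumption -- it depends on the platform device name -- and only
 * shows the read/modify pattern.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/platform/devices/fotg210-hcd/uframe_periodic_max";
	char buf[16];
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current limit: %s", buf);	/* e.g. "100\n" */
	fclose(f);

	/* values in the [100, 124] usec range are accepted by the store op */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("110\n", f);
	fclose(f);
	return 0;
}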
5049 | static inline int create_sysfs_files(struct fotg210_hcd *fotg210) | ||
5050 | { | ||
5051 | struct device *controller = fotg210_to_hcd(fotg210)->self.controller; | ||
5052 | int i = 0; | ||
5053 | |||
5054 | if (i) | ||
5055 | goto out; | ||
5056 | |||
5057 | i = device_create_file(controller, &dev_attr_uframe_periodic_max); | ||
5058 | out: | ||
5059 | return i; | ||
5060 | } | ||
5061 | |||
5062 | static inline void remove_sysfs_files(struct fotg210_hcd *fotg210) | ||
5063 | { | ||
5064 | struct device *controller = fotg210_to_hcd(fotg210)->self.controller; | ||
5065 | |||
5066 | device_remove_file(controller, &dev_attr_uframe_periodic_max); | ||
5067 | } | ||
5068 | /*-------------------------------------------------------------------------*/ | ||
5069 | |||
5070 | /* On some systems, leaving remote wakeup enabled prevents system shutdown. | ||
5071 | * The firmware seems to think that powering off is a wakeup event! | ||
5072 | * This routine turns off remote wakeup and everything else, on all ports. | ||
5073 | */ | ||
5074 | static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210) | ||
5075 | { | ||
5076 | u32 __iomem *status_reg = &fotg210->regs->port_status; | ||
5077 | |||
5078 | fotg210_writel(fotg210, PORT_RWC_BITS, status_reg); | ||
5079 | } | ||
5080 | |||
5081 | /* | ||
5082 | * Halt HC, turn off all ports, and let the BIOS use the companion controllers. | ||
5083 | * Must be called with interrupts enabled and the lock not held. | ||
5084 | */ | ||
5085 | static void fotg210_silence_controller(struct fotg210_hcd *fotg210) | ||
5086 | { | ||
5087 | fotg210_halt(fotg210); | ||
5088 | |||
5089 | spin_lock_irq(&fotg210->lock); | ||
5090 | fotg210->rh_state = FOTG210_RH_HALTED; | ||
5091 | fotg210_turn_off_all_ports(fotg210); | ||
5092 | spin_unlock_irq(&fotg210->lock); | ||
5093 | } | ||
5094 | |||
5095 | /* fotg210_shutdown kicks in for silicon on any bus (not just pci, etc). | ||
5096 | * This forcibly disables dma and IRQs, helping kexec and other cases | ||
5097 | * where the next system software may expect clean state. | ||
5098 | */ | ||
5099 | static void fotg210_shutdown(struct usb_hcd *hcd) | ||
5100 | { | ||
5101 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5102 | |||
5103 | spin_lock_irq(&fotg210->lock); | ||
5104 | fotg210->shutdown = true; | ||
5105 | fotg210->rh_state = FOTG210_RH_STOPPING; | ||
5106 | fotg210->enabled_hrtimer_events = 0; | ||
5107 | spin_unlock_irq(&fotg210->lock); | ||
5108 | |||
5109 | fotg210_silence_controller(fotg210); | ||
5110 | |||
5111 | hrtimer_cancel(&fotg210->hrtimer); | ||
5112 | } | ||
5113 | |||
5114 | /*-------------------------------------------------------------------------*/ | ||
5115 | |||
5116 | /* | ||
5117 | * fotg210_work is called from some interrupts, timers, and so on. | ||
5118 | * it calls driver completion functions, after dropping fotg210->lock. | ||
5119 | */ | ||
5120 | static void fotg210_work(struct fotg210_hcd *fotg210) | ||
5121 | { | ||
5122 | /* another CPU may drop fotg210->lock during a schedule scan while | ||
5123 | * it reports urb completions. this flag guards against bogus | ||
5124 | * attempts at re-entrant schedule scanning. | ||
5125 | */ | ||
5126 | if (fotg210->scanning) { | ||
5127 | fotg210->need_rescan = true; | ||
5128 | return; | ||
5129 | } | ||
5130 | fotg210->scanning = true; | ||
5131 | |||
5132 | rescan: | ||
5133 | fotg210->need_rescan = false; | ||
5134 | if (fotg210->async_count) | ||
5135 | scan_async(fotg210); | ||
5136 | if (fotg210->intr_count > 0) | ||
5137 | scan_intr(fotg210); | ||
5138 | if (fotg210->isoc_count > 0) | ||
5139 | scan_isoc(fotg210); | ||
5140 | if (fotg210->need_rescan) | ||
5141 | goto rescan; | ||
5142 | fotg210->scanning = false; | ||
5143 | |||
5144 | /* the IO watchdog guards against hardware or driver bugs that | ||
5145 | * misplace IRQs, and should let us run completely without IRQs. | ||
5146 | * such lossage has been observed on both VT6202 and VT8235. | ||
5147 | */ | ||
5148 | turn_on_io_watchdog(fotg210); | ||
5149 | } | ||
5150 | |||
5151 | /* | ||
5152 | * Called when the fotg210_hcd module is removed. | ||
5153 | */ | ||
5154 | static void fotg210_stop(struct usb_hcd *hcd) | ||
5155 | { | ||
5156 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5157 | |||
5158 | fotg210_dbg(fotg210, "stop\n"); | ||
5159 | |||
5160 | /* no more interrupts ... */ | ||
5161 | |||
5162 | spin_lock_irq(&fotg210->lock); | ||
5163 | fotg210->enabled_hrtimer_events = 0; | ||
5164 | spin_unlock_irq(&fotg210->lock); | ||
5165 | |||
5166 | fotg210_quiesce(fotg210); | ||
5167 | fotg210_silence_controller(fotg210); | ||
5168 | fotg210_reset(fotg210); | ||
5169 | |||
5170 | hrtimer_cancel(&fotg210->hrtimer); | ||
5171 | remove_sysfs_files(fotg210); | ||
5172 | remove_debug_files(fotg210); | ||
5173 | |||
5174 | /* root hub is shut down separately (first, when possible) */ | ||
5175 | spin_lock_irq(&fotg210->lock); | ||
5176 | end_free_itds(fotg210); | ||
5177 | spin_unlock_irq(&fotg210->lock); | ||
5178 | fotg210_mem_cleanup(fotg210); | ||
5179 | |||
5180 | #ifdef FOTG210_STATS | ||
5181 | fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n", | ||
5182 | fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa, | ||
5183 | fotg210->stats.lost_iaa); | ||
5184 | fotg210_dbg(fotg210, "complete %ld unlink %ld\n", | ||
5185 | fotg210->stats.complete, fotg210->stats.unlink); | ||
5186 | #endif | ||
5187 | |||
5188 | dbg_status(fotg210, "fotg210_stop completed", | ||
5189 | fotg210_readl(fotg210, &fotg210->regs->status)); | ||
5190 | } | ||
5191 | |||
5192 | /* one-time init, only for memory state */ | ||
5193 | static int hcd_fotg210_init(struct usb_hcd *hcd) | ||
5194 | { | ||
5195 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5196 | u32 temp; | ||
5197 | int retval; | ||
5198 | u32 hcc_params; | ||
5199 | struct fotg210_qh_hw *hw; | ||
5200 | |||
5201 | spin_lock_init(&fotg210->lock); | ||
5202 | |||
5203 | /* | ||
5204 | * keep the io watchdog enabled by default; well-behaved HCDs can turn it off later | ||
5205 | */ | ||
5206 | fotg210->need_io_watchdog = 1; | ||
5207 | |||
5208 | hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
5209 | fotg210->hrtimer.function = fotg210_hrtimer_func; | ||
5210 | fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT; | ||
5211 | |||
5212 | hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params); | ||
5213 | |||
5214 | /* | ||
5215 | * by default set standard 80% (== 100 usec/uframe) max periodic | ||
5216 | * bandwidth as required by USB 2.0 | ||
5217 | */ | ||
5218 | fotg210->uframe_periodic_max = 100; | ||
5219 | |||
5220 | /* | ||
5221 | * hw default: 1K periodic list heads, one per frame. | ||
5222 | * periodic_size can shrink by USBCMD update if hcc_params allows. | ||
5223 | */ | ||
5224 | fotg210->periodic_size = DEFAULT_I_TDPS; | ||
5225 | INIT_LIST_HEAD(&fotg210->intr_qh_list); | ||
5226 | INIT_LIST_HEAD(&fotg210->cached_itd_list); | ||
5227 | |||
5228 | if (HCC_PGM_FRAMELISTLEN(hcc_params)) { | ||
5229 | /* periodic schedule size can be smaller than default */ | ||
5230 | switch (FOTG210_TUNE_FLS) { | ||
5231 | case 0: | ||
5232 | fotg210->periodic_size = 1024; | ||
5233 | break; | ||
5234 | case 1: | ||
5235 | fotg210->periodic_size = 512; | ||
5236 | break; | ||
5237 | case 2: | ||
5238 | fotg210->periodic_size = 256; | ||
5239 | break; | ||
5240 | default: | ||
5241 | BUG(); | ||
5242 | } | ||
5243 | } | ||
5244 | retval = fotg210_mem_init(fotg210, GFP_KERNEL); | ||
5245 | if (retval < 0) | ||
5246 | return retval; | ||
5247 | |||
5248 | /* controllers may cache some of the periodic schedule ... */ | ||
5249 | fotg210->i_thresh = 2; | ||
5250 | |||
5251 | /* | ||
5252 | * dedicate a qh for the async ring head, since we couldn't unlink | ||
5253 | * a 'real' qh without stopping the async schedule [4.8]. use it | ||
5254 | * as the 'reclamation list head' too. | ||
5255 | * its dummy is used in hw_alt_next of many tds, to prevent the qh | ||
5256 | * from automatically advancing to the next td after short reads. | ||
5257 | */ | ||
5258 | fotg210->async->qh_next.qh = NULL; | ||
5259 | hw = fotg210->async->hw; | ||
5260 | hw->hw_next = QH_NEXT(fotg210, fotg210->async->qh_dma); | ||
5261 | hw->hw_info1 = cpu_to_hc32(fotg210, QH_HEAD); | ||
5262 | hw->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT); | ||
5263 | hw->hw_qtd_next = FOTG210_LIST_END(fotg210); | ||
5264 | fotg210->async->qh_state = QH_STATE_LINKED; | ||
5265 | hw->hw_alt_next = QTD_NEXT(fotg210, fotg210->async->dummy->qtd_dma); | ||
5266 | |||
5267 | /* clear interrupt enables, set irq latency */ | ||
5268 | if (log2_irq_thresh < 0 || log2_irq_thresh > 6) | ||
5269 | log2_irq_thresh = 0; | ||
5270 | temp = 1 << (16 + log2_irq_thresh); | ||
5271 | if (HCC_CANPARK(hcc_params)) { | ||
5272 | /* HW default park == 3, on hardware that supports it (like | ||
5273 | * NVidia and ALI silicon), maximizes throughput on the async | ||
5274 | * schedule by avoiding QH fetches between transfers. | ||
5275 | * | ||
5276 | * With fast usb storage devices and NForce2, "park" seems to | ||
5277 | * make problems: throughput reduction (!), data errors... | ||
5278 | */ | ||
5279 | if (park) { | ||
5280 | park = min_t(unsigned, park, 3); | ||
5281 | temp |= CMD_PARK; | ||
5282 | temp |= park << 8; | ||
5283 | } | ||
5284 | fotg210_dbg(fotg210, "park %d\n", park); | ||
5285 | } | ||
5286 | if (HCC_PGM_FRAMELISTLEN(hcc_params)) { | ||
5287 | /* periodic schedule size can be smaller than default */ | ||
5288 | temp &= ~(3 << 2); | ||
5289 | temp |= (FOTG210_TUNE_FLS << 2); | ||
5290 | } | ||
5291 | fotg210->command = temp; | ||
5292 | |||
5293 | /* Accept arbitrarily long scatter-gather lists */ | ||
5294 | if (!(hcd->driver->flags & HCD_LOCAL_MEM)) | ||
5295 | hcd->self.sg_tablesize = ~0; | ||
5296 | return 0; | ||
5297 | } | ||
5298 | |||
5299 | /* start HC running; it's halted, hcd_fotg210_init() has been run (once) */ | ||
5300 | static int fotg210_run(struct usb_hcd *hcd) | ||
5301 | { | ||
5302 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5303 | u32 temp; | ||
5304 | u32 hcc_params; | ||
5305 | |||
5306 | hcd->uses_new_polling = 1; | ||
5307 | |||
5308 | /* EHCI spec section 4.1 */ | ||
5309 | |||
5310 | fotg210_writel(fotg210, fotg210->periodic_dma, | ||
5311 | &fotg210->regs->frame_list); | ||
5312 | fotg210_writel(fotg210, (u32)fotg210->async->qh_dma, | ||
5313 | &fotg210->regs->async_next); | ||
5314 | |||
5315 | /* | ||
5316 | * hcc_params controls whether fotg210->regs->segment must (!!!) | ||
5317 | * be used; it constrains QH/ITD/SITD and QTD locations. | ||
5318 | * pci_pool consistent memory always uses segment zero. | ||
5319 | * streaming mappings for I/O buffers, like pci_map_single(), | ||
5320 | * can return segments above 4GB, if the device allows. | ||
5321 | * | ||
5322 | * NOTE: the dma mask is visible through dma_supported(), so | ||
5323 | * drivers can pass this info along ... like NETIF_F_HIGHDMA, | ||
5324 | * Scsi_Host.highmem_io, and so forth. It's readonly to all | ||
5325 | * host side drivers though. | ||
5326 | */ | ||
5327 | hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params); | ||
5328 | |||
5329 | /* | ||
5330 | * Philips, Intel, and maybe others need CMD_RUN before the | ||
5331 | * root hub will detect new devices (why?); NEC doesn't | ||
5332 | */ | ||
5333 | fotg210->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET); | ||
5334 | fotg210->command |= CMD_RUN; | ||
5335 | fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); | ||
5336 | dbg_cmd(fotg210, "init", fotg210->command); | ||
5337 | |||
5338 | /* | ||
5339 | * Start, enabling full USB 2.0 functionality ... usb 1.1 devices | ||
5340 | * are explicitly handed to companion controller(s), so no TT is | ||
5341 | * involved with the root hub. (Except where one is integrated, | ||
5342 | * and there's no companion controller unless maybe for USB OTG.) | ||
5343 | * | ||
5344 | * Turning on the CF flag will transfer ownership of all ports | ||
5345 | * from the companions to the EHCI controller. If any of the | ||
5346 | * companions are in the middle of a port reset at the time, it | ||
5347 | * could cause trouble. Write-locking ehci_cf_port_reset_rwsem | ||
5348 | * guarantees that no resets are in progress. After we set CF, | ||
5349 | * a short delay lets the hardware catch up; new resets shouldn't | ||
5350 | * be started before the port switching actions could complete. | ||
5351 | */ | ||
5352 | down_write(&ehci_cf_port_reset_rwsem); | ||
5353 | fotg210->rh_state = FOTG210_RH_RUNNING; | ||
5354 | /* unblock posted writes */ | ||
5355 | fotg210_readl(fotg210, &fotg210->regs->command); | ||
5356 | msleep(5); | ||
5357 | up_write(&ehci_cf_port_reset_rwsem); | ||
5358 | fotg210->last_periodic_enable = ktime_get_real(); | ||
5359 | |||
5360 | temp = HC_VERSION(fotg210, | ||
5361 | fotg210_readl(fotg210, &fotg210->caps->hc_capbase)); | ||
5362 | fotg210_info(fotg210, | ||
5363 | "USB %x.%x started, EHCI %x.%02x\n", | ||
5364 | ((fotg210->sbrn & 0xf0)>>4), (fotg210->sbrn & 0x0f), | ||
5365 | temp >> 8, temp & 0xff); | ||
5366 | |||
5367 | fotg210_writel(fotg210, INTR_MASK, | ||
5368 | &fotg210->regs->intr_enable); /* Turn On Interrupts */ | ||
5369 | |||
5370 | /* GRR this is run-once init(), being done every time the HC starts. | ||
5371 | * So long as they're part of class devices, we can't do it in init() | ||
5372 | * since the class device isn't created that early. | ||
5373 | */ | ||
5374 | create_debug_files(fotg210); | ||
5375 | create_sysfs_files(fotg210); | ||
5376 | |||
5377 | return 0; | ||
5378 | } | ||
5379 | |||
5380 | static int fotg210_setup(struct usb_hcd *hcd) | ||
5381 | { | ||
5382 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5383 | int retval; | ||
5384 | |||
5385 | fotg210->regs = (void __iomem *)fotg210->caps + | ||
5386 | HC_LENGTH(fotg210, | ||
5387 | fotg210_readl(fotg210, &fotg210->caps->hc_capbase)); | ||
5388 | dbg_hcs_params(fotg210, "reset"); | ||
5389 | dbg_hcc_params(fotg210, "reset"); | ||
5390 | |||
5391 | /* cache this readonly data; minimize chip reads */ | ||
5392 | fotg210->hcs_params = fotg210_readl(fotg210, | ||
5393 | &fotg210->caps->hcs_params); | ||
5394 | |||
5395 | fotg210->sbrn = HCD_USB2; | ||
5396 | |||
5397 | /* data structure init */ | ||
5398 | retval = hcd_fotg210_init(hcd); | ||
5399 | if (retval) | ||
5400 | return retval; | ||
5401 | |||
5402 | retval = fotg210_halt(fotg210); | ||
5403 | if (retval) | ||
5404 | return retval; | ||
5405 | |||
5406 | fotg210_reset(fotg210); | ||
5407 | |||
5408 | return 0; | ||
5409 | } | ||
5410 | |||
5411 | /*-------------------------------------------------------------------------*/ | ||
5412 | |||
5413 | static irqreturn_t fotg210_irq(struct usb_hcd *hcd) | ||
5414 | { | ||
5415 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5416 | u32 status, masked_status, pcd_status = 0, cmd; | ||
5417 | int bh; | ||
5418 | |||
5419 | spin_lock(&fotg210->lock); | ||
5420 | |||
5421 | status = fotg210_readl(fotg210, &fotg210->regs->status); | ||
5422 | |||
5423 | /* e.g. cardbus physical eject */ | ||
5424 | if (status == ~(u32) 0) { | ||
5425 | fotg210_dbg(fotg210, "device removed\n"); | ||
5426 | goto dead; | ||
5427 | } | ||
5428 | |||
5429 | /* | ||
5430 | * We don't use STS_FLR, but some controllers don't like it to | ||
5431 | * remain on, so mask it out along with the other status bits. | ||
5432 | */ | ||
5433 | masked_status = status & (INTR_MASK | STS_FLR); | ||
5434 | |||
5435 | /* Shared IRQ? */ | ||
5436 | if (!masked_status || | ||
5437 | unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) { | ||
5438 | spin_unlock(&fotg210->lock); | ||
5439 | return IRQ_NONE; | ||
5440 | } | ||
5441 | |||
5442 | /* clear (just) interrupts */ | ||
5443 | fotg210_writel(fotg210, masked_status, &fotg210->regs->status); | ||
5444 | cmd = fotg210_readl(fotg210, &fotg210->regs->command); | ||
5445 | bh = 0; | ||
5446 | |||
5447 | #ifdef VERBOSE_DEBUG | ||
5448 | /* unrequested/ignored: Frame List Rollover */ | ||
5449 | dbg_status(fotg210, "irq", status); | ||
5450 | #endif | ||
5451 | |||
5452 | /* INT, ERR, and IAA interrupt rates can be throttled */ | ||
5453 | |||
5454 | /* normal [4.15.1.2] or error [4.15.1.1] completion */ | ||
5455 | if (likely((status & (STS_INT|STS_ERR)) != 0)) { | ||
5456 | if (likely((status & STS_ERR) == 0)) | ||
5457 | COUNT(fotg210->stats.normal); | ||
5458 | else | ||
5459 | COUNT(fotg210->stats.error); | ||
5460 | bh = 1; | ||
5461 | } | ||
5462 | |||
5463 | /* complete the unlinking of some qh [4.15.2.3] */ | ||
5464 | if (status & STS_IAA) { | ||
5465 | |||
5466 | /* Turn off the IAA watchdog */ | ||
5467 | fotg210->enabled_hrtimer_events &= | ||
5468 | ~BIT(FOTG210_HRTIMER_IAA_WATCHDOG); | ||
5469 | |||
5470 | /* | ||
5471 | * Mild optimization: Allow another IAAD to reset the | ||
5472 | * hrtimer, if one occurs before the next expiration. | ||
5473 | * In theory we could always cancel the hrtimer, but | ||
5474 | * tests show that about half the time it will be reset | ||
5475 | * for some other event anyway. | ||
5476 | */ | ||
5477 | if (fotg210->next_hrtimer_event == FOTG210_HRTIMER_IAA_WATCHDOG) | ||
5478 | ++fotg210->next_hrtimer_event; | ||
5479 | |||
5480 | /* guard against (alleged) silicon errata */ | ||
5481 | if (cmd & CMD_IAAD) | ||
5482 | fotg210_dbg(fotg210, "IAA with IAAD still set?\n"); | ||
5483 | if (fotg210->async_iaa) { | ||
5484 | COUNT(fotg210->stats.iaa); | ||
5485 | end_unlink_async(fotg210); | ||
5486 | } else | ||
5487 | fotg210_dbg(fotg210, "IAA with nothing unlinked?\n"); | ||
5488 | } | ||
5489 | |||
5490 | /* remote wakeup [4.3.1] */ | ||
5491 | if (status & STS_PCD) { | ||
5492 | int pstatus; | ||
5493 | u32 __iomem *status_reg = &fotg210->regs->port_status; | ||
5494 | |||
5495 | /* kick root hub later */ | ||
5496 | pcd_status = status; | ||
5497 | |||
5498 | /* resume root hub? */ | ||
5499 | if (fotg210->rh_state == FOTG210_RH_SUSPENDED) | ||
5500 | usb_hcd_resume_root_hub(hcd); | ||
5501 | |||
5502 | pstatus = fotg210_readl(fotg210, status_reg); | ||
5503 | |||
5504 | if (test_bit(0, &fotg210->suspended_ports) && | ||
5505 | ((pstatus & PORT_RESUME) || | ||
5506 | !(pstatus & PORT_SUSPEND)) && | ||
5507 | (pstatus & PORT_PE) && | ||
5508 | fotg210->reset_done[0] == 0) { | ||
5509 | |||
5510 | /* start 20 msec resume signaling from this port, | ||
5511 | * and make khubd collect PORT_STAT_C_SUSPEND to | ||
5512 | * stop that signaling. Use 5 ms extra for safety, | ||
5513 | * like usb_port_resume() does. | ||
5514 | */ | ||
5515 | fotg210->reset_done[0] = jiffies + msecs_to_jiffies(25); | ||
5516 | set_bit(0, &fotg210->resuming_ports); | ||
5517 | fotg210_dbg(fotg210, "port 1 remote wakeup\n"); | ||
5518 | mod_timer(&hcd->rh_timer, fotg210->reset_done[0]); | ||
5519 | } | ||
5520 | } | ||
5521 | |||
5522 | /* PCI errors [4.15.2.4] */ | ||
5523 | if (unlikely((status & STS_FATAL) != 0)) { | ||
5524 | fotg210_err(fotg210, "fatal error\n"); | ||
5525 | dbg_cmd(fotg210, "fatal", cmd); | ||
5526 | dbg_status(fotg210, "fatal", status); | ||
5527 | dead: | ||
5528 | usb_hc_died(hcd); | ||
5529 | |||
5530 | /* Don't let the controller do anything more */ | ||
5531 | fotg210->shutdown = true; | ||
5532 | fotg210->rh_state = FOTG210_RH_STOPPING; | ||
5533 | fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE); | ||
5534 | fotg210_writel(fotg210, fotg210->command, | ||
5535 | &fotg210->regs->command); | ||
5536 | fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable); | ||
5537 | fotg210_handle_controller_death(fotg210); | ||
5538 | |||
5539 | /* Handle completions when the controller stops */ | ||
5540 | bh = 0; | ||
5541 | } | ||
5542 | |||
5543 | if (bh) | ||
5544 | fotg210_work(fotg210); | ||
5545 | spin_unlock(&fotg210->lock); | ||
5546 | if (pcd_status) | ||
5547 | usb_hcd_poll_rh_status(hcd); | ||
5548 | return IRQ_HANDLED; | ||
5549 | } | ||
5550 | |||
5551 | /*-------------------------------------------------------------------------*/ | ||
5552 | |||
5553 | /* | ||
5554 | * non-error returns are a promise to giveback() the urb later | ||
5555 | * we drop ownership so next owner (or urb unlink) can get it | ||
5556 | * | ||
5557 | * urb + dev is in hcd.self.controller.urb_list | ||
5558 | * we're queueing TDs onto software and hardware lists | ||
5559 | * | ||
5560 | * hcd-specific init for hcpriv hasn't been done yet | ||
5561 | * | ||
5562 | * NOTE: control, bulk, and interrupt share the same code to append TDs | ||
5563 | * to a (possibly active) QH, and the same QH scanning code. | ||
5564 | */ | ||
5565 | static int fotg210_urb_enqueue( | ||
5566 | struct usb_hcd *hcd, | ||
5567 | struct urb *urb, | ||
5568 | gfp_t mem_flags | ||
5569 | ) { | ||
5570 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5571 | struct list_head qtd_list; | ||
5572 | |||
5573 | INIT_LIST_HEAD(&qtd_list); | ||
5574 | |||
5575 | switch (usb_pipetype(urb->pipe)) { | ||
5576 | case PIPE_CONTROL: | ||
5577 | /* qh_completions() code doesn't handle all the fault cases | ||
5578 | * in multi-TD control transfers. Even 1KB is rare anyway. | ||
5579 | */ | ||
5580 | if (urb->transfer_buffer_length > (16 * 1024)) | ||
5581 | return -EMSGSIZE; | ||
5582 | /* FALLTHROUGH */ | ||
5583 | /* case PIPE_BULK: */ | ||
5584 | default: | ||
5585 | if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags)) | ||
5586 | return -ENOMEM; | ||
5587 | return submit_async(fotg210, urb, &qtd_list, mem_flags); | ||
5588 | |||
5589 | case PIPE_INTERRUPT: | ||
5590 | if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags)) | ||
5591 | return -ENOMEM; | ||
5592 | return intr_submit(fotg210, urb, &qtd_list, mem_flags); | ||
5593 | |||
5594 | case PIPE_ISOCHRONOUS: | ||
5595 | return itd_submit(fotg210, urb, mem_flags); | ||
5596 | } | ||
5597 | } | ||
5598 | |||
5599 | /* remove from hardware lists | ||
5600 | * completions normally happen asynchronously | ||
5601 | */ | ||
5602 | |||
5603 | static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | ||
5604 | { | ||
5605 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5606 | struct fotg210_qh *qh; | ||
5607 | unsigned long flags; | ||
5608 | int rc; | ||
5609 | |||
5610 | spin_lock_irqsave(&fotg210->lock, flags); | ||
5611 | rc = usb_hcd_check_unlink_urb(hcd, urb, status); | ||
5612 | if (rc) | ||
5613 | goto done; | ||
5614 | |||
5615 | switch (usb_pipetype(urb->pipe)) { | ||
5616 | /* case PIPE_CONTROL: */ | ||
5617 | /* case PIPE_BULK:*/ | ||
5618 | default: | ||
5619 | qh = (struct fotg210_qh *) urb->hcpriv; | ||
5620 | if (!qh) | ||
5621 | break; | ||
5622 | switch (qh->qh_state) { | ||
5623 | case QH_STATE_LINKED: | ||
5624 | case QH_STATE_COMPLETING: | ||
5625 | start_unlink_async(fotg210, qh); | ||
5626 | break; | ||
5627 | case QH_STATE_UNLINK: | ||
5628 | case QH_STATE_UNLINK_WAIT: | ||
5629 | /* already started */ | ||
5630 | break; | ||
5631 | case QH_STATE_IDLE: | ||
5632 | /* QH might be waiting for a Clear-TT-Buffer */ | ||
5633 | qh_completions(fotg210, qh); | ||
5634 | break; | ||
5635 | } | ||
5636 | break; | ||
5637 | |||
5638 | case PIPE_INTERRUPT: | ||
5639 | qh = (struct fotg210_qh *) urb->hcpriv; | ||
5640 | if (!qh) | ||
5641 | break; | ||
5642 | switch (qh->qh_state) { | ||
5643 | case QH_STATE_LINKED: | ||
5644 | case QH_STATE_COMPLETING: | ||
5645 | start_unlink_intr(fotg210, qh); | ||
5646 | break; | ||
5647 | case QH_STATE_IDLE: | ||
5648 | qh_completions(fotg210, qh); | ||
5649 | break; | ||
5650 | default: | ||
5651 | fotg210_dbg(fotg210, "bogus qh %p state %d\n", | ||
5652 | qh, qh->qh_state); | ||
5653 | goto done; | ||
5654 | } | ||
5655 | break; | ||
5656 | |||
5657 | case PIPE_ISOCHRONOUS: | ||
5658 | /* itd... */ | ||
5659 | |||
5660 | /* wait till next completion, do it then. */ | ||
5661 | /* completion irqs can wait up to 1024 msec, */ | ||
5662 | break; | ||
5663 | } | ||
5664 | done: | ||
5665 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
5666 | return rc; | ||
5667 | } | ||
5668 | |||
5669 | /*-------------------------------------------------------------------------*/ | ||
5670 | |||
5671 | /* bulk qh holds the data toggle */ | ||
5672 | |||
5673 | static void | ||
5674 | fotg210_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) | ||
5675 | { | ||
5676 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5677 | unsigned long flags; | ||
5678 | struct fotg210_qh *qh, *tmp; | ||
5679 | |||
5680 | /* ASSERT: any requests/urbs are being unlinked */ | ||
5681 | /* ASSERT: nobody can be submitting urbs for this any more */ | ||
5682 | |||
5683 | rescan: | ||
5684 | spin_lock_irqsave(&fotg210->lock, flags); | ||
5685 | qh = ep->hcpriv; | ||
5686 | if (!qh) | ||
5687 | goto done; | ||
5688 | |||
5689 | /* endpoints can be iso streams. for now, we don't | ||
5690 | * accelerate iso completions ... so spin a while. | ||
5691 | */ | ||
5692 | if (qh->hw == NULL) { | ||
5693 | struct fotg210_iso_stream *stream = ep->hcpriv; | ||
5694 | |||
5695 | if (!list_empty(&stream->td_list)) | ||
5696 | goto idle_timeout; | ||
5697 | |||
5698 | /* BUG_ON(!list_empty(&stream->free_list)); */ | ||
5699 | kfree(stream); | ||
5700 | goto done; | ||
5701 | } | ||
5702 | |||
5703 | if (fotg210->rh_state < FOTG210_RH_RUNNING) | ||
5704 | qh->qh_state = QH_STATE_IDLE; | ||
5705 | switch (qh->qh_state) { | ||
5706 | case QH_STATE_LINKED: | ||
5707 | case QH_STATE_COMPLETING: | ||
5708 | for (tmp = fotg210->async->qh_next.qh; | ||
5709 | tmp && tmp != qh; | ||
5710 | tmp = tmp->qh_next.qh) | ||
5711 | continue; | ||
5712 | /* periodic qh self-unlinks on empty, and a COMPLETING qh | ||
5713 | * may already be unlinked. | ||
5714 | */ | ||
5715 | if (tmp) | ||
5716 | start_unlink_async(fotg210, qh); | ||
5717 | /* FALL THROUGH */ | ||
5718 | case QH_STATE_UNLINK: /* wait for hw to finish? */ | ||
5719 | case QH_STATE_UNLINK_WAIT: | ||
5720 | idle_timeout: | ||
5721 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
5722 | schedule_timeout_uninterruptible(1); | ||
5723 | goto rescan; | ||
5724 | case QH_STATE_IDLE: /* fully unlinked */ | ||
5725 | if (qh->clearing_tt) | ||
5726 | goto idle_timeout; | ||
5727 | if (list_empty(&qh->qtd_list)) { | ||
5728 | qh_destroy(fotg210, qh); | ||
5729 | break; | ||
5730 | } | ||
5731 | /* else FALL THROUGH */ | ||
5732 | default: | ||
5733 | /* caller was supposed to have unlinked any requests; | ||
5734 | * that's not our job. just leak this memory. | ||
5735 | */ | ||
5736 | fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n", | ||
5737 | qh, ep->desc.bEndpointAddress, qh->qh_state, | ||
5738 | list_empty(&qh->qtd_list) ? "" : "(has tds)"); | ||
5739 | break; | ||
5740 | } | ||
5741 | done: | ||
5742 | ep->hcpriv = NULL; | ||
5743 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
5744 | } | ||
5745 | |||
5746 | static void | ||
5747 | fotg210_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) | ||
5748 | { | ||
5749 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5750 | struct fotg210_qh *qh; | ||
5751 | int eptype = usb_endpoint_type(&ep->desc); | ||
5752 | int epnum = usb_endpoint_num(&ep->desc); | ||
5753 | int is_out = usb_endpoint_dir_out(&ep->desc); | ||
5754 | unsigned long flags; | ||
5755 | |||
5756 | if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT) | ||
5757 | return; | ||
5758 | |||
5759 | spin_lock_irqsave(&fotg210->lock, flags); | ||
5760 | qh = ep->hcpriv; | ||
5761 | |||
5762 | /* For Bulk and Interrupt endpoints we maintain the toggle state | ||
5763 | * in the hardware; the toggle bits in udev aren't used at all. | ||
5764 | * When an endpoint is reset by usb_clear_halt() we must reset | ||
5765 | * the toggle bit in the QH. | ||
5766 | */ | ||
5767 | if (qh) { | ||
5768 | usb_settoggle(qh->dev, epnum, is_out, 0); | ||
5769 | if (!list_empty(&qh->qtd_list)) { | ||
5770 | WARN_ONCE(1, "clear_halt for a busy endpoint\n"); | ||
5771 | } else if (qh->qh_state == QH_STATE_LINKED || | ||
5772 | qh->qh_state == QH_STATE_COMPLETING) { | ||
5773 | |||
5774 | /* The toggle value in the QH can't be updated | ||
5775 | * while the QH is active. Unlink it now; | ||
5776 | * re-linking will call qh_refresh(). | ||
5777 | */ | ||
5778 | if (eptype == USB_ENDPOINT_XFER_BULK) | ||
5779 | start_unlink_async(fotg210, qh); | ||
5780 | else | ||
5781 | start_unlink_intr(fotg210, qh); | ||
5782 | } | ||
5783 | } | ||
5784 | spin_unlock_irqrestore(&fotg210->lock, flags); | ||
5785 | } | ||
5786 | |||
5787 | static int fotg210_get_frame(struct usb_hcd *hcd) | ||
5788 | { | ||
5789 | struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); | ||
5790 | return (fotg210_read_frame_index(fotg210) >> 3) % | ||
5791 | fotg210->periodic_size; | ||
5792 | } | ||
5793 | |||
5794 | /*-------------------------------------------------------------------------*/ | ||
5795 | |||
5796 | /* | ||
5797 | * The EHCI-like host core in the FOTG210 OTG controller cannot be a | ||
5798 | * separate module or device, because its registers (and irq) are shared | ||
5799 | * between the host/gadget/otg functions, and in order to facilitate role | ||
5800 | * switching we cannot give the fotg210 driver exclusive access to those. | ||
5801 | */ | ||
5802 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
5803 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
5804 | MODULE_LICENSE("GPL"); | ||
5805 | |||
5806 | static const struct hc_driver fotg210_fotg210_hc_driver = { | ||
5807 | .description = hcd_name, | ||
5808 | .product_desc = "Faraday USB2.0 Host Controller", | ||
5809 | .hcd_priv_size = sizeof(struct fotg210_hcd), | ||
5810 | |||
5811 | /* | ||
5812 | * generic hardware linkage | ||
5813 | */ | ||
5814 | .irq = fotg210_irq, | ||
5815 | .flags = HCD_MEMORY | HCD_USB2, | ||
5816 | |||
5817 | /* | ||
5818 | * basic lifecycle operations | ||
5819 | */ | ||
5820 | .reset = hcd_fotg210_init, | ||
5821 | .start = fotg210_run, | ||
5822 | .stop = fotg210_stop, | ||
5823 | .shutdown = fotg210_shutdown, | ||
5824 | |||
5825 | /* | ||
5826 | * managing i/o requests and associated device resources | ||
5827 | */ | ||
5828 | .urb_enqueue = fotg210_urb_enqueue, | ||
5829 | .urb_dequeue = fotg210_urb_dequeue, | ||
5830 | .endpoint_disable = fotg210_endpoint_disable, | ||
5831 | .endpoint_reset = fotg210_endpoint_reset, | ||
5832 | |||
5833 | /* | ||
5834 | * scheduling support | ||
5835 | */ | ||
5836 | .get_frame_number = fotg210_get_frame, | ||
5837 | |||
5838 | /* | ||
5839 | * root hub support | ||
5840 | */ | ||
5841 | .hub_status_data = fotg210_hub_status_data, | ||
5842 | .hub_control = fotg210_hub_control, | ||
5843 | .bus_suspend = fotg210_bus_suspend, | ||
5844 | .bus_resume = fotg210_bus_resume, | ||
5845 | |||
5846 | .relinquish_port = fotg210_relinquish_port, | ||
5847 | .port_handed_over = fotg210_port_handed_over, | ||
5848 | |||
5849 | .clear_tt_buffer_complete = fotg210_clear_tt_buffer_complete, | ||
5850 | }; | ||
5851 | |||
5852 | static void fotg210_init(struct fotg210_hcd *fotg210) | ||
5853 | { | ||
5854 | u32 value; | ||
5855 | |||
5856 | iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY, | ||
5857 | &fotg210->regs->gmir); | ||
5858 | |||
5859 | value = ioread32(&fotg210->regs->otgcsr); | ||
5860 | value &= ~OTGCSR_A_BUS_DROP; | ||
5861 | value |= OTGCSR_A_BUS_REQ; | ||
5862 | iowrite32(value, &fotg210->regs->otgcsr); | ||
5863 | } | ||
5864 | |||
5865 | /** | ||
5866 | * fotg210_hcd_probe - initialize faraday FOTG210 HCDs | ||
5867 | * | ||
5868 | * Allocates basic resources for this USB host controller, and | ||
5869 | * then invokes the start() method for the HCD associated with it | ||
5870 | * through the hotplug entry's driver_data. | ||
5871 | */ | ||
5872 | static int fotg210_hcd_probe(struct platform_device *pdev) | ||
5873 | { | ||
5874 | struct device *dev = &pdev->dev; | ||
5875 | struct usb_hcd *hcd; | ||
5876 | struct resource *res; | ||
5877 | int irq; | ||
5878 | int retval = -ENODEV; | ||
5879 | struct fotg210_hcd *fotg210; | ||
5880 | |||
5881 | if (usb_disabled()) | ||
5882 | return -ENODEV; | ||
5883 | |||
5884 | pdev->dev.power.power_state = PMSG_ON; | ||
5885 | |||
5886 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
5887 | if (!res) { | ||
5888 | dev_err(dev, | ||
5889 | "Found HC with no IRQ. Check %s setup!\n", | ||
5890 | dev_name(dev)); | ||
5891 | return -ENODEV; | ||
5892 | } | ||
5893 | |||
5894 | irq = res->start; | ||
5895 | |||
5896 | hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev, | ||
5897 | dev_name(dev)); | ||
5898 | if (!hcd) { | ||
5899 | dev_err(dev, "failed to create hcd with err %d\n", retval); | ||
5900 | retval = -ENOMEM; | ||
5901 | goto fail_create_hcd; | ||
5902 | } | ||
5903 | |||
5904 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
5905 | if (!res) { | ||
5906 | dev_err(dev, | ||
5907 | "Found HC with no register addr. Check %s setup!\n", | ||
5908 | dev_name(dev)); | ||
5909 | retval = -ENODEV; | ||
5910 | goto fail_request_resource; | ||
5911 | } | ||
5912 | |||
5913 | hcd->rsrc_start = res->start; | ||
5914 | hcd->rsrc_len = resource_size(res); | ||
5915 | hcd->has_tt = 1; | ||
5916 | |||
5917 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, | ||
5918 | fotg210_fotg210_hc_driver.description)) { | ||
5919 | dev_dbg(dev, "controller already in use\n"); | ||
5920 | retval = -EBUSY; | ||
5921 | goto fail_request_resource; | ||
5922 | } | ||
5923 | |||
5924 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); | ||
5925 | if (!res) { | ||
5926 | dev_err(dev, | ||
5927 | "Found HC with no register addr. Check %s setup!\n", | ||
5928 | dev_name(dev)); | ||
5929 | retval = -ENODEV; | ||
5930 | goto fail_request_resource; | ||
5931 | } | ||
5932 | |||
5933 | hcd->regs = ioremap_nocache(res->start, resource_size(res)); | ||
5934 | if (hcd->regs == NULL) { | ||
5935 | dev_dbg(dev, "error mapping memory\n"); | ||
5936 | retval = -EFAULT; | ||
5937 | goto fail_ioremap; | ||
5938 | } | ||
5939 | |||
5940 | fotg210 = hcd_to_fotg210(hcd); | ||
5941 | |||
5942 | fotg210->caps = hcd->regs; | ||
5943 | |||
5944 | retval = fotg210_setup(hcd); | ||
5945 | if (retval) | ||
5946 | goto fail_add_hcd; | ||
5947 | |||
5948 | fotg210_init(fotg210); | ||
5949 | |||
5950 | retval = usb_add_hcd(hcd, irq, IRQF_SHARED); | ||
5951 | if (retval) { | ||
5952 | dev_err(dev, "failed to add hcd with err %d\n", retval); | ||
5953 | goto fail_add_hcd; | ||
5954 | } | ||
5955 | |||
5956 | return retval; | ||
5957 | |||
5958 | fail_add_hcd: | ||
5959 | iounmap(hcd->regs); | ||
5960 | fail_ioremap: | ||
5961 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
5962 | fail_request_resource: | ||
5963 | usb_put_hcd(hcd); | ||
5964 | fail_create_hcd: | ||
5965 | dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval); | ||
5966 | return retval; | ||
5967 | } | ||
5968 | |||
5969 | /** | ||
5970 | * fotg210_hcd_remove - shutdown processing for the FOTG210 HCD | ||
5971 | * @pdev: platform device whose host controller is being removed | ||
5972 | * | ||
5973 | */ | ||
5974 | static int fotg210_hcd_remove(struct platform_device *pdev) | ||
5975 | { | ||
5976 | struct device *dev = &pdev->dev; | ||
5977 | struct usb_hcd *hcd = dev_get_drvdata(dev); | ||
5978 | |||
5979 | if (!hcd) | ||
5980 | return 0; | ||
5981 | |||
5982 | usb_remove_hcd(hcd); | ||
5983 | iounmap(hcd->regs); | ||
5984 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
5985 | usb_put_hcd(hcd); | ||
5986 | |||
5987 | return 0; | ||
5988 | } | ||
5989 | |||
5990 | static struct platform_driver fotg210_hcd_driver = { | ||
5991 | .driver = { | ||
5992 | .name = "fotg210-hcd", | ||
5993 | }, | ||
5994 | .probe = fotg210_hcd_probe, | ||
5995 | .remove = fotg210_hcd_remove, | ||
5996 | }; | ||
5997 | |||
5998 | static int __init fotg210_hcd_init(void) | ||
5999 | { | ||
6000 | int retval = 0; | ||
6001 | |||
6002 | if (usb_disabled()) | ||
6003 | return -ENODEV; | ||
6004 | |||
6005 | pr_info("%s: " DRIVER_DESC "\n", hcd_name); | ||
6006 | set_bit(USB_EHCI_LOADED, &usb_hcds_loaded); | ||
6007 | if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) || | ||
6008 | test_bit(USB_OHCI_LOADED, &usb_hcds_loaded)) | ||
6009 | pr_warn("fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n"); | ||
6010 | |||
6011 | pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd\n", | ||
6012 | hcd_name, | ||
6013 | sizeof(struct fotg210_qh), sizeof(struct fotg210_qtd), | ||
6014 | sizeof(struct fotg210_itd)); | ||
6015 | |||
6016 | #ifdef DEBUG | ||
6017 | fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root); | ||
6018 | if (!fotg210_debug_root) { | ||
6019 | retval = -ENOENT; | ||
6020 | goto err_debug; | ||
6021 | } | ||
6022 | #endif | ||
6023 | |||
6024 | retval = platform_driver_register(&fotg210_hcd_driver); | ||
6025 | if (retval < 0) | ||
6026 | goto clean; | ||
6027 | return retval; | ||
6028 | |||
6030 | clean: | ||
6031 | #ifdef DEBUG | ||
6032 | debugfs_remove(fotg210_debug_root); | ||
6033 | fotg210_debug_root = NULL; | ||
6034 | err_debug: | ||
6035 | #endif | ||
6036 | clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); | ||
6037 | return retval; | ||
6038 | } | ||
6039 | module_init(fotg210_hcd_init); | ||
6040 | |||
6041 | static void __exit fotg210_hcd_cleanup(void) | ||
6042 | { | ||
6043 | platform_driver_unregister(&fotg210_hcd_driver); | ||
6044 | #ifdef DEBUG | ||
6045 | debugfs_remove(fotg210_debug_root); | ||
6046 | #endif | ||
6047 | clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); | ||
6048 | } | ||
6049 | module_exit(fotg210_hcd_cleanup); | ||
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h new file mode 100644 index 000000000000..8920f9d32564 --- /dev/null +++ b/drivers/usb/host/fotg210.h | |||
@@ -0,0 +1,750 @@ | |||
1 | #ifndef __LINUX_FOTG210_H | ||
2 | #define __LINUX_FOTG210_H | ||
3 | |||
4 | /* definitions used for the EHCI driver */ | ||
5 | |||
6 | /* | ||
7 | * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to | ||
8 | * __leXX (normally) or __beXX (given FOTG210_BIG_ENDIAN_DESC), depending on | ||
9 | * the host controller implementation. | ||
10 | * | ||
11 | * To facilitate the strongest possible byte-order checking from "sparse" | ||
12 | * and so on, we use __leXX unless that's not practical. | ||
13 | */ | ||
14 | #define __hc32 __le32 | ||
15 | #define __hc16 __le16 | ||
16 | |||
17 | /* statistics can be kept for tuning/monitoring */ | ||
18 | struct fotg210_stats { | ||
19 | /* irq usage */ | ||
20 | unsigned long normal; | ||
21 | unsigned long error; | ||
22 | unsigned long iaa; | ||
23 | unsigned long lost_iaa; | ||
24 | |||
25 | /* termination of urbs from core */ | ||
26 | unsigned long complete; | ||
27 | unsigned long unlink; | ||
28 | }; | ||
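The COUNT() macro defined further down in struct fotg210_hcd is how these counters are meant to be bumped; it compiles to nothing unless FOTG210_STATS is defined. A minimal sketch of its use in an interrupt path (the surrounding handler and its status variable are assumed, not shown):

	/* sketch: after reading and acking USBSTS in the irq handler */
	if (status & STS_INT)
		COUNT(fotg210->stats.normal);	/* "normal" completion irq */
	if (status & STS_ERR)
		COUNT(fotg210->stats.error);	/* completion with error */
	if (status & STS_IAA)
		COUNT(fotg210->stats.iaa);	/* async-advance doorbell */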
29 | |||
30 | /* fotg210_hcd->lock guards shared data against other CPUs: | ||
31 | * fotg210_hcd: async, unlink, periodic (and shadow), ... | ||
32 | * usb_host_endpoint: hcpriv | ||
33 | * fotg210_qh: qh_next, qtd_list | ||
34 | * fotg210_qtd: qtd_list | ||
35 | * | ||
36 | * Also, hold this lock when talking to HC registers or | ||
37 | * when updating hw_* fields in shared qh/qtd/... structures. | ||
38 | */ | ||
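As a sketch of the rule above (the caller and the register poked are illustrative), any touch of the shared schedule or of HC registers is wrapped like this:

	unsigned long flags;

	spin_lock_irqsave(&fotg210->lock, flags);
	/* safe to walk fotg210->async, edit qh->qtd_list, or write registers */
	fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
	spin_unlock_irqrestore(&fotg210->lock, flags);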
39 | |||
40 | #define FOTG210_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */ | ||
41 | |||
42 | /* | ||
43 | * fotg210_rh_state values of FOTG210_RH_RUNNING or above mean that the | ||
44 | * controller may be doing DMA. Lower values mean there's no DMA. | ||
45 | */ | ||
46 | enum fotg210_rh_state { | ||
47 | FOTG210_RH_HALTED, | ||
48 | FOTG210_RH_SUSPENDED, | ||
49 | FOTG210_RH_RUNNING, | ||
50 | FOTG210_RH_STOPPING | ||
51 | }; | ||
52 | |||
53 | /* | ||
54 | * Timer events, ordered by increasing delay length. | ||
55 | * Always update event_delays_ns[] and event_handlers[] (defined in | ||
56 | * ehci-timer.c) in parallel with this list. | ||
57 | */ | ||
58 | enum fotg210_hrtimer_event { | ||
59 | FOTG210_HRTIMER_POLL_ASS, /* Poll for async schedule off */ | ||
60 | FOTG210_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */ | ||
61 | FOTG210_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */ | ||
62 | FOTG210_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */ | ||
63 | FOTG210_HRTIMER_FREE_ITDS, /* Wait for unused iTDs */ | ||
64 | FOTG210_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */ | ||
65 | FOTG210_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */ | ||
66 | FOTG210_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */ | ||
67 | FOTG210_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */ | ||
68 | FOTG210_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */ | ||
69 | FOTG210_HRTIMER_NUM_EVENTS /* Must come last */ | ||
70 | }; | ||
71 | #define FOTG210_HRTIMER_NO_EVENT 99 | ||
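The comment above this enum requires two tables kept in step with it; a hedged sketch of their expected shape (the delay values and handler names here are illustrative, the real tables live in the hcd source file):

	static unsigned event_delays_ns[] = {
		1 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_POLL_ASS */
		1 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_POLL_PSS */
		/* ... exactly one entry per fotg210_hrtimer_event ... */
	};

	static void (*event_handlers[])(struct fotg210_hcd *) = {
		fotg210_poll_ASS,	/* FOTG210_HRTIMER_POLL_ASS */
		fotg210_poll_PSS,	/* FOTG210_HRTIMER_POLL_PSS */
		/* ... */
	};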
72 | |||
73 | struct fotg210_hcd { /* one per controller */ | ||
74 | /* timing support */ | ||
75 | enum fotg210_hrtimer_event next_hrtimer_event; | ||
76 | unsigned enabled_hrtimer_events; | ||
77 | ktime_t hr_timeouts[FOTG210_HRTIMER_NUM_EVENTS]; | ||
78 | struct hrtimer hrtimer; | ||
79 | |||
80 | int PSS_poll_count; | ||
81 | int ASS_poll_count; | ||
82 | int died_poll_count; | ||
83 | |||
84 | /* glue to PCI and HCD framework */ | ||
85 | struct fotg210_caps __iomem *caps; | ||
86 | struct fotg210_regs __iomem *regs; | ||
87 | struct fotg210_dbg_port __iomem *debug; | ||
88 | |||
89 | __u32 hcs_params; /* cached register copy */ | ||
90 | spinlock_t lock; | ||
91 | enum fotg210_rh_state rh_state; | ||
92 | |||
93 | /* general schedule support */ | ||
94 | bool scanning:1; | ||
95 | bool need_rescan:1; | ||
96 | bool intr_unlinking:1; | ||
97 | bool async_unlinking:1; | ||
98 | bool shutdown:1; | ||
99 | struct fotg210_qh *qh_scan_next; | ||
100 | |||
101 | /* async schedule support */ | ||
102 | struct fotg210_qh *async; | ||
103 | struct fotg210_qh *dummy; /* For AMD quirk use */ | ||
104 | struct fotg210_qh *async_unlink; | ||
105 | struct fotg210_qh *async_unlink_last; | ||
106 | struct fotg210_qh *async_iaa; | ||
107 | unsigned async_unlink_cycle; | ||
108 | unsigned async_count; /* async activity count */ | ||
109 | |||
110 | /* periodic schedule support */ | ||
111 | #define DEFAULT_I_TDPS 1024 /* some HCs can do less */ | ||
112 | unsigned periodic_size; | ||
113 | __hc32 *periodic; /* hw periodic table */ | ||
114 | dma_addr_t periodic_dma; | ||
115 | struct list_head intr_qh_list; | ||
116 | unsigned i_thresh; /* uframes HC might cache */ | ||
117 | |||
118 | union fotg210_shadow *pshadow; /* mirror hw periodic table */ | ||
119 | struct fotg210_qh *intr_unlink; | ||
120 | struct fotg210_qh *intr_unlink_last; | ||
121 | unsigned intr_unlink_cycle; | ||
122 | unsigned now_frame; /* frame from HC hardware */ | ||
123 | unsigned next_frame; /* scan periodic, start here */ | ||
124 | unsigned intr_count; /* intr activity count */ | ||
125 | unsigned isoc_count; /* isoc activity count */ | ||
126 | unsigned periodic_count; /* periodic activity count */ | ||
127 | /* max periodic time per uframe */ | ||
128 | unsigned uframe_periodic_max; | ||
129 | |||
130 | |||
131 | /* list of itds completed while now_frame was still active */ | ||
132 | struct list_head cached_itd_list; | ||
133 | struct fotg210_itd *last_itd_to_free; | ||
134 | |||
135 | /* per root hub port */ | ||
136 | unsigned long reset_done[FOTG210_MAX_ROOT_PORTS]; | ||
137 | |||
138 | /* bit vectors (one bit per port) */ | ||
139 | unsigned long bus_suspended; /* which ports were | ||
140 | already suspended at the start of a bus suspend */ | ||
141 | unsigned long companion_ports; /* which ports are | ||
142 | dedicated to the companion controller */ | ||
143 | unsigned long owned_ports; /* which ports are | ||
144 | owned by the companion during a bus suspend */ | ||
145 | unsigned long port_c_suspend; /* which ports have | ||
146 | the change-suspend feature turned on */ | ||
147 | unsigned long suspended_ports; /* which ports are | ||
148 | suspended */ | ||
149 | unsigned long resuming_ports; /* which ports have | ||
150 | started to resume */ | ||
151 | |||
152 | /* per-HC memory pools (could be per-bus, but ...) */ | ||
153 | struct dma_pool *qh_pool; /* qh per active urb */ | ||
154 | struct dma_pool *qtd_pool; /* one or more per qh */ | ||
155 | struct dma_pool *itd_pool; /* itd per iso urb */ | ||
156 | |||
157 | unsigned random_frame; | ||
158 | unsigned long next_statechange; | ||
159 | ktime_t last_periodic_enable; | ||
160 | u32 command; | ||
161 | |||
162 | /* SILICON QUIRKS */ | ||
163 | unsigned need_io_watchdog:1; | ||
164 | unsigned fs_i_thresh:1; /* Intel iso scheduling */ | ||
165 | |||
166 | u8 sbrn; /* packed release number */ | ||
167 | |||
168 | /* irq statistics */ | ||
169 | #ifdef FOTG210_STATS | ||
170 | struct fotg210_stats stats; | ||
171 | # define COUNT(x) ((x)++) | ||
172 | #else | ||
173 | # define COUNT(x) | ||
174 | #endif | ||
175 | |||
176 | /* debug files */ | ||
177 | #ifdef DEBUG | ||
178 | struct dentry *debug_dir; | ||
179 | #endif | ||
180 | }; | ||
181 | |||
182 | /* convert between an HCD pointer and the corresponding FOTG210_HCD */ | ||
183 | static inline struct fotg210_hcd *hcd_to_fotg210(struct usb_hcd *hcd) | ||
184 | { | ||
185 | return (struct fotg210_hcd *)(hcd->hcd_priv); | ||
186 | } | ||
187 | static inline struct usb_hcd *fotg210_to_hcd(struct fotg210_hcd *fotg210) | ||
188 | { | ||
189 | return container_of((void *) fotg210, struct usb_hcd, hcd_priv); | ||
190 | } | ||
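Both converters rely on the fotg210 state being allocated inline at hcd->hcd_priv by usb_create_hcd(), which assumes the hc_driver sets hcd_priv_size to sizeof(struct fotg210_hcd); a quick sanity sketch:

	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);

	/* the round trip must give back the original hcd */
	WARN_ON(fotg210_to_hcd(fotg210) != hcd);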
191 | |||
192 | /*-------------------------------------------------------------------------*/ | ||
193 | |||
194 | /* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ | ||
195 | |||
196 | /* Section 2.2 Host Controller Capability Registers */ | ||
197 | struct fotg210_caps { | ||
198 | /* these fields are specified as 8 and 16 bit registers, | ||
199 | * but some hosts can't perform 8 or 16 bit PCI accesses. | ||
200 | * some hosts treat caplength and hciversion as parts of a 32-bit | ||
201 | * register, others treat them as two separate registers, this | ||
202 | * affects the memory map for big endian controllers. | ||
203 | */ | ||
204 | u32 hc_capbase; | ||
205 | #define HC_LENGTH(fotg210, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \ | ||
206 | (fotg210_big_endian_capbase(fotg210) ? 24 : 0))) | ||
207 | #define HC_VERSION(fotg210, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \ | ||
208 | (fotg210_big_endian_capbase(fotg210) ? 0 : 16))) | ||
209 | u32 hcs_params; /* HCSPARAMS - offset 0x4 */ | ||
210 | #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ | ||
211 | |||
212 | u32 hcc_params; /* HCCPARAMS - offset 0x8 */ | ||
213 | #define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */ | ||
214 | #define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/ | ||
215 | u8 portroute[8]; /* nibbles for routing - offset 0xC */ | ||
216 | }; | ||
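A sketch of how these capability macros are applied to the raw registers during setup (this mirrors, rather than quotes, the driver's init path):

	u32 capbase = fotg210_readl(fotg210, &fotg210->caps->hc_capbase);
	u32 params  = fotg210_readl(fotg210, &fotg210->caps->hcs_params);

	unsigned caplength = HC_LENGTH(fotg210, capbase);	/* offset to op regs */
	unsigned version   = HC_VERSION(fotg210, capbase);	/* BCD, e.g. 0x0100 */
	unsigned n_ports   = HCS_N_PORTS(params);	/* expected to read 1 here */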
217 | |||
218 | |||
219 | /* Section 2.3 Host Controller Operational Registers */ | ||
220 | struct fotg210_regs { | ||
221 | |||
222 | /* USBCMD: offset 0x00 */ | ||
223 | u32 command; | ||
224 | |||
225 | /* EHCI 1.1 addendum */ | ||
226 | /* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */ | ||
227 | #define CMD_PARK (1<<11) /* enable "park" on async qh */ | ||
228 | #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */ | ||
229 | #define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */ | ||
230 | #define CMD_ASE (1<<5) /* async schedule enable */ | ||
231 | #define CMD_PSE (1<<4) /* periodic schedule enable */ | ||
232 | /* 3:2 is periodic frame list size */ | ||
233 | #define CMD_RESET (1<<1) /* reset HC not bus */ | ||
234 | #define CMD_RUN (1<<0) /* start/stop HC */ | ||
235 | |||
236 | /* USBSTS: offset 0x04 */ | ||
237 | u32 status; | ||
238 | #define STS_ASS (1<<15) /* Async Schedule Status */ | ||
239 | #define STS_PSS (1<<14) /* Periodic Schedule Status */ | ||
240 | #define STS_RECL (1<<13) /* Reclamation */ | ||
241 | #define STS_HALT (1<<12) /* Not running (any reason) */ | ||
242 | /* some bits reserved */ | ||
243 | /* these STS_* flags are also intr_enable bits (USBINTR) */ | ||
244 | #define STS_IAA (1<<5) /* Interrupted on async advance */ | ||
245 | #define STS_FATAL (1<<4) /* such as some PCI access errors */ | ||
246 | #define STS_FLR (1<<3) /* frame list rolled over */ | ||
247 | #define STS_PCD (1<<2) /* port change detect */ | ||
248 | #define STS_ERR (1<<1) /* "error" completion (overflow, ...) */ | ||
249 | #define STS_INT (1<<0) /* "normal" completion (short, ...) */ | ||
250 | |||
251 | /* USBINTR: offset 0x08 */ | ||
252 | u32 intr_enable; | ||
253 | |||
254 | /* FRINDEX: offset 0x0C */ | ||
255 | u32 frame_index; /* current microframe number */ | ||
256 | /* CTRLDSSEGMENT: offset 0x10 */ | ||
257 | u32 segment; /* address bits 63:32 if needed */ | ||
258 | /* PERIODICLISTBASE: offset 0x14 */ | ||
259 | u32 frame_list; /* points to periodic list */ | ||
260 | /* ASYNCLISTADDR: offset 0x18 */ | ||
261 | u32 async_next; /* address of next async queue head */ | ||
262 | |||
263 | u32 reserved1; | ||
264 | /* PORTSC: offset 0x20 */ | ||
265 | u32 port_status; | ||
266 | /* 31:23 reserved */ | ||
267 | #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ | ||
268 | #define PORT_RESET (1<<8) /* reset port */ | ||
269 | #define PORT_SUSPEND (1<<7) /* suspend port */ | ||
270 | #define PORT_RESUME (1<<6) /* resume it */ | ||
271 | #define PORT_PEC (1<<3) /* port enable change */ | ||
272 | #define PORT_PE (1<<2) /* port enable */ | ||
273 | #define PORT_CSC (1<<1) /* connect status change */ | ||
274 | #define PORT_CONNECT (1<<0) /* device connected */ | ||
275 | #define PORT_RWC_BITS (PORT_CSC | PORT_PEC) | ||
276 | u32 reserved2[19]; | ||
277 | |||
278 | /* OTGCSR: offset 0x70 */ | ||
279 | u32 otgcsr; | ||
280 | #define OTGCSR_HOST_SPD_TYP (3 << 22) | ||
281 | #define OTGCSR_A_BUS_DROP (1 << 5) | ||
282 | #define OTGCSR_A_BUS_REQ (1 << 4) | ||
283 | |||
284 | /* OTGISR: offset 0x74 */ | ||
285 | u32 otgisr; | ||
286 | #define OTGISR_OVC (1 << 10) | ||
287 | |||
288 | u32 reserved3[15]; | ||
289 | |||
290 | /* GMIR: offset 0xB4 */ | ||
291 | u32 gmir; | ||
292 | #define GMIR_INT_POLARITY (1 << 3) /* Active High */ | ||
293 | #define GMIR_MHC_INT (1 << 2) | ||
294 | #define GMIR_MOTG_INT (1 << 1) | ||
295 | #define GMIR_MDEV_INT (1 << 0) | ||
296 | }; | ||
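To illustrate how the USBCMD and USBSTS bits pair up, a hedged sketch of halting the controller (the real driver bounds the wait with its handshake/hrtimer machinery instead of spinning forever):

	u32 cmd = fotg210_readl(fotg210, &fotg210->regs->command);

	cmd &= ~CMD_RUN;	/* ask the HC to stop */
	fotg210_writel(fotg210, cmd, &fotg210->regs->command);

	/* the HC sets STS_HALT in USBSTS once it has actually stopped */
	while (!(fotg210_readl(fotg210, &fotg210->regs->status) & STS_HALT))
		cpu_relax();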
297 | |||
298 | /* Appendix C, Debug port ... intended for use with special "debug devices" | ||
299 | * that can help if there's no serial console. (nonstandard enumeration.) | ||
300 | */ | ||
301 | struct fotg210_dbg_port { | ||
302 | u32 control; | ||
303 | #define DBGP_OWNER (1<<30) | ||
304 | #define DBGP_ENABLED (1<<28) | ||
305 | #define DBGP_DONE (1<<16) | ||
306 | #define DBGP_INUSE (1<<10) | ||
307 | #define DBGP_ERRCODE(x) (((x)>>7)&0x07) | ||
308 | # define DBGP_ERR_BAD 1 | ||
309 | # define DBGP_ERR_SIGNAL 2 | ||
310 | #define DBGP_ERROR (1<<6) | ||
311 | #define DBGP_GO (1<<5) | ||
312 | #define DBGP_OUT (1<<4) | ||
313 | #define DBGP_LEN(x) (((x)>>0)&0x0f) | ||
314 | u32 pids; | ||
315 | #define DBGP_PID_GET(x) (((x)>>16)&0xff) | ||
316 | #define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) | ||
317 | u32 data03; | ||
318 | u32 data47; | ||
319 | u32 address; | ||
320 | #define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) | ||
321 | }; | ||
322 | |||
323 | #ifdef CONFIG_EARLY_PRINTK_DBGP | ||
324 | #include <linux/init.h> | ||
325 | extern int __init early_dbgp_init(char *s); | ||
326 | extern struct console early_dbgp_console; | ||
327 | #endif /* CONFIG_EARLY_PRINTK_DBGP */ | ||
328 | |||
329 | struct usb_hcd; | ||
330 | |||
331 | static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd) | ||
332 | { | ||
333 | return 1; /* Shouldn't this be 0? */ | ||
334 | } | ||
335 | |||
336 | static inline int xen_dbgp_external_startup(struct usb_hcd *hcd) | ||
337 | { | ||
338 | return -1; | ||
339 | } | ||
340 | |||
341 | #ifdef CONFIG_EARLY_PRINTK_DBGP | ||
342 | /* Call backs from fotg210 host driver to fotg210 debug driver */ | ||
343 | extern int dbgp_external_startup(struct usb_hcd *); | ||
344 | extern int dbgp_reset_prep(struct usb_hcd *hcd); | ||
345 | #else | ||
346 | static inline int dbgp_reset_prep(struct usb_hcd *hcd) | ||
347 | { | ||
348 | return xen_dbgp_reset_prep(hcd); | ||
349 | } | ||
350 | static inline int dbgp_external_startup(struct usb_hcd *hcd) | ||
351 | { | ||
352 | return xen_dbgp_external_startup(hcd); | ||
353 | } | ||
354 | #endif | ||
355 | |||
356 | /*-------------------------------------------------------------------------*/ | ||
357 | |||
358 | #define QTD_NEXT(fotg210, dma) cpu_to_hc32(fotg210, (u32)dma) | ||
359 | |||
360 | /* | ||
361 | * EHCI Specification 0.95 Section 3.5 | ||
362 | * QTD: describe data transfer components (buffer, direction, ...) | ||
363 | * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram". | ||
364 | * | ||
365 | * These are associated only with "QH" (Queue Head) structures, | ||
366 | * used with control, bulk, and interrupt transfers. | ||
367 | */ | ||
368 | struct fotg210_qtd { | ||
369 | /* first part defined by EHCI spec */ | ||
370 | __hc32 hw_next; /* see EHCI 3.5.1 */ | ||
371 | __hc32 hw_alt_next; /* see EHCI 3.5.2 */ | ||
372 | __hc32 hw_token; /* see EHCI 3.5.3 */ | ||
373 | #define QTD_TOGGLE (1 << 31) /* data toggle */ | ||
374 | #define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff) | ||
375 | #define QTD_IOC (1 << 15) /* interrupt on complete */ | ||
376 | #define QTD_CERR(tok) (((tok)>>10) & 0x3) | ||
377 | #define QTD_PID(tok) (((tok)>>8) & 0x3) | ||
378 | #define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */ | ||
379 | #define QTD_STS_HALT (1 << 6) /* halted on error */ | ||
380 | #define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */ | ||
381 | #define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */ | ||
382 | #define QTD_STS_XACT (1 << 3) /* device gave illegal response */ | ||
383 | #define QTD_STS_MMF (1 << 2) /* incomplete split transaction */ | ||
384 | #define QTD_STS_STS (1 << 1) /* split transaction state */ | ||
385 | #define QTD_STS_PING (1 << 0) /* issue PING? */ | ||
386 | |||
387 | #define ACTIVE_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_ACTIVE) | ||
388 | #define HALT_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_HALT) | ||
389 | #define STATUS_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_STS) | ||
390 | |||
391 | __hc32 hw_buf[5]; /* see EHCI 3.5.4 */ | ||
392 | __hc32 hw_buf_hi[5]; /* Appendix B */ | ||
393 | |||
394 | /* the rest is HCD-private */ | ||
395 | dma_addr_t qtd_dma; /* qtd address */ | ||
396 | struct list_head qtd_list; /* sw qtd list */ | ||
397 | struct urb *urb; /* qtd's urb */ | ||
398 | size_t length; /* length of buffer */ | ||
399 | } __aligned(32); | ||
400 | |||
401 | /* mask NakCnt+T in qh->hw_alt_next */ | ||
402 | #define QTD_MASK(fotg210) cpu_to_hc32(fotg210, ~0x1f) | ||
403 | |||
404 | #define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1) | ||
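A sketch of how the token accessors above combine when a qtd completes; handle_halted_qtd() is a hypothetical helper, not part of this driver, and real completion handling checks every status bit:

	u32 token = hc32_to_cpu(fotg210, qtd->hw_token);

	if (token & QTD_STS_HALT) {
		/* queue halted: babble, transaction error, CERR exhausted ... */
		handle_halted_qtd(qtd);		/* hypothetical */
	} else if (IS_SHORT_READ(token)) {
		/* IN transfer ended early; QTD_LENGTH() is the untransferred residue */
		qtd->urb->actual_length += qtd->length - QTD_LENGTH(token);
	}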
405 | |||
406 | /*-------------------------------------------------------------------------*/ | ||
407 | |||
408 | /* type tag from {qh,itd,fstn}->hw_next */ | ||
409 | #define Q_NEXT_TYPE(fotg210, dma) ((dma) & cpu_to_hc32(fotg210, 3 << 1)) | ||
410 | |||
411 | /* | ||
412 | * Now the following defines are not converted using the | ||
413 | * cpu_to_le32() macro anymore, since we have to support | ||
414 | * "dynamic" switching between be and le support, so that the driver | ||
415 | * can be used on one system with SoC EHCI controller using big-endian | ||
416 | * descriptors as well as a normal little-endian PCI EHCI controller. | ||
417 | */ | ||
418 | /* values for that type tag */ | ||
419 | #define Q_TYPE_ITD (0 << 1) | ||
420 | #define Q_TYPE_QH (1 << 1) | ||
421 | #define Q_TYPE_SITD (2 << 1) | ||
422 | #define Q_TYPE_FSTN (3 << 1) | ||
423 | |||
424 | /* next async queue entry, or pointer to interrupt/periodic QH */ | ||
425 | #define QH_NEXT(fotg210, dma) \ | ||
426 | (cpu_to_hc32(fotg210, (((u32)dma)&~0x01f)|Q_TYPE_QH)) | ||
427 | |||
428 | /* for periodic/async schedules and qtd lists, mark end of list */ | ||
429 | #define FOTG210_LIST_END(fotg210) \ | ||
430 | cpu_to_hc32(fotg210, 1) /* "null pointer" to hw */ | ||
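A sketch of how QH_NEXT() is used when splicing a qh into the async ring, mirroring the EHCI-style link step (head is assumed to be fotg210->async):

	struct fotg210_qh *head = fotg210->async;

	/* splice qh in right after the async list head */
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	wmb();				/* let the HC see a fully built qh first */
	head->qh_next.qh = qh;
	head->hw->hw_next = QH_NEXT(fotg210, qh->qh_dma);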
431 | |||
432 | /* | ||
433 | * Entries in periodic shadow table are pointers to one of four kinds | ||
434 | * of data structure. That's dictated by the hardware; a type tag is | ||
435 | * encoded in the low bits of the hardware's periodic schedule. Use | ||
436 | * Q_NEXT_TYPE to get the tag. | ||
437 | * | ||
438 | * For entries in the async schedule, the type tag always says "qh". | ||
439 | */ | ||
440 | union fotg210_shadow { | ||
441 | struct fotg210_qh *qh; /* Q_TYPE_QH */ | ||
442 | struct fotg210_itd *itd; /* Q_TYPE_ITD */ | ||
443 | struct fotg210_fstn *fstn; /* Q_TYPE_FSTN */ | ||
444 | __hc32 *hw_next; /* (all types) */ | ||
445 | void *ptr; | ||
446 | }; | ||
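A hedged sketch of the body of a scan helper that walks one periodic slot with this union and the Q_NEXT_TYPE tag (per-type work omitted; frame is assumed to be a valid index, and FSTNs are never generated by this driver):

	union fotg210_shadow *prev = &fotg210->pshadow[frame];
	__hc32 *hw_p = &fotg210->periodic[frame];

	while (prev->ptr) {
		switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) {
		case Q_TYPE_QH:
			hw_p = &prev->qh->hw->hw_next;
			prev = &prev->qh->qh_next;
			break;
		case Q_TYPE_ITD:
			hw_p = &prev->itd->hw_next;
			prev = &prev->itd->itd_next;
			break;
		default:	/* Q_TYPE_FSTN: not used here */
			return;
		}
	}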
447 | |||
448 | /*-------------------------------------------------------------------------*/ | ||
449 | |||
450 | /* | ||
451 | * EHCI Specification 0.95 Section 3.6 | ||
452 | * QH: describes control/bulk/interrupt endpoints | ||
453 | * See Fig 3-7 "Queue Head Structure Layout". | ||
454 | * | ||
455 | * These appear in both the async and (for interrupt) periodic schedules. | ||
456 | */ | ||
457 | |||
458 | /* first part defined by EHCI spec */ | ||
459 | struct fotg210_qh_hw { | ||
460 | __hc32 hw_next; /* see EHCI 3.6.1 */ | ||
461 | __hc32 hw_info1; /* see EHCI 3.6.2 */ | ||
462 | #define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */ | ||
463 | #define QH_HEAD (1 << 15) /* Head of async reclamation list */ | ||
464 | #define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */ | ||
465 | #define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */ | ||
466 | #define QH_LOW_SPEED (1 << 12) | ||
467 | #define QH_FULL_SPEED (0 << 12) | ||
468 | #define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */ | ||
469 | __hc32 hw_info2; /* see EHCI 3.6.2 */ | ||
470 | #define QH_SMASK 0x000000ff | ||
471 | #define QH_CMASK 0x0000ff00 | ||
472 | #define QH_HUBADDR 0x007f0000 | ||
473 | #define QH_HUBPORT 0x3f800000 | ||
474 | #define QH_MULT 0xc0000000 | ||
475 | __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */ | ||
476 | |||
477 | /* qtd overlay (hardware parts of a struct fotg210_qtd) */ | ||
478 | __hc32 hw_qtd_next; | ||
479 | __hc32 hw_alt_next; | ||
480 | __hc32 hw_token; | ||
481 | __hc32 hw_buf[5]; | ||
482 | __hc32 hw_buf_hi[5]; | ||
483 | } __aligned(32); | ||
484 | |||
485 | struct fotg210_qh { | ||
486 | struct fotg210_qh_hw *hw; /* Must come first */ | ||
487 | /* the rest is HCD-private */ | ||
488 | dma_addr_t qh_dma; /* address of qh */ | ||
489 | union fotg210_shadow qh_next; /* ptr to qh; or periodic */ | ||
490 | struct list_head qtd_list; /* sw qtd list */ | ||
491 | struct list_head intr_node; /* list of intr QHs */ | ||
492 | struct fotg210_qtd *dummy; | ||
493 | struct fotg210_qh *unlink_next; /* next on unlink list */ | ||
494 | |||
495 | unsigned unlink_cycle; | ||
496 | |||
497 | u8 needs_rescan; /* Dequeue during giveback */ | ||
498 | u8 qh_state; | ||
499 | #define QH_STATE_LINKED 1 /* HC sees this */ | ||
500 | #define QH_STATE_UNLINK 2 /* HC may still see this */ | ||
501 | #define QH_STATE_IDLE 3 /* HC doesn't see this */ | ||
502 | #define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */ | ||
503 | #define QH_STATE_COMPLETING 5 /* don't touch token.HALT */ | ||
504 | |||
505 | u8 xacterrs; /* XactErr retry counter */ | ||
506 | #define QH_XACTERR_MAX 32 /* XactErr retry limit */ | ||
507 | |||
508 | /* periodic schedule info */ | ||
509 | u8 usecs; /* intr bandwidth */ | ||
510 | u8 gap_uf; /* uframes split/csplit gap */ | ||
511 | u8 c_usecs; /* ... split completion bw */ | ||
512 | u16 tt_usecs; /* tt downstream bandwidth */ | ||
513 | unsigned short period; /* polling interval */ | ||
514 | unsigned short start; /* where polling starts */ | ||
515 | #define NO_FRAME ((unsigned short)~0) /* pick new start */ | ||
516 | |||
517 | struct usb_device *dev; /* access to TT */ | ||
518 | unsigned is_out:1; /* bulk or intr OUT */ | ||
519 | unsigned clearing_tt:1; /* Clear-TT-Buf in progress */ | ||
520 | }; | ||
521 | |||
522 | /*-------------------------------------------------------------------------*/ | ||
523 | |||
524 | /* description of one iso transaction (up to 3 KB data if highspeed) */ | ||
525 | struct fotg210_iso_packet { | ||
526 | /* These will be copied to iTD when scheduling */ | ||
527 | u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */ | ||
528 | __hc32 transaction; /* itd->hw_transaction[i] |= */ | ||
529 | u8 cross; /* buf crosses pages */ | ||
530 | /* for full speed OUT splits */ | ||
531 | u32 buf1; | ||
532 | }; | ||
533 | |||
534 | /* temporary schedule data for packets from iso urbs (both speeds) | ||
535 | * each packet is one logical usb transaction to the device (not TT), | ||
536 | * beginning at stream->next_uframe | ||
537 | */ | ||
538 | struct fotg210_iso_sched { | ||
539 | struct list_head td_list; | ||
540 | unsigned span; | ||
541 | struct fotg210_iso_packet packet[0]; | ||
542 | }; | ||
543 | |||
544 | /* | ||
545 | * fotg210_iso_stream - groups all (s)itds for this endpoint. | ||
546 | * acts like a qh would, if EHCI had them for ISO. | ||
547 | */ | ||
548 | struct fotg210_iso_stream { | ||
549 | /* first field matches fotg210_qh, but is NULL */ | ||
550 | struct fotg210_qh_hw *hw; | ||
551 | |||
552 | u8 bEndpointAddress; | ||
553 | u8 highspeed; | ||
554 | struct list_head td_list; /* queued itds */ | ||
555 | struct list_head free_list; /* list of unused itds */ | ||
556 | struct usb_device *udev; | ||
557 | struct usb_host_endpoint *ep; | ||
558 | |||
559 | /* output of (re)scheduling */ | ||
560 | int next_uframe; | ||
561 | __hc32 splits; | ||
562 | |||
563 | /* the rest is derived from the endpoint descriptor, | ||
564 | * trusting urb->interval == f(epdesc->bInterval) and | ||
565 | * including the extra info for hw_bufp[0..2] | ||
566 | */ | ||
567 | u8 usecs, c_usecs; | ||
568 | u16 interval; | ||
569 | u16 tt_usecs; | ||
570 | u16 maxp; | ||
571 | u16 raw_mask; | ||
572 | unsigned bandwidth; | ||
573 | |||
574 | /* This is used to initialize iTD's hw_bufp fields */ | ||
575 | __hc32 buf0; | ||
576 | __hc32 buf1; | ||
577 | __hc32 buf2; | ||
578 | |||
579 | /* this is used to initialize sITD's tt info */ | ||
580 | __hc32 address; | ||
581 | }; | ||
582 | |||
583 | /*-------------------------------------------------------------------------*/ | ||
584 | |||
585 | /* | ||
586 | * EHCI Specification 0.95 Section 3.3 | ||
587 | * Fig 3-4 "Isochronous Transaction Descriptor (iTD)" | ||
588 | * | ||
589 | * Schedule records for high speed iso xfers | ||
590 | */ | ||
591 | struct fotg210_itd { | ||
592 | /* first part defined by EHCI spec */ | ||
593 | __hc32 hw_next; /* see EHCI 3.3.1 */ | ||
594 | __hc32 hw_transaction[8]; /* see EHCI 3.3.2 */ | ||
595 | #define FOTG210_ISOC_ACTIVE (1<<31) /* activate transfer this slot */ | ||
596 | #define FOTG210_ISOC_BUF_ERR (1<<30) /* Data buffer error */ | ||
597 | #define FOTG210_ISOC_BABBLE (1<<29) /* babble detected */ | ||
598 | #define FOTG210_ISOC_XACTERR (1<<28) /* XactErr - transaction error */ | ||
599 | #define FOTG210_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff) | ||
600 | #define FOTG210_ITD_IOC (1 << 15) /* interrupt on complete */ | ||
601 | |||
602 | #define ITD_ACTIVE(fotg210) cpu_to_hc32(fotg210, FOTG210_ISOC_ACTIVE) | ||
603 | |||
604 | __hc32 hw_bufp[7]; /* see EHCI 3.3.3 */ | ||
605 | __hc32 hw_bufp_hi[7]; /* Appendix B */ | ||
606 | |||
607 | /* the rest is HCD-private */ | ||
608 | dma_addr_t itd_dma; /* for this itd */ | ||
609 | union fotg210_shadow itd_next; /* ptr to periodic q entry */ | ||
610 | |||
611 | struct urb *urb; | ||
612 | struct fotg210_iso_stream *stream; /* endpoint's queue */ | ||
613 | struct list_head itd_list; /* list of stream's itds */ | ||
614 | |||
615 | /* any/all hw_transactions here may be used by that urb */ | ||
616 | unsigned frame; /* where scheduled */ | ||
617 | unsigned pg; | ||
618 | unsigned index[8]; /* in urb->iso_frame_desc */ | ||
619 | } __aligned(32); | ||
620 | |||
621 | /*-------------------------------------------------------------------------*/ | ||
622 | |||
623 | /* | ||
624 | * EHCI Specification 0.96 Section 3.7 | ||
625 | * Periodic Frame Span Traversal Node (FSTN) | ||
626 | * | ||
627 | * Manages split interrupt transactions (using TT) that span frame boundaries | ||
628 | * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN | ||
629 | * makes the HC jump (back) to a QH to scan for fs/ls QH completions until | ||
630 | * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work. | ||
631 | */ | ||
632 | struct fotg210_fstn { | ||
633 | __hc32 hw_next; /* any periodic q entry */ | ||
634 | __hc32 hw_prev; /* qh or FOTG210_LIST_END */ | ||
635 | |||
636 | /* the rest is HCD-private */ | ||
637 | dma_addr_t fstn_dma; | ||
638 | union fotg210_shadow fstn_next; /* ptr to periodic q entry */ | ||
639 | } __aligned(32); | ||
640 | |||
641 | /*-------------------------------------------------------------------------*/ | ||
642 | |||
643 | /* Prepare the PORTSC wakeup flags during controller suspend/resume */ | ||
644 | |||
645 | #define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \ | ||
646 | fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup); | ||
647 | |||
648 | #define fotg210_prepare_ports_for_controller_resume(fotg210) \ | ||
649 | fotg210_adjust_port_wakeup_flags(fotg210, false, false); | ||
650 | |||
651 | /*-------------------------------------------------------------------------*/ | ||
652 | |||
653 | /* | ||
654 | * Some EHCI controllers have a Transaction Translator built into the | ||
655 | * root hub. This is a non-standard feature. Each controller will need | ||
656 | * to add code to the following inline functions, and call them as | ||
657 | * needed (mostly in root hub code). | ||
658 | */ | ||
659 | |||
660 | static inline unsigned int | ||
661 | fotg210_get_speed(struct fotg210_hcd *fotg210, unsigned int portsc) | ||
662 | { | ||
663 | return (readl(&fotg210->regs->otgcsr) | ||
664 | & OTGCSR_HOST_SPD_TYP) >> 22; | ||
665 | } | ||
666 | |||
667 | /* Returns the speed of a device attached to a port on the root hub. */ | ||
668 | static inline unsigned int | ||
669 | fotg210_port_speed(struct fotg210_hcd *fotg210, unsigned int portsc) | ||
670 | { | ||
671 | switch (fotg210_get_speed(fotg210, portsc)) { | ||
672 | case 0: | ||
673 | return 0; | ||
674 | case 1: | ||
675 | return USB_PORT_STAT_LOW_SPEED; | ||
676 | case 2: | ||
677 | default: | ||
678 | return USB_PORT_STAT_HIGH_SPEED; | ||
679 | } | ||
680 | } | ||
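In GetPortStatus handling the returned value is simply OR-ed into the wPortStatus bits; a sketch, where portsc and status are local variables of the hub-control path:

	if (portsc & PORT_CONNECT) {
		status |= USB_PORT_STAT_CONNECTION;
		/* 0 means full speed; low/high speed set the matching bit */
		status |= fotg210_port_speed(fotg210, portsc);
	}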
681 | |||
682 | /*-------------------------------------------------------------------------*/ | ||
683 | |||
684 | #define fotg210_has_fsl_portno_bug(e) (0) | ||
685 | |||
686 | /* | ||
687 | * While most USB host controllers implement their registers in | ||
688 | * little-endian format, a minority (celleb companion chip) implement | ||
689 | * them in big endian format. | ||
690 | * | ||
691 | * This attempts to support either format at compile time without a | ||
692 | * runtime penalty, or both formats with the additional overhead | ||
693 | * of checking a flag bit. | ||
694 | * | ||
695 | */ | ||
696 | |||
697 | #define fotg210_big_endian_mmio(e) 0 | ||
698 | #define fotg210_big_endian_capbase(e) 0 | ||
699 | |||
700 | static inline unsigned int fotg210_readl(const struct fotg210_hcd *fotg210, | ||
701 | __u32 __iomem *regs) | ||
702 | { | ||
703 | return readl(regs); | ||
704 | } | ||
705 | |||
706 | static inline void fotg210_writel(const struct fotg210_hcd *fotg210, | ||
707 | const unsigned int val, __u32 __iomem *regs) | ||
708 | { | ||
709 | writel(val, regs); | ||
710 | } | ||
711 | |||
712 | /* cpu to fotg210 */ | ||
713 | static inline __hc32 cpu_to_hc32(const struct fotg210_hcd *fotg210, const u32 x) | ||
714 | { | ||
715 | return cpu_to_le32(x); | ||
716 | } | ||
717 | |||
718 | /* fotg210 to cpu */ | ||
719 | static inline u32 hc32_to_cpu(const struct fotg210_hcd *fotg210, const __hc32 x) | ||
720 | { | ||
721 | return le32_to_cpu(x); | ||
722 | } | ||
723 | |||
724 | static inline u32 hc32_to_cpup(const struct fotg210_hcd *fotg210, | ||
725 | const __hc32 *x) | ||
726 | { | ||
727 | return le32_to_cpup(x); | ||
728 | } | ||
729 | |||
730 | /*-------------------------------------------------------------------------*/ | ||
731 | |||
732 | static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210) | ||
733 | { | ||
734 | return fotg210_readl(fotg210, &fotg210->regs->frame_index); | ||
735 | } | ||
736 | |||
737 | #define fotg210_itdlen(urb, desc, t) ({ \ | ||
738 | usb_pipein((urb)->pipe) ? \ | ||
739 | (desc)->length - FOTG210_ITD_LENGTH(t) : \ | ||
740 | FOTG210_ITD_LENGTH(t); \ | ||
741 | }) | ||
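A sketch of how fotg210_itdlen() would be applied per microframe when an iTD completes; unused slots are assumed to carry index -1, following the EHCI-derived scan, and error bits are ignored for brevity:

	unsigned uframe;

	for (uframe = 0; uframe < 8; uframe++) {
		u32 t = hc32_to_cpu(fotg210, itd->hw_transaction[uframe]);
		struct usb_iso_packet_descriptor *desc;

		if (itd->index[uframe] == -1)	/* slot not used by this urb */
			continue;
		desc = &itd->urb->iso_frame_desc[itd->index[uframe]];
		desc->actual_length = fotg210_itdlen(itd->urb, desc, t);
	}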
742 | /*-------------------------------------------------------------------------*/ | ||
743 | |||
744 | #ifndef DEBUG | ||
745 | #define STUB_DEBUG_FILES | ||
746 | #endif /* DEBUG */ | ||
747 | |||
748 | /*-------------------------------------------------------------------------*/ | ||
749 | |||
750 | #endif /* __LINUX_FOTG210_H */ | ||