path: root/drivers
author	Yuan-Hsin Chen <yuanlmm@gmail.com>	2013-05-17 06:14:14 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-05-17 13:12:52 -0400
commit	6c920bfb98d07a883918733075a4bc4287e36946 (patch)
tree	abcc093ac580efd2f8444dc37e7b5eb3cecd75d8 /drivers
parent	a1fefaab1bbf9cd409cb11887953567d0a8faeb6 (diff)
usb host: Faraday USB2.0 FUSBH200-HCD driver
FUSBH200-HCD is a USB 2.0 HCD for the Faraday FUSBH200. The FUSBH200 is an EHCI-like controller with some differences. First, the register layout of the FUSBH200 is incompatible with EHCI. Furthermore, the FUSBH200 lacks siTDs, which means iTDs are used for both HS and FS isochronous transfers.
Signed-off-by: Yuan-Hsin Chen <yhchen@faraday-tech.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/usb/Makefile	1
-rw-r--r--	drivers/usb/host/Kconfig	11
-rw-r--r--	drivers/usb/host/Makefile	1
-rw-r--r--	drivers/usb/host/fusbh200-hcd.c	5975
-rw-r--r--	drivers/usb/host/fusbh200.h	743
5 files changed, 6731 insertions, 0 deletions
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index c41feba8d5c0..238c5d47cadb 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_USB_HWA_HCD) += host/
 obj-$(CONFIG_USB_ISP1760_HCD)	+= host/
 obj-$(CONFIG_USB_IMX21_HCD)	+= host/
 obj-$(CONFIG_USB_FSL_MPH_DR_OF)	+= host/
+obj-$(CONFIG_USB_FUSBH200_HCD)	+= host/
 
 obj-$(CONFIG_USB_C67X00_HCD)	+= c67x00/
 
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index de94f2699063..605e2773aad4 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -345,6 +345,17 @@ config USB_ISP1362_HCD
 	  To compile this driver as a module, choose M here: the
 	  module will be called isp1362-hcd.
 
+config USB_FUSBH200_HCD
+	tristate "FUSBH200 HCD support"
+	depends on USB
+	default N
+	---help---
+	  Faraday FUSBH200 is designed to meet USB2.0 EHCI specification
+	  with minor modification.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called fusbh200-hcd.
+
 config USB_OHCI_HCD
 	tristate "OHCI HCD support"
 	depends on USB_ARCH_HAS_OHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 4fb73c156d72..58b7ae87efae 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -52,3 +52,4 @@ obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
 obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
 obj-$(CONFIG_USB_HCD_BCMA)	+= bcma-hcd.o
 obj-$(CONFIG_USB_HCD_SSB)	+= ssb-hcd.o
+obj-$(CONFIG_USB_FUSBH200_HCD)	+= fusbh200-hcd.o
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
new file mode 100644
index 000000000000..79ce799406c9
--- /dev/null
+++ b/drivers/usb/host/fusbh200-hcd.c
@@ -0,0 +1,5975 @@
1/*
2 * Faraday FUSBH200 EHCI-like driver
3 *
4 * Copyright (c) 2013 Faraday Technology Corporation
5 *
6 * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
7 * Feng-Hsin Chiang <john453@faraday-tech.com>
8 * Po-Yu Chuang <ratbert.chuang@gmail.com>
9 *
 10 * Most of the code is borrowed from the Linux-3.7 EHCI driver
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
19 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software Foundation,
24 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <linux/module.h>
28#include <linux/device.h>
29#include <linux/dmapool.h>
30#include <linux/kernel.h>
31#include <linux/delay.h>
32#include <linux/ioport.h>
33#include <linux/sched.h>
34#include <linux/vmalloc.h>
35#include <linux/errno.h>
36#include <linux/init.h>
37#include <linux/hrtimer.h>
38#include <linux/list.h>
39#include <linux/interrupt.h>
40#include <linux/usb.h>
41#include <linux/usb/hcd.h>
42#include <linux/moduleparam.h>
43#include <linux/dma-mapping.h>
44#include <linux/debugfs.h>
45#include <linux/slab.h>
46#include <linux/uaccess.h>
47#include <linux/platform_device.h>
48
49#include <asm/byteorder.h>
50#include <asm/io.h>
51#include <asm/irq.h>
52#include <asm/unaligned.h>
53
54/*-------------------------------------------------------------------------*/
55#define DRIVER_AUTHOR "Yuan-Hsin Chen"
56#define DRIVER_DESC "FUSBH200 Host Controller (EHCI) Driver"
57
58static const char hcd_name [] = "fusbh200_hcd";
59
60#undef VERBOSE_DEBUG
61#undef FUSBH200_URB_TRACE
62
63#ifdef DEBUG
64#define FUSBH200_STATS
65#endif
66
67/* magic numbers that can affect system performance */
68#define FUSBH200_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
69#define FUSBH200_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
70#define FUSBH200_TUNE_RL_TT 0
71#define FUSBH200_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
72#define FUSBH200_TUNE_MULT_TT 1
73/*
74 * Some drivers think it's safe to schedule isochronous transfers more than
75 * 256 ms into the future (partly as a result of an old bug in the scheduling
76 * code). In an attempt to avoid trouble, we will use a minimum scheduling
77 * length of 512 frames instead of 256.
78 */
79#define FUSBH200_TUNE_FLS 1 /* (medium) 512-frame schedule */
80
81/* Initial IRQ latency: faster than hw default */
82static int log2_irq_thresh = 0; // 0 to 6
83module_param (log2_irq_thresh, int, S_IRUGO);
84MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
85
86/* initial park setting: slower than hw default */
87static unsigned park = 0;
88module_param (park, uint, S_IRUGO);
89MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
90
91/* for link power management(LPM) feature */
92static unsigned int hird;
93module_param(hird, int, S_IRUGO);
94MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
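
/*
 * Illustrative note, not from the original patch: the tunables above are
 * read-only (S_IRUGO) module parameters, so they can only be set when the
 * module is loaded, e.g.:
 *
 *	modprobe fusbh200-hcd log2_irq_thresh=3 park=3
 */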
95
96#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
97
98#include "fusbh200.h"
99
100/*-------------------------------------------------------------------------*/
101
102#define fusbh200_dbg(fusbh200, fmt, args...) \
103 dev_dbg (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
104#define fusbh200_err(fusbh200, fmt, args...) \
105 dev_err (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
106#define fusbh200_info(fusbh200, fmt, args...) \
107 dev_info (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
108#define fusbh200_warn(fusbh200, fmt, args...) \
109 dev_warn (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
110
111#ifdef VERBOSE_DEBUG
112# define fusbh200_vdbg fusbh200_dbg
113#else
114 static inline void fusbh200_vdbg(struct fusbh200_hcd *fusbh200, ...) {}
115#endif
116
117#ifdef DEBUG
118
119/* check the values in the HCSPARAMS register
120 * (host controller _Structural_ parameters)
121 * see EHCI spec, Table 2-4 for each value
122 */
123static void dbg_hcs_params (struct fusbh200_hcd *fusbh200, char *label)
124{
125 u32 params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
126
127 fusbh200_dbg (fusbh200,
128 "%s hcs_params 0x%x ports=%d\n",
129 label, params,
130 HCS_N_PORTS (params)
131 );
132}
133#else
134
135static inline void dbg_hcs_params (struct fusbh200_hcd *fusbh200, char *label) {}
136
137#endif
138
139#ifdef DEBUG
140
141/* check the values in the HCCPARAMS register
142 * (host controller _Capability_ parameters)
143 * see EHCI Spec, Table 2-5 for each value
144 * */
145static void dbg_hcc_params (struct fusbh200_hcd *fusbh200, char *label)
146{
147 u32 params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
148
149 fusbh200_dbg (fusbh200,
150 "%s hcc_params %04x uframes %s%s\n",
151 label,
152 params,
153 HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
154 HCC_CANPARK(params) ? " park" : "");
155}
156#else
157
158static inline void dbg_hcc_params (struct fusbh200_hcd *fusbh200, char *label) {}
159
160#endif
161
162#ifdef DEBUG
163
164static void __maybe_unused
165dbg_qtd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
166{
167 fusbh200_dbg(fusbh200, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
168 hc32_to_cpup(fusbh200, &qtd->hw_next),
169 hc32_to_cpup(fusbh200, &qtd->hw_alt_next),
170 hc32_to_cpup(fusbh200, &qtd->hw_token),
171 hc32_to_cpup(fusbh200, &qtd->hw_buf [0]));
172 if (qtd->hw_buf [1])
173 fusbh200_dbg(fusbh200, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
174 hc32_to_cpup(fusbh200, &qtd->hw_buf[1]),
175 hc32_to_cpup(fusbh200, &qtd->hw_buf[2]),
176 hc32_to_cpup(fusbh200, &qtd->hw_buf[3]),
177 hc32_to_cpup(fusbh200, &qtd->hw_buf[4]));
178}
179
180static void __maybe_unused
181dbg_qh (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
182{
183 struct fusbh200_qh_hw *hw = qh->hw;
184
185 fusbh200_dbg (fusbh200, "%s qh %p n%08x info %x %x qtd %x\n", label,
186 qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
187 dbg_qtd("overlay", fusbh200, (struct fusbh200_qtd *) &hw->hw_qtd_next);
188}
189
190static void __maybe_unused
191dbg_itd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd)
192{
193 fusbh200_dbg (fusbh200, "%s [%d] itd %p, next %08x, urb %p\n",
194 label, itd->frame, itd, hc32_to_cpu(fusbh200, itd->hw_next),
195 itd->urb);
196 fusbh200_dbg (fusbh200,
197 " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
198 hc32_to_cpu(fusbh200, itd->hw_transaction[0]),
199 hc32_to_cpu(fusbh200, itd->hw_transaction[1]),
200 hc32_to_cpu(fusbh200, itd->hw_transaction[2]),
201 hc32_to_cpu(fusbh200, itd->hw_transaction[3]),
202 hc32_to_cpu(fusbh200, itd->hw_transaction[4]),
203 hc32_to_cpu(fusbh200, itd->hw_transaction[5]),
204 hc32_to_cpu(fusbh200, itd->hw_transaction[6]),
205 hc32_to_cpu(fusbh200, itd->hw_transaction[7]));
206 fusbh200_dbg (fusbh200,
207 " buf: %08x %08x %08x %08x %08x %08x %08x\n",
208 hc32_to_cpu(fusbh200, itd->hw_bufp[0]),
209 hc32_to_cpu(fusbh200, itd->hw_bufp[1]),
210 hc32_to_cpu(fusbh200, itd->hw_bufp[2]),
211 hc32_to_cpu(fusbh200, itd->hw_bufp[3]),
212 hc32_to_cpu(fusbh200, itd->hw_bufp[4]),
213 hc32_to_cpu(fusbh200, itd->hw_bufp[5]),
214 hc32_to_cpu(fusbh200, itd->hw_bufp[6]));
215 fusbh200_dbg (fusbh200, " index: %d %d %d %d %d %d %d %d\n",
216 itd->index[0], itd->index[1], itd->index[2],
217 itd->index[3], itd->index[4], itd->index[5],
218 itd->index[6], itd->index[7]);
219}
220
221static int __maybe_unused
222dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
223{
224 return scnprintf (buf, len,
225 "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
226 label, label [0] ? " " : "", status,
227 (status & STS_ASS) ? " Async" : "",
228 (status & STS_PSS) ? " Periodic" : "",
229 (status & STS_RECL) ? " Recl" : "",
230 (status & STS_HALT) ? " Halt" : "",
231 (status & STS_IAA) ? " IAA" : "",
232 (status & STS_FATAL) ? " FATAL" : "",
233 (status & STS_FLR) ? " FLR" : "",
234 (status & STS_PCD) ? " PCD" : "",
235 (status & STS_ERR) ? " ERR" : "",
236 (status & STS_INT) ? " INT" : ""
237 );
238}
239
240static int __maybe_unused
241dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
242{
243 return scnprintf (buf, len,
244 "%s%sintrenable %02x%s%s%s%s%s%s",
245 label, label [0] ? " " : "", enable,
246 (enable & STS_IAA) ? " IAA" : "",
247 (enable & STS_FATAL) ? " FATAL" : "",
248 (enable & STS_FLR) ? " FLR" : "",
249 (enable & STS_PCD) ? " PCD" : "",
250 (enable & STS_ERR) ? " ERR" : "",
251 (enable & STS_INT) ? " INT" : ""
252 );
253}
254
255static const char *const fls_strings [] =
256 { "1024", "512", "256", "??" };
257
258static int
259dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
260{
261 return scnprintf (buf, len,
262 "%s%scommand %07x %s=%d ithresh=%d%s%s%s "
263 "period=%s%s %s",
264 label, label [0] ? " " : "", command,
265 (command & CMD_PARK) ? " park" : "(park)",
266 CMD_PARK_CNT (command),
267 (command >> 16) & 0x3f,
268 (command & CMD_IAAD) ? " IAAD" : "",
269 (command & CMD_ASE) ? " Async" : "",
270 (command & CMD_PSE) ? " Periodic" : "",
271 fls_strings [(command >> 2) & 0x3],
272 (command & CMD_RESET) ? " Reset" : "",
273 (command & CMD_RUN) ? "RUN" : "HALT"
274 );
275}
276
277static int
278dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
279{
280 char *sig;
281
282 /* signaling state */
283 switch (status & (3 << 10)) {
284 case 0 << 10: sig = "se0"; break;
285 case 1 << 10: sig = "k"; break; /* low speed */
286 case 2 << 10: sig = "j"; break;
287 default: sig = "?"; break;
288 }
289
290 return scnprintf (buf, len,
291 "%s%sport:%d status %06x %d "
292 "sig=%s%s%s%s%s%s%s%s",
293 label, label [0] ? " " : "", port, status,
294 status>>25,/*device address */
295 sig,
296 (status & PORT_RESET) ? " RESET" : "",
297 (status & PORT_SUSPEND) ? " SUSPEND" : "",
298 (status & PORT_RESUME) ? " RESUME" : "",
299 (status & PORT_PEC) ? " PEC" : "",
300 (status & PORT_PE) ? " PE" : "",
301 (status & PORT_CSC) ? " CSC" : "",
302 (status & PORT_CONNECT) ? " CONNECT" : "");
303}
304
305#else
306static inline void __maybe_unused
307dbg_qh (char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
308{}
309
310static inline int __maybe_unused
311dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
312{ return 0; }
313
314static inline int __maybe_unused
315dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
316{ return 0; }
317
318static inline int __maybe_unused
319dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
320{ return 0; }
321
322static inline int __maybe_unused
323dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
324{ return 0; }
325
326#endif /* DEBUG */
327
328/* functions have the "wrong" filename when they're output... */
329#define dbg_status(fusbh200, label, status) { \
330 char _buf [80]; \
331 dbg_status_buf (_buf, sizeof _buf, label, status); \
332 fusbh200_dbg (fusbh200, "%s\n", _buf); \
333}
334
335#define dbg_cmd(fusbh200, label, command) { \
336 char _buf [80]; \
337 dbg_command_buf (_buf, sizeof _buf, label, command); \
338 fusbh200_dbg (fusbh200, "%s\n", _buf); \
339}
340
341#define dbg_port(fusbh200, label, port, status) { \
342 char _buf [80]; \
343 dbg_port_buf (_buf, sizeof _buf, label, port, status); \
344 fusbh200_dbg (fusbh200, "%s\n", _buf); \
345}
346
347/*-------------------------------------------------------------------------*/
348
349#ifdef STUB_DEBUG_FILES
350
351static inline void create_debug_files (struct fusbh200_hcd *bus) { }
352static inline void remove_debug_files (struct fusbh200_hcd *bus) { }
353
354#else
355
356/* troubleshooting help: expose state in debugfs */
357
358static int debug_async_open(struct inode *, struct file *);
359static int debug_periodic_open(struct inode *, struct file *);
360static int debug_registers_open(struct inode *, struct file *);
361static int debug_async_open(struct inode *, struct file *);
362
363static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
364static int debug_close(struct inode *, struct file *);
365
366static const struct file_operations debug_async_fops = {
367 .owner = THIS_MODULE,
368 .open = debug_async_open,
369 .read = debug_output,
370 .release = debug_close,
371 .llseek = default_llseek,
372};
373static const struct file_operations debug_periodic_fops = {
374 .owner = THIS_MODULE,
375 .open = debug_periodic_open,
376 .read = debug_output,
377 .release = debug_close,
378 .llseek = default_llseek,
379};
380static const struct file_operations debug_registers_fops = {
381 .owner = THIS_MODULE,
382 .open = debug_registers_open,
383 .read = debug_output,
384 .release = debug_close,
385 .llseek = default_llseek,
386};
387
388static struct dentry *fusbh200_debug_root;
389
390struct debug_buffer {
391 ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
392 struct usb_bus *bus;
393 struct mutex mutex; /* protect filling of buffer */
394 size_t count; /* number of characters filled into buffer */
395 char *output_buf;
396 size_t alloc_size;
397};
398
399#define speed_char(info1) ({ char tmp; \
400 switch (info1 & (3 << 12)) { \
401 case QH_FULL_SPEED: tmp = 'f'; break; \
402 case QH_LOW_SPEED: tmp = 'l'; break; \
403 case QH_HIGH_SPEED: tmp = 'h'; break; \
404 default: tmp = '?'; break; \
405 }; tmp; })
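
/*
 * Illustrative note, not from the original patch: info1 bits 13:12 carry the
 * QH's endpoint speed, so e.g. a high-speed endpoint (EPS field == 2,
 * QH_HIGH_SPEED) is printed as 'h' by the macro above.
 */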
406
407static inline char token_mark(struct fusbh200_hcd *fusbh200, __hc32 token)
408{
409 __u32 v = hc32_to_cpu(fusbh200, token);
410
411 if (v & QTD_STS_ACTIVE)
412 return '*';
413 if (v & QTD_STS_HALT)
414 return '-';
415 if (!IS_SHORT_READ (v))
416 return ' ';
417 /* tries to advance through hw_alt_next */
418 return '/';
419}
420
421static void qh_lines (
422 struct fusbh200_hcd *fusbh200,
423 struct fusbh200_qh *qh,
424 char **nextp,
425 unsigned *sizep
426)
427{
428 u32 scratch;
429 u32 hw_curr;
430 struct list_head *entry;
431 struct fusbh200_qtd *td;
432 unsigned temp;
433 unsigned size = *sizep;
434 char *next = *nextp;
435 char mark;
436 __le32 list_end = FUSBH200_LIST_END(fusbh200);
437 struct fusbh200_qh_hw *hw = qh->hw;
438
439 if (hw->hw_qtd_next == list_end) /* NEC does this */
440 mark = '@';
441 else
442 mark = token_mark(fusbh200, hw->hw_token);
443 if (mark == '/') { /* qh_alt_next controls qh advance? */
444 if ((hw->hw_alt_next & QTD_MASK(fusbh200))
445 == fusbh200->async->hw->hw_alt_next)
446 mark = '#'; /* blocked */
447 else if (hw->hw_alt_next == list_end)
448 mark = '.'; /* use hw_qtd_next */
449 /* else alt_next points to some other qtd */
450 }
451 scratch = hc32_to_cpup(fusbh200, &hw->hw_info1);
452 hw_curr = (mark == '*') ? hc32_to_cpup(fusbh200, &hw->hw_current) : 0;
453 temp = scnprintf (next, size,
454 "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
455 qh, scratch & 0x007f,
456 speed_char (scratch),
457 (scratch >> 8) & 0x000f,
458 scratch, hc32_to_cpup(fusbh200, &hw->hw_info2),
459 hc32_to_cpup(fusbh200, &hw->hw_token), mark,
460 (cpu_to_hc32(fusbh200, QTD_TOGGLE) & hw->hw_token)
461 ? "data1" : "data0",
462 (hc32_to_cpup(fusbh200, &hw->hw_alt_next) >> 1) & 0x0f);
463 size -= temp;
464 next += temp;
465
466 /* hc may be modifying the list as we read it ... */
467 list_for_each (entry, &qh->qtd_list) {
468 td = list_entry (entry, struct fusbh200_qtd, qtd_list);
469 scratch = hc32_to_cpup(fusbh200, &td->hw_token);
470 mark = ' ';
471 if (hw_curr == td->qtd_dma)
472 mark = '*';
473 else if (hw->hw_qtd_next == cpu_to_hc32(fusbh200, td->qtd_dma))
474 mark = '+';
475 else if (QTD_LENGTH (scratch)) {
476 if (td->hw_alt_next == fusbh200->async->hw->hw_alt_next)
477 mark = '#';
478 else if (td->hw_alt_next != list_end)
479 mark = '/';
480 }
481 temp = snprintf (next, size,
482 "\n\t%p%c%s len=%d %08x urb %p",
483 td, mark, ({ char *tmp;
484 switch ((scratch>>8)&0x03) {
485 case 0: tmp = "out"; break;
486 case 1: tmp = "in"; break;
487 case 2: tmp = "setup"; break;
488 default: tmp = "?"; break;
489 } tmp;}),
490 (scratch >> 16) & 0x7fff,
491 scratch,
492 td->urb);
493 if (size < temp)
494 temp = size;
495 size -= temp;
496 next += temp;
497 if (temp == size)
498 goto done;
499 }
500
501 temp = snprintf (next, size, "\n");
502 if (size < temp)
503 temp = size;
504 size -= temp;
505 next += temp;
506
507done:
508 *sizep = size;
509 *nextp = next;
510}
511
512static ssize_t fill_async_buffer(struct debug_buffer *buf)
513{
514 struct usb_hcd *hcd;
515 struct fusbh200_hcd *fusbh200;
516 unsigned long flags;
517 unsigned temp, size;
518 char *next;
519 struct fusbh200_qh *qh;
520
521 hcd = bus_to_hcd(buf->bus);
522 fusbh200 = hcd_to_fusbh200 (hcd);
523 next = buf->output_buf;
524 size = buf->alloc_size;
525
526 *next = 0;
527
528 /* dumps a snapshot of the async schedule.
529 * usually empty except for long-term bulk reads, or head.
530 * one QH per line, and TDs we know about
531 */
532 spin_lock_irqsave (&fusbh200->lock, flags);
533 for (qh = fusbh200->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
534 qh_lines (fusbh200, qh, &next, &size);
535 if (fusbh200->async_unlink && size > 0) {
536 temp = scnprintf(next, size, "\nunlink =\n");
537 size -= temp;
538 next += temp;
539
540 for (qh = fusbh200->async_unlink; size > 0 && qh;
541 qh = qh->unlink_next)
542 qh_lines (fusbh200, qh, &next, &size);
543 }
544 spin_unlock_irqrestore (&fusbh200->lock, flags);
545
546 return strlen(buf->output_buf);
547}
548
549#define DBG_SCHED_LIMIT 64
550static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
551{
552 struct usb_hcd *hcd;
553 struct fusbh200_hcd *fusbh200;
554 unsigned long flags;
555 union fusbh200_shadow p, *seen;
556 unsigned temp, size, seen_count;
557 char *next;
558 unsigned i;
559 __hc32 tag;
560
561 if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
562 return 0;
563 seen_count = 0;
564
565 hcd = bus_to_hcd(buf->bus);
566 fusbh200 = hcd_to_fusbh200 (hcd);
567 next = buf->output_buf;
568 size = buf->alloc_size;
569
570 temp = scnprintf (next, size, "size = %d\n", fusbh200->periodic_size);
571 size -= temp;
572 next += temp;
573
574 /* dump a snapshot of the periodic schedule.
575 * iso changes, interrupt usually doesn't.
576 */
577 spin_lock_irqsave (&fusbh200->lock, flags);
578 for (i = 0; i < fusbh200->periodic_size; i++) {
579 p = fusbh200->pshadow [i];
580 if (likely (!p.ptr))
581 continue;
582 tag = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [i]);
583
584 temp = scnprintf (next, size, "%4d: ", i);
585 size -= temp;
586 next += temp;
587
588 do {
589 struct fusbh200_qh_hw *hw;
590
591 switch (hc32_to_cpu(fusbh200, tag)) {
592 case Q_TYPE_QH:
593 hw = p.qh->hw;
594 temp = scnprintf (next, size, " qh%d-%04x/%p",
595 p.qh->period,
596 hc32_to_cpup(fusbh200,
597 &hw->hw_info2)
598 /* uframe masks */
599 & (QH_CMASK | QH_SMASK),
600 p.qh);
601 size -= temp;
602 next += temp;
603 /* don't repeat what follows this qh */
604 for (temp = 0; temp < seen_count; temp++) {
605 if (seen [temp].ptr != p.ptr)
606 continue;
607 if (p.qh->qh_next.ptr) {
608 temp = scnprintf (next, size,
609 " ...");
610 size -= temp;
611 next += temp;
612 }
613 break;
614 }
615 /* show more info the first time around */
616 if (temp == seen_count) {
617 u32 scratch = hc32_to_cpup(fusbh200,
618 &hw->hw_info1);
619 struct fusbh200_qtd *qtd;
620 char *type = "";
621
622 /* count tds, get ep direction */
623 temp = 0;
624 list_for_each_entry (qtd,
625 &p.qh->qtd_list,
626 qtd_list) {
627 temp++;
628 switch (0x03 & (hc32_to_cpu(
629 fusbh200,
630 qtd->hw_token) >> 8)) {
631 case 0: type = "out"; continue;
632 case 1: type = "in"; continue;
633 }
634 }
635
636 temp = scnprintf (next, size,
637 " (%c%d ep%d%s "
638 "[%d/%d] q%d p%d)",
639 speed_char (scratch),
640 scratch & 0x007f,
641 (scratch >> 8) & 0x000f, type,
642 p.qh->usecs, p.qh->c_usecs,
643 temp,
644 0x7ff & (scratch >> 16));
645
646 if (seen_count < DBG_SCHED_LIMIT)
647 seen [seen_count++].qh = p.qh;
648 } else
649 temp = 0;
650 tag = Q_NEXT_TYPE(fusbh200, hw->hw_next);
651 p = p.qh->qh_next;
652 break;
653 case Q_TYPE_FSTN:
654 temp = scnprintf (next, size,
655 " fstn-%8x/%p", p.fstn->hw_prev,
656 p.fstn);
657 tag = Q_NEXT_TYPE(fusbh200, p.fstn->hw_next);
658 p = p.fstn->fstn_next;
659 break;
660 case Q_TYPE_ITD:
661 temp = scnprintf (next, size,
662 " itd/%p", p.itd);
663 tag = Q_NEXT_TYPE(fusbh200, p.itd->hw_next);
664 p = p.itd->itd_next;
665 break;
666 }
667 size -= temp;
668 next += temp;
669 } while (p.ptr);
670
671 temp = scnprintf (next, size, "\n");
672 size -= temp;
673 next += temp;
674 }
675 spin_unlock_irqrestore (&fusbh200->lock, flags);
676 kfree (seen);
677
678 return buf->alloc_size - size;
679}
680#undef DBG_SCHED_LIMIT
681
682static const char *rh_state_string(struct fusbh200_hcd *fusbh200)
683{
684 switch (fusbh200->rh_state) {
685 case FUSBH200_RH_HALTED:
686 return "halted";
687 case FUSBH200_RH_SUSPENDED:
688 return "suspended";
689 case FUSBH200_RH_RUNNING:
690 return "running";
691 case FUSBH200_RH_STOPPING:
692 return "stopping";
693 }
694 return "?";
695}
696
697static ssize_t fill_registers_buffer(struct debug_buffer *buf)
698{
699 struct usb_hcd *hcd;
700 struct fusbh200_hcd *fusbh200;
701 unsigned long flags;
702 unsigned temp, size, i;
703 char *next, scratch [80];
704 static char fmt [] = "%*s\n";
705 static char label [] = "";
706
707 hcd = bus_to_hcd(buf->bus);
708 fusbh200 = hcd_to_fusbh200 (hcd);
709 next = buf->output_buf;
710 size = buf->alloc_size;
711
712 spin_lock_irqsave (&fusbh200->lock, flags);
713
714 if (!HCD_HW_ACCESSIBLE(hcd)) {
715 size = scnprintf (next, size,
716 "bus %s, device %s\n"
717 "%s\n"
718 "SUSPENDED (no register access)\n",
719 hcd->self.controller->bus->name,
720 dev_name(hcd->self.controller),
721 hcd->product_desc);
722 goto done;
723 }
724
725 /* Capability Registers */
726 i = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
727 temp = scnprintf (next, size,
728 "bus %s, device %s\n"
729 "%s\n"
730 "EHCI %x.%02x, rh state %s\n",
731 hcd->self.controller->bus->name,
732 dev_name(hcd->self.controller),
733 hcd->product_desc,
734 i >> 8, i & 0x0ff, rh_state_string(fusbh200));
735 size -= temp;
736 next += temp;
737
738 // FIXME interpret both types of params
739 i = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
740 temp = scnprintf (next, size, "structural params 0x%08x\n", i);
741 size -= temp;
742 next += temp;
743
744 i = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
745 temp = scnprintf (next, size, "capability params 0x%08x\n", i);
746 size -= temp;
747 next += temp;
748
749 /* Operational Registers */
750 temp = dbg_status_buf (scratch, sizeof scratch, label,
751 fusbh200_readl(fusbh200, &fusbh200->regs->status));
752 temp = scnprintf (next, size, fmt, temp, scratch);
753 size -= temp;
754 next += temp;
755
756 temp = dbg_command_buf (scratch, sizeof scratch, label,
757 fusbh200_readl(fusbh200, &fusbh200->regs->command));
758 temp = scnprintf (next, size, fmt, temp, scratch);
759 size -= temp;
760 next += temp;
761
762 temp = dbg_intr_buf (scratch, sizeof scratch, label,
763 fusbh200_readl(fusbh200, &fusbh200->regs->intr_enable));
764 temp = scnprintf (next, size, fmt, temp, scratch);
765 size -= temp;
766 next += temp;
767
768 temp = scnprintf (next, size, "uframe %04x\n",
769 fusbh200_read_frame_index(fusbh200));
770 size -= temp;
771 next += temp;
772
773 if (fusbh200->async_unlink) {
774 temp = scnprintf(next, size, "async unlink qh %p\n",
775 fusbh200->async_unlink);
776 size -= temp;
777 next += temp;
778 }
779
780#ifdef FUSBH200_STATS
781 temp = scnprintf (next, size,
782 "irq normal %ld err %ld iaa %ld (lost %ld)\n",
783 fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
784 fusbh200->stats.lost_iaa);
785 size -= temp;
786 next += temp;
787
788 temp = scnprintf (next, size, "complete %ld unlink %ld\n",
789 fusbh200->stats.complete, fusbh200->stats.unlink);
790 size -= temp;
791 next += temp;
792#endif
793
794done:
795 spin_unlock_irqrestore (&fusbh200->lock, flags);
796
797 return buf->alloc_size - size;
798}
799
800static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
801 ssize_t (*fill_func)(struct debug_buffer *))
802{
803 struct debug_buffer *buf;
804
805 buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
806
807 if (buf) {
808 buf->bus = bus;
809 buf->fill_func = fill_func;
810 mutex_init(&buf->mutex);
811 buf->alloc_size = PAGE_SIZE;
812 }
813
814 return buf;
815}
816
817static int fill_buffer(struct debug_buffer *buf)
818{
819 int ret = 0;
820
821 if (!buf->output_buf)
822 buf->output_buf = vmalloc(buf->alloc_size);
823
824 if (!buf->output_buf) {
825 ret = -ENOMEM;
826 goto out;
827 }
828
829 ret = buf->fill_func(buf);
830
831 if (ret >= 0) {
832 buf->count = ret;
833 ret = 0;
834 }
835
836out:
837 return ret;
838}
839
840static ssize_t debug_output(struct file *file, char __user *user_buf,
841 size_t len, loff_t *offset)
842{
843 struct debug_buffer *buf = file->private_data;
844 int ret = 0;
845
846 mutex_lock(&buf->mutex);
847 if (buf->count == 0) {
848 ret = fill_buffer(buf);
849 if (ret != 0) {
850 mutex_unlock(&buf->mutex);
851 goto out;
852 }
853 }
854 mutex_unlock(&buf->mutex);
855
856 ret = simple_read_from_buffer(user_buf, len, offset,
857 buf->output_buf, buf->count);
858
859out:
860 return ret;
861
862}
863
864static int debug_close(struct inode *inode, struct file *file)
865{
866 struct debug_buffer *buf = file->private_data;
867
868 if (buf) {
869 vfree(buf->output_buf);
870 kfree(buf);
871 }
872
873 return 0;
874}
875static int debug_async_open(struct inode *inode, struct file *file)
876{
877 file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
878
879 return file->private_data ? 0 : -ENOMEM;
880}
881
882static int debug_periodic_open(struct inode *inode, struct file *file)
883{
884 struct debug_buffer *buf;
885 buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
886 if (!buf)
887 return -ENOMEM;
888
889 buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
890 file->private_data = buf;
891 return 0;
892}
893
894static int debug_registers_open(struct inode *inode, struct file *file)
895{
896 file->private_data = alloc_buffer(inode->i_private,
897 fill_registers_buffer);
898
899 return file->private_data ? 0 : -ENOMEM;
900}
901
902static inline void create_debug_files (struct fusbh200_hcd *fusbh200)
903{
904 struct usb_bus *bus = &fusbh200_to_hcd(fusbh200)->self;
905
906 fusbh200->debug_dir = debugfs_create_dir(bus->bus_name, fusbh200_debug_root);
907 if (!fusbh200->debug_dir)
908 return;
909
910 if (!debugfs_create_file("async", S_IRUGO, fusbh200->debug_dir, bus,
911 &debug_async_fops))
912 goto file_error;
913
914 if (!debugfs_create_file("periodic", S_IRUGO, fusbh200->debug_dir, bus,
915 &debug_periodic_fops))
916 goto file_error;
917
918 if (!debugfs_create_file("registers", S_IRUGO, fusbh200->debug_dir, bus,
919 &debug_registers_fops))
920 goto file_error;
921
922 return;
923
924file_error:
925 debugfs_remove_recursive(fusbh200->debug_dir);
926}
927
928static inline void remove_debug_files (struct fusbh200_hcd *fusbh200)
929{
930 debugfs_remove_recursive(fusbh200->debug_dir);
931}
932
933#endif /* STUB_DEBUG_FILES */
934/*-------------------------------------------------------------------------*/
935
936/*
937 * handshake - spin reading hc until handshake completes or fails
938 * @ptr: address of hc register to be read
939 * @mask: bits to look at in result of read
940 * @done: value of those bits when handshake succeeds
941 * @usec: timeout in microseconds
942 *
943 * Returns negative errno, or zero on success
944 *
945 * Success happens when the "mask" bits have the specified value (hardware
 946 * handshake done).  There are two failure modes:  the "usec" timeout has passed (major
947 * hardware flakeout), or the register reads as all-ones (hardware removed).
948 *
 949 * That last failure should only happen in cases like physical cardbus eject
950 * before driver shutdown. But it also seems to be caused by bugs in cardbus
951 * bridge shutdown: shutting down the bridge before the devices using it.
952 */
953static int handshake (struct fusbh200_hcd *fusbh200, void __iomem *ptr,
954 u32 mask, u32 done, int usec)
955{
956 u32 result;
957
958 do {
959 result = fusbh200_readl(fusbh200, ptr);
960 if (result == ~(u32)0) /* card removed */
961 return -ENODEV;
962 result &= mask;
963 if (result == done)
964 return 0;
965 udelay (1);
966 usec--;
967 } while (usec > 0);
968 return -ETIMEDOUT;
969}
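
/*
 * Illustrative usage sketch, not from the original patch: waiting up to
 * 16 microframes (16 * 125 us) for the controller to report HALT after the
 * run bit has been cleared would look like:
 *
 *	if (handshake(fusbh200, &fusbh200->regs->status,
 *			STS_HALT, STS_HALT, 16 * 125) < 0)
 *		fusbh200_err(fusbh200, "controller did not halt\n");
 */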
970
971/*
972 * Force HC to halt state from unknown (EHCI spec section 2.3).
973 * Must be called with interrupts enabled and the lock not held.
974 */
975static int fusbh200_halt (struct fusbh200_hcd *fusbh200)
976{
977 u32 temp;
978
979 spin_lock_irq(&fusbh200->lock);
980
981 /* disable any irqs left enabled by previous code */
982 fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
983
984 /*
985 * This routine gets called during probe before fusbh200->command
986 * has been initialized, so we can't rely on its value.
987 */
988 fusbh200->command &= ~CMD_RUN;
989 temp = fusbh200_readl(fusbh200, &fusbh200->regs->command);
990 temp &= ~(CMD_RUN | CMD_IAAD);
991 fusbh200_writel(fusbh200, temp, &fusbh200->regs->command);
992
993 spin_unlock_irq(&fusbh200->lock);
994 synchronize_irq(fusbh200_to_hcd(fusbh200)->irq);
995
996 return handshake(fusbh200, &fusbh200->regs->status,
997 STS_HALT, STS_HALT, 16 * 125);
998}
999
1000/*
1001 * Reset a non-running (STS_HALT == 1) controller.
1002 * Must be called with interrupts enabled and the lock not held.
1003 */
1004static int fusbh200_reset (struct fusbh200_hcd *fusbh200)
1005{
1006 int retval;
1007 u32 command = fusbh200_readl(fusbh200, &fusbh200->regs->command);
1008
1009 /* If the EHCI debug controller is active, special care must be
1010 * taken before and after a host controller reset */
1011 if (fusbh200->debug && !dbgp_reset_prep(fusbh200_to_hcd(fusbh200)))
1012 fusbh200->debug = NULL;
1013
1014 command |= CMD_RESET;
1015 dbg_cmd (fusbh200, "reset", command);
1016 fusbh200_writel(fusbh200, command, &fusbh200->regs->command);
1017 fusbh200->rh_state = FUSBH200_RH_HALTED;
1018 fusbh200->next_statechange = jiffies;
1019 retval = handshake (fusbh200, &fusbh200->regs->command,
1020 CMD_RESET, 0, 250 * 1000);
1021
1022 if (retval)
1023 return retval;
1024
1025 if (fusbh200->debug)
1026 dbgp_external_startup(fusbh200_to_hcd(fusbh200));
1027
1028 fusbh200->port_c_suspend = fusbh200->suspended_ports =
1029 fusbh200->resuming_ports = 0;
1030 return retval;
1031}
1032
1033/*
1034 * Idle the controller (turn off the schedules).
1035 * Must be called with interrupts enabled and the lock not held.
1036 */
1037static void fusbh200_quiesce (struct fusbh200_hcd *fusbh200)
1038{
1039 u32 temp;
1040
1041 if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
1042 return;
1043
1044 /* wait for any schedule enables/disables to take effect */
1045 temp = (fusbh200->command << 10) & (STS_ASS | STS_PSS);
1046 handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, temp, 16 * 125);
1047
1048 /* then disable anything that's still active */
1049 spin_lock_irq(&fusbh200->lock);
1050 fusbh200->command &= ~(CMD_ASE | CMD_PSE);
1051 fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
1052 spin_unlock_irq(&fusbh200->lock);
1053
1054 /* hardware can take 16 microframes to turn off ... */
1055 handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, 0, 16 * 125);
1056}
1057
1058/*-------------------------------------------------------------------------*/
1059
1060static void end_unlink_async(struct fusbh200_hcd *fusbh200);
1061static void unlink_empty_async(struct fusbh200_hcd *fusbh200);
1062static void fusbh200_work(struct fusbh200_hcd *fusbh200);
1063static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
1064static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
1065
1066/*-------------------------------------------------------------------------*/
1067
1068/* Set a bit in the USBCMD register */
1069static void fusbh200_set_command_bit(struct fusbh200_hcd *fusbh200, u32 bit)
1070{
1071 fusbh200->command |= bit;
1072 fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
1073
1074 /* unblock posted write */
1075 fusbh200_readl(fusbh200, &fusbh200->regs->command);
1076}
1077
1078/* Clear a bit in the USBCMD register */
1079static void fusbh200_clear_command_bit(struct fusbh200_hcd *fusbh200, u32 bit)
1080{
1081 fusbh200->command &= ~bit;
1082 fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
1083
1084 /* unblock posted write */
1085 fusbh200_readl(fusbh200, &fusbh200->regs->command);
1086}
1087
1088/*-------------------------------------------------------------------------*/
1089
1090/*
1091 * EHCI timer support... Now using hrtimers.
1092 *
1093 * Lots of different events are triggered from fusbh200->hrtimer. Whenever
1094 * the timer routine runs, it checks each possible event; events that are
1095 * currently enabled and whose expiration time has passed get handled.
1096 * The set of enabled events is stored as a collection of bitflags in
1097 * fusbh200->enabled_hrtimer_events, and they are numbered in order of
1098 * increasing delay values (ranging between 1 ms and 100 ms).
1099 *
1100 * Rather than implementing a sorted list or tree of all pending events,
1101 * we keep track only of the lowest-numbered pending event, in
1102 * fusbh200->next_hrtimer_event. Whenever fusbh200->hrtimer gets restarted, its
1103 * expiration time is set to the timeout value for this event.
1104 *
1105 * As a result, events might not get handled right away; the actual delay
1106 * could be anywhere up to twice the requested delay. This doesn't
1107 * matter, because none of the events are especially time-critical. The
1108 * ones that matter most all have a delay of 1 ms, so they will be
1109 * handled after 2 ms at most, which is okay. In addition to this, we
1110 * allow for an expiration range of 1 ms.
1111 */
1112
1113/*
1114 * Delay lengths for the hrtimer event types.
1115 * Keep this list sorted by delay length, in the same order as
1116 * the event types indexed by enum fusbh200_hrtimer_event in fusbh200.h.
1117 */
1118static unsigned event_delays_ns[] = {
1119 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_ASS */
1120 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_PSS */
1121 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_DEAD */
1122 1125 * NSEC_PER_USEC, /* FUSBH200_HRTIMER_UNLINK_INTR */
1123 2 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_FREE_ITDS */
1124 6 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_ASYNC_UNLINKS */
1125 10 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_IAA_WATCHDOG */
1126 10 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_DISABLE_PERIODIC */
1127 15 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_DISABLE_ASYNC */
1128 100 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_IO_WATCHDOG */
1129};
1130
1131/* Enable a pending hrtimer event */
1132static void fusbh200_enable_event(struct fusbh200_hcd *fusbh200, unsigned event,
1133 bool resched)
1134{
1135 ktime_t *timeout = &fusbh200->hr_timeouts[event];
1136
1137 if (resched)
1138 *timeout = ktime_add(ktime_get(),
1139 ktime_set(0, event_delays_ns[event]));
1140 fusbh200->enabled_hrtimer_events |= (1 << event);
1141
1142 /* Track only the lowest-numbered pending event */
1143 if (event < fusbh200->next_hrtimer_event) {
1144 fusbh200->next_hrtimer_event = event;
1145 hrtimer_start_range_ns(&fusbh200->hrtimer, *timeout,
1146 NSEC_PER_MSEC, HRTIMER_MODE_ABS);
1147 }
1148}
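
/*
 * Illustrative example, not from the original patch: asking for the async
 * schedule status to be polled again roughly 1 ms from now is simply
 *
 *	fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_ASS, true);
 *
 * and the hrtimer is (re)started only when this becomes the lowest-numbered
 * pending event.
 */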
1149
1150
1151/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
1152static void fusbh200_poll_ASS(struct fusbh200_hcd *fusbh200)
1153{
1154 unsigned actual, want;
1155
1156 /* Don't enable anything if the controller isn't running (e.g., died) */
1157 if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
1158 return;
1159
1160 want = (fusbh200->command & CMD_ASE) ? STS_ASS : 0;
1161 actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_ASS;
1162
1163 if (want != actual) {
1164
1165 /* Poll again later, but give up after about 20 ms */
1166 if (fusbh200->ASS_poll_count++ < 20) {
1167 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_ASS, true);
1168 return;
1169 }
1170 fusbh200_dbg(fusbh200, "Waited too long for the async schedule status (%x/%x), giving up\n",
1171 want, actual);
1172 }
1173 fusbh200->ASS_poll_count = 0;
1174
1175 /* The status is up-to-date; restart or stop the schedule as needed */
1176 if (want == 0) { /* Stopped */
1177 if (fusbh200->async_count > 0)
1178 fusbh200_set_command_bit(fusbh200, CMD_ASE);
1179
1180 } else { /* Running */
1181 if (fusbh200->async_count == 0) {
1182
1183 /* Turn off the schedule after a while */
1184 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_ASYNC,
1185 true);
1186 }
1187 }
1188}
1189
1190/* Turn off the async schedule after a brief delay */
1191static void fusbh200_disable_ASE(struct fusbh200_hcd *fusbh200)
1192{
1193 fusbh200_clear_command_bit(fusbh200, CMD_ASE);
1194}
1195
1196
1197/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
1198static void fusbh200_poll_PSS(struct fusbh200_hcd *fusbh200)
1199{
1200 unsigned actual, want;
1201
1202 /* Don't do anything if the controller isn't running (e.g., died) */
1203 if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
1204 return;
1205
1206 want = (fusbh200->command & CMD_PSE) ? STS_PSS : 0;
1207 actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_PSS;
1208
1209 if (want != actual) {
1210
1211 /* Poll again later, but give up after about 20 ms */
1212 if (fusbh200->PSS_poll_count++ < 20) {
1213 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_PSS, true);
1214 return;
1215 }
1216 fusbh200_dbg(fusbh200, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
1217 want, actual);
1218 }
1219 fusbh200->PSS_poll_count = 0;
1220
1221 /* The status is up-to-date; restart or stop the schedule as needed */
1222 if (want == 0) { /* Stopped */
1223 if (fusbh200->periodic_count > 0)
1224 fusbh200_set_command_bit(fusbh200, CMD_PSE);
1225
1226 } else { /* Running */
1227 if (fusbh200->periodic_count == 0) {
1228
1229 /* Turn off the schedule after a while */
1230 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_PERIODIC,
1231 true);
1232 }
1233 }
1234}
1235
1236/* Turn off the periodic schedule after a brief delay */
1237static void fusbh200_disable_PSE(struct fusbh200_hcd *fusbh200)
1238{
1239 fusbh200_clear_command_bit(fusbh200, CMD_PSE);
1240}
1241
1242
1243/* Poll the STS_HALT status bit; see when a dead controller stops */
1244static void fusbh200_handle_controller_death(struct fusbh200_hcd *fusbh200)
1245{
1246 if (!(fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_HALT)) {
1247
1248 /* Give up after a few milliseconds */
1249 if (fusbh200->died_poll_count++ < 5) {
1250 /* Try again later */
1251 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_DEAD, true);
1252 return;
1253 }
1254 fusbh200_warn(fusbh200, "Waited too long for the controller to stop, giving up\n");
1255 }
1256
1257 /* Clean up the mess */
1258 fusbh200->rh_state = FUSBH200_RH_HALTED;
1259 fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
1260 fusbh200_work(fusbh200);
1261 end_unlink_async(fusbh200);
1262
1263 /* Not in process context, so don't try to reset the controller */
1264}
1265
1266
1267/* Handle unlinked interrupt QHs once they are gone from the hardware */
1268static void fusbh200_handle_intr_unlinks(struct fusbh200_hcd *fusbh200)
1269{
1270 bool stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING);
1271
1272 /*
1273 * Process all the QHs on the intr_unlink list that were added
1274 * before the current unlink cycle began. The list is in
1275 * temporal order, so stop when we reach the first entry in the
1276 * current cycle. But if the root hub isn't running then
1277 * process all the QHs on the list.
1278 */
1279 fusbh200->intr_unlinking = true;
1280 while (fusbh200->intr_unlink) {
1281 struct fusbh200_qh *qh = fusbh200->intr_unlink;
1282
1283 if (!stopped && qh->unlink_cycle == fusbh200->intr_unlink_cycle)
1284 break;
1285 fusbh200->intr_unlink = qh->unlink_next;
1286 qh->unlink_next = NULL;
1287 end_unlink_intr(fusbh200, qh);
1288 }
1289
1290 /* Handle remaining entries later */
1291 if (fusbh200->intr_unlink) {
1292 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true);
1293 ++fusbh200->intr_unlink_cycle;
1294 }
1295 fusbh200->intr_unlinking = false;
1296}
1297
1298
1299/* Start another free-iTDs/siTDs cycle */
1300static void start_free_itds(struct fusbh200_hcd *fusbh200)
1301{
1302 if (!(fusbh200->enabled_hrtimer_events & BIT(FUSBH200_HRTIMER_FREE_ITDS))) {
1303 fusbh200->last_itd_to_free = list_entry(
1304 fusbh200->cached_itd_list.prev,
1305 struct fusbh200_itd, itd_list);
1306 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_FREE_ITDS, true);
1307 }
1308}
1309
1310/* Wait for controller to stop using old iTDs and siTDs */
1311static void end_free_itds(struct fusbh200_hcd *fusbh200)
1312{
1313 struct fusbh200_itd *itd, *n;
1314
1315 if (fusbh200->rh_state < FUSBH200_RH_RUNNING) {
1316 fusbh200->last_itd_to_free = NULL;
1317 }
1318
1319 list_for_each_entry_safe(itd, n, &fusbh200->cached_itd_list, itd_list) {
1320 list_del(&itd->itd_list);
1321 dma_pool_free(fusbh200->itd_pool, itd, itd->itd_dma);
1322 if (itd == fusbh200->last_itd_to_free)
1323 break;
1324 }
1325
1326 if (!list_empty(&fusbh200->cached_itd_list))
1327 start_free_itds(fusbh200);
1328}
1329
1330
1331/* Handle lost (or very late) IAA interrupts */
1332static void fusbh200_iaa_watchdog(struct fusbh200_hcd *fusbh200)
1333{
1334 if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
1335 return;
1336
1337 /*
1338 * Lost IAA irqs wedge things badly; seen first with a vt8235.
1339 * So we need this watchdog, but must protect it against both
1340 * (a) SMP races against real IAA firing and retriggering, and
1341 * (b) clean HC shutdown, when IAA watchdog was pending.
1342 */
1343 if (fusbh200->async_iaa) {
1344 u32 cmd, status;
1345
1346 /* If we get here, IAA is *REALLY* late. It's barely
1347 * conceivable that the system is so busy that CMD_IAAD
1348 * is still legitimately set, so let's be sure it's
1349 * clear before we read STS_IAA. (The HC should clear
1350 * CMD_IAAD when it sets STS_IAA.)
1351 */
1352 cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
1353
1354 /*
1355 * If IAA is set here it either legitimately triggered
1356 * after the watchdog timer expired (_way_ late, so we'll
1357 * still count it as lost) ... or a silicon erratum:
1358 * - VIA seems to set IAA without triggering the IRQ;
1359 * - IAAD potentially cleared without setting IAA.
1360 */
1361 status = fusbh200_readl(fusbh200, &fusbh200->regs->status);
1362 if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
1363 COUNT(fusbh200->stats.lost_iaa);
1364 fusbh200_writel(fusbh200, STS_IAA, &fusbh200->regs->status);
1365 }
1366
1367 fusbh200_vdbg(fusbh200, "IAA watchdog: status %x cmd %x\n",
1368 status, cmd);
1369 end_unlink_async(fusbh200);
1370 }
1371}
1372
1373
1374/* Enable the I/O watchdog, if appropriate */
1375static void turn_on_io_watchdog(struct fusbh200_hcd *fusbh200)
1376{
1377 /* Not needed if the controller isn't running or it's already enabled */
1378 if (fusbh200->rh_state != FUSBH200_RH_RUNNING ||
1379 (fusbh200->enabled_hrtimer_events &
1380 BIT(FUSBH200_HRTIMER_IO_WATCHDOG)))
1381 return;
1382
1383 /*
1384 * Isochronous transfers always need the watchdog.
1385 * For other sorts we use it only if the flag is set.
1386 */
1387 if (fusbh200->isoc_count > 0 || (fusbh200->need_io_watchdog &&
1388 fusbh200->async_count + fusbh200->intr_count > 0))
1389 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IO_WATCHDOG, true);
1390}
1391
1392
1393/*
1394 * Handler functions for the hrtimer event types.
1395 * Keep this array in the same order as the event types indexed by
1396 * enum fusbh200_hrtimer_event in fusbh200.h.
1397 */
1398static void (*event_handlers[])(struct fusbh200_hcd *) = {
1399 fusbh200_poll_ASS, /* FUSBH200_HRTIMER_POLL_ASS */
1400 fusbh200_poll_PSS, /* FUSBH200_HRTIMER_POLL_PSS */
1401 fusbh200_handle_controller_death, /* FUSBH200_HRTIMER_POLL_DEAD */
1402 fusbh200_handle_intr_unlinks, /* FUSBH200_HRTIMER_UNLINK_INTR */
1403 end_free_itds, /* FUSBH200_HRTIMER_FREE_ITDS */
1404 unlink_empty_async, /* FUSBH200_HRTIMER_ASYNC_UNLINKS */
1405 fusbh200_iaa_watchdog, /* FUSBH200_HRTIMER_IAA_WATCHDOG */
1406 fusbh200_disable_PSE, /* FUSBH200_HRTIMER_DISABLE_PERIODIC */
1407 fusbh200_disable_ASE, /* FUSBH200_HRTIMER_DISABLE_ASYNC */
1408 fusbh200_work, /* FUSBH200_HRTIMER_IO_WATCHDOG */
1409};
1410
1411static enum hrtimer_restart fusbh200_hrtimer_func(struct hrtimer *t)
1412{
1413 struct fusbh200_hcd *fusbh200 = container_of(t, struct fusbh200_hcd, hrtimer);
1414 ktime_t now;
1415 unsigned long events;
1416 unsigned long flags;
1417 unsigned e;
1418
1419 spin_lock_irqsave(&fusbh200->lock, flags);
1420
1421 events = fusbh200->enabled_hrtimer_events;
1422 fusbh200->enabled_hrtimer_events = 0;
1423 fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT;
1424
1425 /*
1426 * Check each pending event. If its time has expired, handle
1427 * the event; otherwise re-enable it.
1428 */
1429 now = ktime_get();
1430 for_each_set_bit(e, &events, FUSBH200_HRTIMER_NUM_EVENTS) {
1431 if (now.tv64 >= fusbh200->hr_timeouts[e].tv64)
1432 event_handlers[e](fusbh200);
1433 else
1434 fusbh200_enable_event(fusbh200, e, false);
1435 }
1436
1437 spin_unlock_irqrestore(&fusbh200->lock, flags);
1438 return HRTIMER_NORESTART;
1439}
1440
1441/*-------------------------------------------------------------------------*/
1442
1443#define fusbh200_bus_suspend NULL
1444#define fusbh200_bus_resume NULL
1445
1446/*-------------------------------------------------------------------------*/
1447
1448static int check_reset_complete (
1449 struct fusbh200_hcd *fusbh200,
1450 int index,
1451 u32 __iomem *status_reg,
1452 int port_status
1453) {
1454 if (!(port_status & PORT_CONNECT))
1455 return port_status;
1456
1457 /* if reset finished and it's still not enabled -- handoff */
1458 if (!(port_status & PORT_PE)) {
1459 /* with integrated TT, there's nobody to hand it to! */
1460 fusbh200_dbg (fusbh200,
1461 "Failed to enable port %d on root hub TT\n",
1462 index+1);
1463 return port_status;
1464 } else {
1465 fusbh200_dbg(fusbh200, "port %d reset complete, port enabled\n",
1466 index + 1);
1467 }
1468
1469 return port_status;
1470}
1471
1472/*-------------------------------------------------------------------------*/
1473
1474
1475/* build "status change" packet (one or two bytes) from HC registers */
1476
1477static int
1478fusbh200_hub_status_data (struct usb_hcd *hcd, char *buf)
1479{
1480 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
1481 u32 temp, status;
1482 u32 mask;
1483 int retval = 1;
1484 unsigned long flags;
1485
1486 /* init status to no-changes */
1487 buf [0] = 0;
1488
1489 /* Inform the core about resumes-in-progress by returning
1490 * a non-zero value even if there are no status changes.
1491 */
1492 status = fusbh200->resuming_ports;
1493
1494 mask = PORT_CSC | PORT_PEC;
1495 // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND
1496
1497 /* no hub change reports (bit 0) for now (power, ...) */
1498
1499 /* port N changes (bit N)? */
1500 spin_lock_irqsave (&fusbh200->lock, flags);
1501
1502 temp = fusbh200_readl(fusbh200, &fusbh200->regs->port_status);
1503
1504 /*
1505 * Return status information even for ports with OWNER set.
1506 * Otherwise khubd wouldn't see the disconnect event when a
1507 * high-speed device is switched over to the companion
1508 * controller by the user.
1509 */
1510
1511 if ((temp & mask) != 0 || test_bit(0, &fusbh200->port_c_suspend)
1512 || (fusbh200->reset_done[0] && time_after_eq(
1513 jiffies, fusbh200->reset_done[0]))) {
1514 buf [0] |= 1 << 1;
1515 status = STS_PCD;
1516 }
1517 /* FIXME autosuspend idle root hubs */
1518 spin_unlock_irqrestore (&fusbh200->lock, flags);
1519 return status ? retval : 0;
1520}
1521
1522/*-------------------------------------------------------------------------*/
1523
1524static void
1525fusbh200_hub_descriptor (
1526 struct fusbh200_hcd *fusbh200,
1527 struct usb_hub_descriptor *desc
1528) {
1529 int ports = HCS_N_PORTS (fusbh200->hcs_params);
1530 u16 temp;
1531
1532 desc->bDescriptorType = 0x29;
1533 desc->bPwrOn2PwrGood = 10; /* fusbh200 1.0, 2.3.9 says 20ms max */
1534 desc->bHubContrCurrent = 0;
1535
1536 desc->bNbrPorts = ports;
1537 temp = 1 + (ports / 8);
1538 desc->bDescLength = 7 + 2 * temp;
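	/* Illustrative note, not from the original patch: for a one-port
	 * root hub, temp = 1 + (1 / 8) = 1, so bDescLength = 7 + 2 * 1 = 9. */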
1539
1540 /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
1541 memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
1542 memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
1543
1544 temp = 0x0008; /* per-port overcurrent reporting */
1545 temp |= 0x0002; /* no power switching */
1546 desc->wHubCharacteristics = cpu_to_le16(temp);
1547}
1548
1549/*-------------------------------------------------------------------------*/
1550
1551static int fusbh200_hub_control (
1552 struct usb_hcd *hcd,
1553 u16 typeReq,
1554 u16 wValue,
1555 u16 wIndex,
1556 char *buf,
1557 u16 wLength
1558) {
1559 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
1560 int ports = HCS_N_PORTS (fusbh200->hcs_params);
1561 u32 __iomem *status_reg = &fusbh200->regs->port_status;
1562 u32 temp, temp1, status;
1563 unsigned long flags;
1564 int retval = 0;
1565 unsigned selector;
1566
1567 /*
1568 * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
1569 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
1570 * (track current state ourselves) ... blink for diagnostics,
1571 * power, "this is the one", etc. EHCI spec supports this.
1572 */
1573
1574 spin_lock_irqsave (&fusbh200->lock, flags);
1575 switch (typeReq) {
1576 case ClearHubFeature:
1577 switch (wValue) {
1578 case C_HUB_LOCAL_POWER:
1579 case C_HUB_OVER_CURRENT:
1580 /* no hub-wide feature/status flags */
1581 break;
1582 default:
1583 goto error;
1584 }
1585 break;
1586 case ClearPortFeature:
1587 if (!wIndex || wIndex > ports)
1588 goto error;
1589 wIndex--;
1590 temp = fusbh200_readl(fusbh200, status_reg);
1591 temp &= ~PORT_RWC_BITS;
1592
1593 /*
1594 * Even if OWNER is set, so the port is owned by the
1595 * companion controller, khubd needs to be able to clear
1596 * the port-change status bits (especially
1597 * USB_PORT_STAT_C_CONNECTION).
1598 */
1599
1600 switch (wValue) {
1601 case USB_PORT_FEAT_ENABLE:
1602 fusbh200_writel(fusbh200, temp & ~PORT_PE, status_reg);
1603 break;
1604 case USB_PORT_FEAT_C_ENABLE:
1605 fusbh200_writel(fusbh200, temp | PORT_PEC, status_reg);
1606 break;
1607 case USB_PORT_FEAT_SUSPEND:
1608 if (temp & PORT_RESET)
1609 goto error;
1610 if (!(temp & PORT_SUSPEND))
1611 break;
1612 if ((temp & PORT_PE) == 0)
1613 goto error;
1614
1615 /* resume signaling for 20 msec */
1616 fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
1617 fusbh200->reset_done[wIndex] = jiffies
1618 + msecs_to_jiffies(20);
1619 break;
1620 case USB_PORT_FEAT_C_SUSPEND:
1621 clear_bit(wIndex, &fusbh200->port_c_suspend);
1622 break;
1623 case USB_PORT_FEAT_C_CONNECTION:
1624 fusbh200_writel(fusbh200, temp | PORT_CSC, status_reg);
1625 break;
1626 case USB_PORT_FEAT_C_OVER_CURRENT:
1627 fusbh200_writel(fusbh200, temp | BMISR_OVC, &fusbh200->regs->bmisr);
1628 break;
1629 case USB_PORT_FEAT_C_RESET:
1630 /* GetPortStatus clears reset */
1631 break;
1632 default:
1633 goto error;
1634 }
1635 fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted write */
1636 break;
1637 case GetHubDescriptor:
1638 fusbh200_hub_descriptor (fusbh200, (struct usb_hub_descriptor *)
1639 buf);
1640 break;
1641 case GetHubStatus:
1642 /* no hub-wide feature/status flags */
1643 memset (buf, 0, 4);
1644 //cpu_to_le32s ((u32 *) buf);
1645 break;
1646 case GetPortStatus:
1647 if (!wIndex || wIndex > ports)
1648 goto error;
1649 wIndex--;
1650 status = 0;
1651 temp = fusbh200_readl(fusbh200, status_reg);
1652
1653 // wPortChange bits
1654 if (temp & PORT_CSC)
1655 status |= USB_PORT_STAT_C_CONNECTION << 16;
1656 if (temp & PORT_PEC)
1657 status |= USB_PORT_STAT_C_ENABLE << 16;
1658
1659 temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr);
1660 if (temp1 & BMISR_OVC)
1661 status |= USB_PORT_STAT_C_OVERCURRENT << 16;
1662
1663 /* whoever resumes must GetPortStatus to complete it!! */
1664 if (temp & PORT_RESUME) {
1665
1666 /* Remote Wakeup received? */
1667 if (!fusbh200->reset_done[wIndex]) {
1668 /* resume signaling for 20 msec */
1669 fusbh200->reset_done[wIndex] = jiffies
1670 + msecs_to_jiffies(20);
1671 /* check the port again */
1672 mod_timer(&fusbh200_to_hcd(fusbh200)->rh_timer,
1673 fusbh200->reset_done[wIndex]);
1674 }
1675
1676 /* resume completed? */
1677 else if (time_after_eq(jiffies,
1678 fusbh200->reset_done[wIndex])) {
1679 clear_bit(wIndex, &fusbh200->suspended_ports);
1680 set_bit(wIndex, &fusbh200->port_c_suspend);
1681 fusbh200->reset_done[wIndex] = 0;
1682
1683 /* stop resume signaling */
1684 temp = fusbh200_readl(fusbh200, status_reg);
1685 fusbh200_writel(fusbh200,
1686 temp & ~(PORT_RWC_BITS | PORT_RESUME),
1687 status_reg);
1688 clear_bit(wIndex, &fusbh200->resuming_ports);
1689 retval = handshake(fusbh200, status_reg,
1690 PORT_RESUME, 0, 2000 /* 2msec */);
1691 if (retval != 0) {
1692 fusbh200_err(fusbh200,
1693 "port %d resume error %d\n",
1694 wIndex + 1, retval);
1695 goto error;
1696 }
1697 temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
1698 }
1699 }
1700
1701 /* whoever resets must GetPortStatus to complete it!! */
1702 if ((temp & PORT_RESET)
1703 && time_after_eq(jiffies,
1704 fusbh200->reset_done[wIndex])) {
1705 status |= USB_PORT_STAT_C_RESET << 16;
1706 fusbh200->reset_done [wIndex] = 0;
1707 clear_bit(wIndex, &fusbh200->resuming_ports);
1708
1709 /* force reset to complete */
1710 fusbh200_writel(fusbh200, temp & ~(PORT_RWC_BITS | PORT_RESET),
1711 status_reg);
1712 /* REVISIT: some hardware needs 550+ usec to clear
1713 * this bit; seems too long to spin routinely...
1714 */
1715 retval = handshake(fusbh200, status_reg,
1716 PORT_RESET, 0, 1000);
1717 if (retval != 0) {
1718 fusbh200_err (fusbh200, "port %d reset error %d\n",
1719 wIndex + 1, retval);
1720 goto error;
1721 }
1722
1723 /* see what we found out */
1724 temp = check_reset_complete (fusbh200, wIndex, status_reg,
1725 fusbh200_readl(fusbh200, status_reg));
1726 }
1727
1728 if (!(temp & (PORT_RESUME|PORT_RESET))) {
1729 fusbh200->reset_done[wIndex] = 0;
1730 clear_bit(wIndex, &fusbh200->resuming_ports);
1731 }
1732
1733 /* transfer dedicated ports to the companion hc */
1734 if ((temp & PORT_CONNECT) &&
1735 test_bit(wIndex, &fusbh200->companion_ports)) {
1736 temp &= ~PORT_RWC_BITS;
1737 fusbh200_writel(fusbh200, temp, status_reg);
1738 fusbh200_dbg(fusbh200, "port %d --> companion\n", wIndex + 1);
1739 temp = fusbh200_readl(fusbh200, status_reg);
1740 }
1741
1742 /*
1743 * Even if OWNER is set, there's no harm letting khubd
1744 * see the wPortStatus values (they should all be 0 except
1745 * for PORT_POWER anyway).
1746 */
1747
1748 if (temp & PORT_CONNECT) {
1749 status |= USB_PORT_STAT_CONNECTION;
1750 status |= fusbh200_port_speed(fusbh200, temp);
1751 }
1752 if (temp & PORT_PE)
1753 status |= USB_PORT_STAT_ENABLE;
1754
1755 /* maybe the port was unsuspended without our knowledge */
1756 if (temp & (PORT_SUSPEND|PORT_RESUME)) {
1757 status |= USB_PORT_STAT_SUSPEND;
1758 } else if (test_bit(wIndex, &fusbh200->suspended_ports)) {
1759 clear_bit(wIndex, &fusbh200->suspended_ports);
1760 clear_bit(wIndex, &fusbh200->resuming_ports);
1761 fusbh200->reset_done[wIndex] = 0;
1762 if (temp & PORT_PE)
1763 set_bit(wIndex, &fusbh200->port_c_suspend);
1764 }
1765
1766 temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr);
1767 if (temp1 & BMISR_OVC)
1768 status |= USB_PORT_STAT_OVERCURRENT;
1769 if (temp & PORT_RESET)
1770 status |= USB_PORT_STAT_RESET;
1771 if (test_bit(wIndex, &fusbh200->port_c_suspend))
1772 status |= USB_PORT_STAT_C_SUSPEND << 16;
1773
1774#ifndef VERBOSE_DEBUG
1775 if (status & ~0xffff) /* only if wPortChange is interesting */
1776#endif
1777 dbg_port (fusbh200, "GetStatus", wIndex + 1, temp);
1778 put_unaligned_le32(status, buf);
1779 break;
1780 case SetHubFeature:
1781 switch (wValue) {
1782 case C_HUB_LOCAL_POWER:
1783 case C_HUB_OVER_CURRENT:
1784 /* no hub-wide feature/status flags */
1785 break;
1786 default:
1787 goto error;
1788 }
1789 break;
1790 case SetPortFeature:
1791 selector = wIndex >> 8;
1792 wIndex &= 0xff;
1793
1794 if (!wIndex || wIndex > ports)
1795 goto error;
1796 wIndex--;
1797 temp = fusbh200_readl(fusbh200, status_reg);
1798 temp &= ~PORT_RWC_BITS;
1799 switch (wValue) {
1800 case USB_PORT_FEAT_SUSPEND:
1801 if ((temp & PORT_PE) == 0
1802 || (temp & PORT_RESET) != 0)
1803 goto error;
1804
1805			/* After the above check the port must be connected.
1806			 * Setting the appropriate bit could put the PHY into
1807			 * low-power mode if the hostpc feature is present.
1808			 */
1809 fusbh200_writel(fusbh200, temp | PORT_SUSPEND, status_reg);
1810 set_bit(wIndex, &fusbh200->suspended_ports);
1811 break;
1812 case USB_PORT_FEAT_RESET:
1813 if (temp & PORT_RESUME)
1814 goto error;
1815 /* line status bits may report this as low speed,
1816 * which can be fine if this root hub has a
1817 * transaction translator built in.
1818 */
1819 fusbh200_vdbg (fusbh200, "port %d reset\n", wIndex + 1);
1820 temp |= PORT_RESET;
1821 temp &= ~PORT_PE;
1822
1823 /*
1824 * caller must wait, then call GetPortStatus
1825 * usb 2.0 spec says 50 ms resets on root
1826 */
1827 fusbh200->reset_done [wIndex] = jiffies
1828 + msecs_to_jiffies (50);
1829 fusbh200_writel(fusbh200, temp, status_reg);
1830 break;
1831
1832 /* For downstream facing ports (these): one hub port is put
1833 * into test mode according to USB2 11.24.2.13, then the hub
1834 * must be reset (which for root hub now means rmmod+modprobe,
1835 * or else system reboot). See EHCI 2.3.9 and 4.14 for info
1836 * about the EHCI-specific stuff.
1837 */
1838 case USB_PORT_FEAT_TEST:
1839 if (!selector || selector > 5)
1840 goto error;
1841 spin_unlock_irqrestore(&fusbh200->lock, flags);
1842 fusbh200_quiesce(fusbh200);
1843 spin_lock_irqsave(&fusbh200->lock, flags);
1844
1845 /* Put all enabled ports into suspend */
1846 temp = fusbh200_readl(fusbh200, status_reg) & ~PORT_RWC_BITS;
1847 if (temp & PORT_PE)
1848 fusbh200_writel(fusbh200, temp | PORT_SUSPEND,
1849 status_reg);
1850
1851 spin_unlock_irqrestore(&fusbh200->lock, flags);
1852 fusbh200_halt(fusbh200);
1853 spin_lock_irqsave(&fusbh200->lock, flags);
1854
1855 temp = fusbh200_readl(fusbh200, status_reg);
1856 temp |= selector << 16;
1857 fusbh200_writel(fusbh200, temp, status_reg);
1858 break;
1859
1860 default:
1861 goto error;
1862 }
1863 fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted writes */
1864 break;
1865
1866 default:
1867error:
1868 /* "stall" on error */
1869 retval = -EPIPE;
1870 }
1871 spin_unlock_irqrestore (&fusbh200->lock, flags);
1872 return retval;
1873}
1874
1875static void __maybe_unused fusbh200_relinquish_port(struct usb_hcd *hcd,
1876 int portnum)
1877{
1878 return;
1879}
1880
1881static int __maybe_unused fusbh200_port_handed_over(struct usb_hcd *hcd,
1882 int portnum)
1883{
1884 return 0;
1885}
1886/*-------------------------------------------------------------------------*/
1887/*
1888 * There are basically three types of memory:
1889 * - data used only by the HCD ... kmalloc is fine
1890 * - async and periodic schedules, shared by HC and HCD ... these
1891 * need to use dma_pool or dma_alloc_coherent
1892 * - driver buffers, read/written by HC ... single shot DMA mapped
1893 *
1894 * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
1895 * No memory seen by this driver is pageable.
1896 */
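
/*
 * Illustrative sketch only (not part of the driver): the HC-visible
 * structures below follow the usual dma_pool pattern, roughly
 *
 *	pool = dma_pool_create("name", dev, size, align, 4096);
 *	cpu_addr = dma_pool_alloc(pool, mem_flags, &dma_handle);
 *	...	// hand dma_handle to the controller, keep cpu_addr for the CPU
 *	dma_pool_free(pool, cpu_addr, dma_handle);
 *	dma_pool_destroy(pool);
 *
 * fusbh200_mem_init() and fusbh200_mem_cleanup() below do exactly this for
 * the qtd, qh and itd pools.
 */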
1897
1898/*-------------------------------------------------------------------------*/
1899
1900/* Allocate the key transfer structures from the previously allocated pool */
1901
1902static inline void fusbh200_qtd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd,
1903 dma_addr_t dma)
1904{
1905 memset (qtd, 0, sizeof *qtd);
1906 qtd->qtd_dma = dma;
1907 qtd->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT);
1908 qtd->hw_next = FUSBH200_LIST_END(fusbh200);
1909 qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200);
1910 INIT_LIST_HEAD (&qtd->qtd_list);
1911}
1912
1913static struct fusbh200_qtd *fusbh200_qtd_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags)
1914{
1915 struct fusbh200_qtd *qtd;
1916 dma_addr_t dma;
1917
1918 qtd = dma_pool_alloc (fusbh200->qtd_pool, flags, &dma);
1919 if (qtd != NULL) {
1920 fusbh200_qtd_init(fusbh200, qtd, dma);
1921 }
1922 return qtd;
1923}
1924
1925static inline void fusbh200_qtd_free (struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
1926{
1927 dma_pool_free (fusbh200->qtd_pool, qtd, qtd->qtd_dma);
1928}
1929
1930
1931static void qh_destroy(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
1932{
1933 /* clean qtds first, and know this is not linked */
1934 if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
1935 fusbh200_dbg (fusbh200, "unused qh not empty!\n");
1936 BUG ();
1937 }
1938 if (qh->dummy)
1939 fusbh200_qtd_free (fusbh200, qh->dummy);
1940 dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma);
1941 kfree(qh);
1942}
1943
1944static struct fusbh200_qh *fusbh200_qh_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags)
1945{
1946 struct fusbh200_qh *qh;
1947 dma_addr_t dma;
1948
1949 qh = kzalloc(sizeof *qh, GFP_ATOMIC);
1950 if (!qh)
1951 goto done;
1952 qh->hw = (struct fusbh200_qh_hw *)
1953 dma_pool_alloc(fusbh200->qh_pool, flags, &dma);
1954 if (!qh->hw)
1955 goto fail;
1956 memset(qh->hw, 0, sizeof *qh->hw);
1957 qh->qh_dma = dma;
1958 // INIT_LIST_HEAD (&qh->qh_list);
1959 INIT_LIST_HEAD (&qh->qtd_list);
1960
1961 /* dummy td enables safe urb queuing */
1962 qh->dummy = fusbh200_qtd_alloc (fusbh200, flags);
1963 if (qh->dummy == NULL) {
1964 fusbh200_dbg (fusbh200, "no dummy td\n");
1965 goto fail1;
1966 }
1967done:
1968 return qh;
1969fail1:
1970 dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma);
1971fail:
1972 kfree(qh);
1973 return NULL;
1974}
1975
1976/*-------------------------------------------------------------------------*/
1977
1978/* The queue heads and transfer descriptors are managed from pools tied
1979 * to each of the "per device" structures.
1980 * This is the initialisation and cleanup code.
1981 */
1982
1983static void fusbh200_mem_cleanup (struct fusbh200_hcd *fusbh200)
1984{
1985 if (fusbh200->async)
1986 qh_destroy(fusbh200, fusbh200->async);
1987 fusbh200->async = NULL;
1988
1989 if (fusbh200->dummy)
1990 qh_destroy(fusbh200, fusbh200->dummy);
1991 fusbh200->dummy = NULL;
1992
1993 /* DMA consistent memory and pools */
1994 if (fusbh200->qtd_pool)
1995 dma_pool_destroy (fusbh200->qtd_pool);
1996 fusbh200->qtd_pool = NULL;
1997
1998 if (fusbh200->qh_pool) {
1999 dma_pool_destroy (fusbh200->qh_pool);
2000 fusbh200->qh_pool = NULL;
2001 }
2002
2003 if (fusbh200->itd_pool)
2004 dma_pool_destroy (fusbh200->itd_pool);
2005 fusbh200->itd_pool = NULL;
2006
2007 if (fusbh200->periodic)
2008 dma_free_coherent (fusbh200_to_hcd(fusbh200)->self.controller,
2009 fusbh200->periodic_size * sizeof (u32),
2010 fusbh200->periodic, fusbh200->periodic_dma);
2011 fusbh200->periodic = NULL;
2012
2013 /* shadow periodic table */
2014 kfree(fusbh200->pshadow);
2015 fusbh200->pshadow = NULL;
2016}
2017
2018/* remember to add cleanup code (above) if you add anything here */
2019static int fusbh200_mem_init (struct fusbh200_hcd *fusbh200, gfp_t flags)
2020{
2021 int i;
2022
2023 /* QTDs for control/bulk/intr transfers */
2024 fusbh200->qtd_pool = dma_pool_create ("fusbh200_qtd",
2025 fusbh200_to_hcd(fusbh200)->self.controller,
2026 sizeof (struct fusbh200_qtd),
2027 32 /* byte alignment (for hw parts) */,
2028 4096 /* can't cross 4K */);
2029 if (!fusbh200->qtd_pool) {
2030 goto fail;
2031 }
2032
2033 /* QHs for control/bulk/intr transfers */
2034 fusbh200->qh_pool = dma_pool_create ("fusbh200_qh",
2035 fusbh200_to_hcd(fusbh200)->self.controller,
2036 sizeof(struct fusbh200_qh_hw),
2037 32 /* byte alignment (for hw parts) */,
2038 4096 /* can't cross 4K */);
2039 if (!fusbh200->qh_pool) {
2040 goto fail;
2041 }
2042 fusbh200->async = fusbh200_qh_alloc (fusbh200, flags);
2043 if (!fusbh200->async) {
2044 goto fail;
2045 }
2046
2047 /* ITD for high speed ISO transfers */
2048 fusbh200->itd_pool = dma_pool_create ("fusbh200_itd",
2049 fusbh200_to_hcd(fusbh200)->self.controller,
2050 sizeof (struct fusbh200_itd),
2051 64 /* byte alignment (for hw parts) */,
2052 4096 /* can't cross 4K */);
2053 if (!fusbh200->itd_pool) {
2054 goto fail;
2055 }
2056
2057 /* Hardware periodic table */
2058 fusbh200->periodic = (__le32 *)
2059 dma_alloc_coherent (fusbh200_to_hcd(fusbh200)->self.controller,
2060 fusbh200->periodic_size * sizeof(__le32),
2061 &fusbh200->periodic_dma, 0);
2062 if (fusbh200->periodic == NULL) {
2063 goto fail;
2064 }
2065
2066 for (i = 0; i < fusbh200->periodic_size; i++)
2067 fusbh200->periodic[i] = FUSBH200_LIST_END(fusbh200);
2068
2069 /* software shadow of hardware table */
2070 fusbh200->pshadow = kcalloc(fusbh200->periodic_size, sizeof(void *), flags);
2071 if (fusbh200->pshadow != NULL)
2072 return 0;
2073
2074fail:
2075 fusbh200_dbg (fusbh200, "couldn't init memory\n");
2076 fusbh200_mem_cleanup (fusbh200);
2077 return -ENOMEM;
2078}
2079/*-------------------------------------------------------------------------*/
2080/*
2081 * EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
2082 *
2083 * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
2084 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
2085 * buffers needed for the larger number). We use one QH per endpoint, queue
2086 * multiple urbs (all three types) per endpoint. URBs may need several qtds.
2087 *
2088 * ISO traffic uses "ISO TD" (itd) records, and (along with
2089 * interrupts) needs careful scheduling. Performance improvements can be
2090 * an ongoing challenge.  That's handled by the scheduling code later in
 * this file (adapted from ehci-sched.c).
2091 *
2092 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
2093 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
2094 * (b) special fields in qh entries or (c) split iso entries. TTs will
2095 * buffer low/full speed data so the host collects it at high speed.
2096 */
2097
2098/*-------------------------------------------------------------------------*/
2099
2100/* fill a qtd, returning how much of the buffer we were able to queue up */
2101
2102static int
2103qtd_fill(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd, dma_addr_t buf,
2104 size_t len, int token, int maxpacket)
2105{
2106 int i, count;
2107 u64 addr = buf;
2108
2109 /* one buffer entry per 4K ... first might be short or unaligned */
2110 qtd->hw_buf[0] = cpu_to_hc32(fusbh200, (u32)addr);
2111 qtd->hw_buf_hi[0] = cpu_to_hc32(fusbh200, (u32)(addr >> 32));
2112 count = 0x1000 - (buf & 0x0fff); /* rest of that page */
2113 if (likely (len < count)) /* ... iff needed */
2114 count = len;
2115 else {
2116 buf += 0x1000;
2117 buf &= ~0x0fff;
2118
2119 /* per-qtd limit: from 16K to 20K (best alignment) */
2120 for (i = 1; count < len && i < 5; i++) {
2121 addr = buf;
2122 qtd->hw_buf[i] = cpu_to_hc32(fusbh200, (u32)addr);
2123 qtd->hw_buf_hi[i] = cpu_to_hc32(fusbh200,
2124 (u32)(addr >> 32));
2125 buf += 0x1000;
2126 if ((count + 0x1000) < len)
2127 count += 0x1000;
2128 else
2129 count = len;
2130 }
2131
2132 /* short packets may only terminate transfers */
2133 if (count != len)
2134 count -= (count % maxpacket);
2135 }
2136 qtd->hw_token = cpu_to_hc32(fusbh200, (count << 16) | token);
2137 qtd->length = count;
2138
2139 return count;
2140}
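
/*
 * Worked example (illustration only): with buf == 0x12345800, len == 20480
 * and maxpacket == 512, the first buffer pointer covers
 * 0x1000 - 0x800 == 2048 bytes and the remaining four pointers cover a full
 * 4096-byte page each, so qtd_fill() queues 2048 + 4 * 4096 == 18432 bytes.
 * Since that is less than len, the count is trimmed to a multiple of
 * maxpacket (already true here) and the rest is left for the next qtd.
 */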
2141
2142/*-------------------------------------------------------------------------*/
2143
2144static inline void
2145qh_update (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh, struct fusbh200_qtd *qtd)
2146{
2147 struct fusbh200_qh_hw *hw = qh->hw;
2148
2149 /* writes to an active overlay are unsafe */
2150 BUG_ON(qh->qh_state != QH_STATE_IDLE);
2151
2152 hw->hw_qtd_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
2153 hw->hw_alt_next = FUSBH200_LIST_END(fusbh200);
2154
2155 /* Except for control endpoints, we make hardware maintain data
2156 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
2157 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
2158 * ever clear it.
2159 */
2160 if (!(hw->hw_info1 & cpu_to_hc32(fusbh200, QH_TOGGLE_CTL))) {
2161 unsigned is_out, epnum;
2162
2163 is_out = qh->is_out;
2164 epnum = (hc32_to_cpup(fusbh200, &hw->hw_info1) >> 8) & 0x0f;
2165 if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
2166 hw->hw_token &= ~cpu_to_hc32(fusbh200, QTD_TOGGLE);
2167 usb_settoggle (qh->dev, epnum, is_out, 1);
2168 }
2169 }
2170
2171 hw->hw_token &= cpu_to_hc32(fusbh200, QTD_TOGGLE | QTD_STS_PING);
2172}
2173
2174/* if it weren't for a common silicon quirk (writing the dummy into the qh
2175 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
2176 * recovery (including urb dequeue) would need software changes to a QH...
2177 */
2178static void
2179qh_refresh (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
2180{
2181 struct fusbh200_qtd *qtd;
2182
2183 if (list_empty (&qh->qtd_list))
2184 qtd = qh->dummy;
2185 else {
2186 qtd = list_entry (qh->qtd_list.next,
2187 struct fusbh200_qtd, qtd_list);
2188 /*
2189 * first qtd may already be partially processed.
2190 * If we come here during unlink, the QH overlay region
2191 * might have reference to the just unlinked qtd. The
2192 * qtd is updated in qh_completions(). Update the QH
2193 * overlay here.
2194 */
2195 if (cpu_to_hc32(fusbh200, qtd->qtd_dma) == qh->hw->hw_current) {
2196 qh->hw->hw_qtd_next = qtd->hw_next;
2197 qtd = NULL;
2198 }
2199 }
2200
2201 if (qtd)
2202 qh_update (fusbh200, qh, qtd);
2203}
2204
2205/*-------------------------------------------------------------------------*/
2206
2207static void qh_link_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
2208
2209static void fusbh200_clear_tt_buffer_complete(struct usb_hcd *hcd,
2210 struct usb_host_endpoint *ep)
2211{
2212 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
2213 struct fusbh200_qh *qh = ep->hcpriv;
2214 unsigned long flags;
2215
2216 spin_lock_irqsave(&fusbh200->lock, flags);
2217 qh->clearing_tt = 0;
2218 if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
2219 && fusbh200->rh_state == FUSBH200_RH_RUNNING)
2220 qh_link_async(fusbh200, qh);
2221 spin_unlock_irqrestore(&fusbh200->lock, flags);
2222}
2223
2224static void fusbh200_clear_tt_buffer(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh,
2225 struct urb *urb, u32 token)
2226{
2227
2228 /* If an async split transaction gets an error or is unlinked,
2229 * the TT buffer may be left in an indeterminate state. We
2230 * have to clear the TT buffer.
2231 *
2232 * Note: this routine is never called for Isochronous transfers.
2233 */
2234 if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
2235#ifdef DEBUG
2236 struct usb_device *tt = urb->dev->tt->hub;
2237 dev_dbg(&tt->dev,
2238 "clear tt buffer port %d, a%d ep%d t%08x\n",
2239 urb->dev->ttport, urb->dev->devnum,
2240 usb_pipeendpoint(urb->pipe), token);
2241#endif /* DEBUG */
2242 if (urb->dev->tt->hub !=
2243 fusbh200_to_hcd(fusbh200)->self.root_hub) {
2244 if (usb_hub_clear_tt_buffer(urb) == 0)
2245 qh->clearing_tt = 1;
2246 }
2247 }
2248}
2249
2250static int qtd_copy_status (
2251 struct fusbh200_hcd *fusbh200,
2252 struct urb *urb,
2253 size_t length,
2254 u32 token
2255)
2256{
2257 int status = -EINPROGRESS;
2258
2259 /* count IN/OUT bytes, not SETUP (even short packets) */
2260 if (likely (QTD_PID (token) != 2))
2261 urb->actual_length += length - QTD_LENGTH (token);
2262
2263 /* don't modify error codes */
2264 if (unlikely(urb->unlinked))
2265 return status;
2266
2267 /* force cleanup after short read; not always an error */
2268 if (unlikely (IS_SHORT_READ (token)))
2269 status = -EREMOTEIO;
2270
2271 /* serious "can't proceed" faults reported by the hardware */
2272 if (token & QTD_STS_HALT) {
2273 if (token & QTD_STS_BABBLE) {
2274 /* FIXME "must" disable babbling device's port too */
2275 status = -EOVERFLOW;
2276 /* CERR nonzero + halt --> stall */
2277 } else if (QTD_CERR(token)) {
2278 status = -EPIPE;
2279
2280 /* In theory, more than one of the following bits can be set
2281 * since they are sticky and the transaction is retried.
2282 * Which to test first is rather arbitrary.
2283 */
2284 } else if (token & QTD_STS_MMF) {
2285 /* fs/ls interrupt xfer missed the complete-split */
2286 status = -EPROTO;
2287 } else if (token & QTD_STS_DBE) {
2288 status = (QTD_PID (token) == 1) /* IN ? */
2289 ? -ENOSR /* hc couldn't read data */
2290 : -ECOMM; /* hc couldn't write data */
2291 } else if (token & QTD_STS_XACT) {
2292 /* timeout, bad CRC, wrong PID, etc */
2293 fusbh200_dbg(fusbh200, "devpath %s ep%d%s 3strikes\n",
2294 urb->dev->devpath,
2295 usb_pipeendpoint(urb->pipe),
2296 usb_pipein(urb->pipe) ? "in" : "out");
2297 status = -EPROTO;
2298 } else { /* unknown */
2299 status = -EPROTO;
2300 }
2301
2302 fusbh200_vdbg (fusbh200,
2303 "dev%d ep%d%s qtd token %08x --> status %d\n",
2304 usb_pipedevice (urb->pipe),
2305 usb_pipeendpoint (urb->pipe),
2306 usb_pipein (urb->pipe) ? "in" : "out",
2307 token, status);
2308 }
2309
2310 return status;
2311}
2312
2313static void
2314fusbh200_urb_done(struct fusbh200_hcd *fusbh200, struct urb *urb, int status)
2315__releases(fusbh200->lock)
2316__acquires(fusbh200->lock)
2317{
2318 if (likely (urb->hcpriv != NULL)) {
2319 struct fusbh200_qh *qh = (struct fusbh200_qh *) urb->hcpriv;
2320
2321 /* S-mask in a QH means it's an interrupt urb */
2322 if ((qh->hw->hw_info2 & cpu_to_hc32(fusbh200, QH_SMASK)) != 0) {
2323
2324 /* ... update hc-wide periodic stats (for usbfs) */
2325 fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs--;
2326 }
2327 }
2328
2329 if (unlikely(urb->unlinked)) {
2330 COUNT(fusbh200->stats.unlink);
2331 } else {
2332 /* report non-error and short read status as zero */
2333 if (status == -EINPROGRESS || status == -EREMOTEIO)
2334 status = 0;
2335 COUNT(fusbh200->stats.complete);
2336 }
2337
2338#ifdef FUSBH200_URB_TRACE
2339 fusbh200_dbg (fusbh200,
2340 "%s %s urb %p ep%d%s status %d len %d/%d\n",
2341 __func__, urb->dev->devpath, urb,
2342 usb_pipeendpoint (urb->pipe),
2343 usb_pipein (urb->pipe) ? "in" : "out",
2344 status,
2345 urb->actual_length, urb->transfer_buffer_length);
2346#endif
2347
2348 /* complete() can reenter this HCD */
2349 usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
2350 spin_unlock (&fusbh200->lock);
2351 usb_hcd_giveback_urb(fusbh200_to_hcd(fusbh200), urb, status);
2352 spin_lock (&fusbh200->lock);
2353}
2354
2355static int qh_schedule (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
2356
2357/*
2358 * Process and free completed qtds for a qh, returning URBs to drivers.
2359 * Chases up to qh->hw_current. Returns number of completions called,
2360 * indicating how much "real" work we did.
2361 */
2362static unsigned
2363qh_completions (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
2364{
2365 struct fusbh200_qtd *last, *end = qh->dummy;
2366 struct list_head *entry, *tmp;
2367 int last_status;
2368 int stopped;
2369 unsigned count = 0;
2370 u8 state;
2371 struct fusbh200_qh_hw *hw = qh->hw;
2372
2373 if (unlikely (list_empty (&qh->qtd_list)))
2374 return count;
2375
2376 /* completions (or tasks on other cpus) must never clobber HALT
2377 * till we've gone through and cleaned everything up, even when
2378 * they add urbs to this qh's queue or mark them for unlinking.
2379 *
2380 * NOTE: unlinking expects to be done in queue order.
2381 *
2382 * It's a bug for qh->qh_state to be anything other than
2383 * QH_STATE_IDLE, unless our caller is scan_async() or
2384 * scan_intr().
2385 */
2386 state = qh->qh_state;
2387 qh->qh_state = QH_STATE_COMPLETING;
2388 stopped = (state == QH_STATE_IDLE);
2389
2390 rescan:
2391 last = NULL;
2392 last_status = -EINPROGRESS;
2393 qh->needs_rescan = 0;
2394
2395 /* remove de-activated QTDs from front of queue.
2396	 * after faults (including short reads), clean up this urb
2397	 * then let the queue advance.
2398	 * if the queue is stopped, handle unlinks.
2399 */
2400 list_for_each_safe (entry, tmp, &qh->qtd_list) {
2401 struct fusbh200_qtd *qtd;
2402 struct urb *urb;
2403 u32 token = 0;
2404
2405 qtd = list_entry (entry, struct fusbh200_qtd, qtd_list);
2406 urb = qtd->urb;
2407
2408 /* clean up any state from previous QTD ...*/
2409 if (last) {
2410 if (likely (last->urb != urb)) {
2411 fusbh200_urb_done(fusbh200, last->urb, last_status);
2412 count++;
2413 last_status = -EINPROGRESS;
2414 }
2415 fusbh200_qtd_free (fusbh200, last);
2416 last = NULL;
2417 }
2418
2419 /* ignore urbs submitted during completions we reported */
2420 if (qtd == end)
2421 break;
2422
2423 /* hardware copies qtd out of qh overlay */
2424 rmb ();
2425 token = hc32_to_cpu(fusbh200, qtd->hw_token);
2426
2427 /* always clean up qtds the hc de-activated */
2428 retry_xacterr:
2429 if ((token & QTD_STS_ACTIVE) == 0) {
2430
2431 /* Report Data Buffer Error: non-fatal but useful */
2432 if (token & QTD_STS_DBE)
2433 fusbh200_dbg(fusbh200,
2434 "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
2435 urb,
2436 usb_endpoint_num(&urb->ep->desc),
2437 usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
2438 urb->transfer_buffer_length,
2439 qtd,
2440 qh);
2441
2442 /* on STALL, error, and short reads this urb must
2443 * complete and all its qtds must be recycled.
2444 */
2445 if ((token & QTD_STS_HALT) != 0) {
2446
2447 /* retry transaction errors until we
2448 * reach the software xacterr limit
2449 */
2450 if ((token & QTD_STS_XACT) &&
2451 QTD_CERR(token) == 0 &&
2452 ++qh->xacterrs < QH_XACTERR_MAX &&
2453 !urb->unlinked) {
2454 fusbh200_dbg(fusbh200,
2455 "detected XactErr len %zu/%zu retry %d\n",
2456 qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
2457
2458 /* reset the token in the qtd and the
2459 * qh overlay (which still contains
2460 * the qtd) so that we pick up from
2461 * where we left off
2462 */
2463 token &= ~QTD_STS_HALT;
2464 token |= QTD_STS_ACTIVE |
2465 (FUSBH200_TUNE_CERR << 10);
2466 qtd->hw_token = cpu_to_hc32(fusbh200,
2467 token);
2468 wmb();
2469 hw->hw_token = cpu_to_hc32(fusbh200,
2470 token);
2471 goto retry_xacterr;
2472 }
2473 stopped = 1;
2474
2475 /* magic dummy for some short reads; qh won't advance.
2476 * that silicon quirk can kick in with this dummy too.
2477 *
2478 * other short reads won't stop the queue, including
2479 * control transfers (status stage handles that) or
2480 * most other single-qtd reads ... the queue stops if
2481 * URB_SHORT_NOT_OK was set so the driver submitting
2482 * the urbs could clean it up.
2483 */
2484 } else if (IS_SHORT_READ (token)
2485 && !(qtd->hw_alt_next
2486 & FUSBH200_LIST_END(fusbh200))) {
2487 stopped = 1;
2488 }
2489
2490 /* stop scanning when we reach qtds the hc is using */
2491 } else if (likely (!stopped
2492 && fusbh200->rh_state >= FUSBH200_RH_RUNNING)) {
2493 break;
2494
2495 /* scan the whole queue for unlinks whenever it stops */
2496 } else {
2497 stopped = 1;
2498
2499 /* cancel everything if we halt, suspend, etc */
2500 if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
2501 last_status = -ESHUTDOWN;
2502
2503 /* this qtd is active; skip it unless a previous qtd
2504 * for its urb faulted, or its urb was canceled.
2505 */
2506 else if (last_status == -EINPROGRESS && !urb->unlinked)
2507 continue;
2508
2509 /* qh unlinked; token in overlay may be most current */
2510 if (state == QH_STATE_IDLE
2511 && cpu_to_hc32(fusbh200, qtd->qtd_dma)
2512 == hw->hw_current) {
2513 token = hc32_to_cpu(fusbh200, hw->hw_token);
2514
2515 /* An unlink may leave an incomplete
2516 * async transaction in the TT buffer.
2517 * We have to clear it.
2518 */
2519 fusbh200_clear_tt_buffer(fusbh200, qh, urb, token);
2520 }
2521 }
2522
2523 /* unless we already know the urb's status, collect qtd status
2524 * and update count of bytes transferred. in common short read
2525 * cases with only one data qtd (including control transfers),
2526 * queue processing won't halt. but with two or more qtds (for
2527 * example, with a 32 KB transfer), when the first qtd gets a
2528 * short read the second must be removed by hand.
2529 */
2530 if (last_status == -EINPROGRESS) {
2531 last_status = qtd_copy_status(fusbh200, urb,
2532 qtd->length, token);
2533 if (last_status == -EREMOTEIO
2534 && (qtd->hw_alt_next
2535 & FUSBH200_LIST_END(fusbh200)))
2536 last_status = -EINPROGRESS;
2537
2538 /* As part of low/full-speed endpoint-halt processing
2539 * we must clear the TT buffer (11.17.5).
2540 */
2541 if (unlikely(last_status != -EINPROGRESS &&
2542 last_status != -EREMOTEIO)) {
2543 /* The TT's in some hubs malfunction when they
2544 * receive this request following a STALL (they
2545 * stop sending isochronous packets). Since a
2546 * STALL can't leave the TT buffer in a busy
2547 * state (if you believe Figures 11-48 - 11-51
2548 * in the USB 2.0 spec), we won't clear the TT
2549 * buffer in this case. Strictly speaking this
2550 * is a violation of the spec.
2551 */
2552 if (last_status != -EPIPE)
2553 fusbh200_clear_tt_buffer(fusbh200, qh, urb,
2554 token);
2555 }
2556 }
2557
2558 /* if we're removing something not at the queue head,
2559 * patch the hardware queue pointer.
2560 */
2561 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
2562 last = list_entry (qtd->qtd_list.prev,
2563 struct fusbh200_qtd, qtd_list);
2564 last->hw_next = qtd->hw_next;
2565 }
2566
2567 /* remove qtd; it's recycled after possible urb completion */
2568 list_del (&qtd->qtd_list);
2569 last = qtd;
2570
2571 /* reinit the xacterr counter for the next qtd */
2572 qh->xacterrs = 0;
2573 }
2574
2575 /* last urb's completion might still need calling */
2576 if (likely (last != NULL)) {
2577 fusbh200_urb_done(fusbh200, last->urb, last_status);
2578 count++;
2579 fusbh200_qtd_free (fusbh200, last);
2580 }
2581
2582 /* Do we need to rescan for URBs dequeued during a giveback? */
2583 if (unlikely(qh->needs_rescan)) {
2584 /* If the QH is already unlinked, do the rescan now. */
2585 if (state == QH_STATE_IDLE)
2586 goto rescan;
2587
2588 /* Otherwise we have to wait until the QH is fully unlinked.
2589 * Our caller will start an unlink if qh->needs_rescan is
2590 * set. But if an unlink has already started, nothing needs
2591 * to be done.
2592 */
2593 if (state != QH_STATE_LINKED)
2594 qh->needs_rescan = 0;
2595 }
2596
2597 /* restore original state; caller must unlink or relink */
2598 qh->qh_state = state;
2599
2600 /* be sure the hardware's done with the qh before refreshing
2601 * it after fault cleanup, or recovering from silicon wrongly
2602 * overlaying the dummy qtd (which reduces DMA chatter).
2603 */
2604 if (stopped != 0 || hw->hw_qtd_next == FUSBH200_LIST_END(fusbh200)) {
2605 switch (state) {
2606 case QH_STATE_IDLE:
2607 qh_refresh(fusbh200, qh);
2608 break;
2609 case QH_STATE_LINKED:
2610 /* We won't refresh a QH that's linked (after the HC
2611 * stopped the queue). That avoids a race:
2612 * - HC reads first part of QH;
2613 * - CPU updates that first part and the token;
2614 * - HC reads rest of that QH, including token
2615 * Result: HC gets an inconsistent image, and then
2616 * DMAs to/from the wrong memory (corrupting it).
2617 *
2618 * That should be rare for interrupt transfers,
2619 * except maybe high bandwidth ...
2620 */
2621
2622 /* Tell the caller to start an unlink */
2623 qh->needs_rescan = 1;
2624 break;
2625 /* otherwise, unlink already started */
2626 }
2627 }
2628
2629 return count;
2630}
2631
2632/*-------------------------------------------------------------------------*/
2633
2634// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
2635#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
2636// ... and packet size, for any kind of endpoint descriptor
2637#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
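
/*
 * Worked example (illustration only, not part of the driver): a high-speed
 * high-bandwidth endpoint advertising wMaxPacketSize == 0x1400 decodes as
 * hb_mult() == 3 and max_packet() == 1024, i.e. up to 3 * 1024 bytes per
 * microframe.  The unused helper below just spells that product out.
 */
static inline unsigned hb_bytes_per_uframe(u16 wMaxPacketSize)
{
	return hb_mult(wMaxPacketSize) * max_packet(wMaxPacketSize);
}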
2638
2639/*
2640 * reverse of qh_urb_transaction: free a list of TDs.
2641 * used for cleanup after errors, before HC sees an URB's TDs.
2642 */
2643static void qtd_list_free (
2644 struct fusbh200_hcd *fusbh200,
2645 struct urb *urb,
2646 struct list_head *qtd_list
2647) {
2648 struct list_head *entry, *temp;
2649
2650 list_for_each_safe (entry, temp, qtd_list) {
2651 struct fusbh200_qtd *qtd;
2652
2653 qtd = list_entry (entry, struct fusbh200_qtd, qtd_list);
2654 list_del (&qtd->qtd_list);
2655 fusbh200_qtd_free (fusbh200, qtd);
2656 }
2657}
2658
2659/*
2660 * create a list of filled qtds for this URB; won't link into qh.
2661 */
2662static struct list_head *
2663qh_urb_transaction (
2664 struct fusbh200_hcd *fusbh200,
2665 struct urb *urb,
2666 struct list_head *head,
2667 gfp_t flags
2668) {
2669 struct fusbh200_qtd *qtd, *qtd_prev;
2670 dma_addr_t buf;
2671 int len, this_sg_len, maxpacket;
2672 int is_input;
2673 u32 token;
2674 int i;
2675 struct scatterlist *sg;
2676
2677 /*
2678 * URBs map to sequences of QTDs: one logical transaction
2679 */
2680 qtd = fusbh200_qtd_alloc (fusbh200, flags);
2681 if (unlikely (!qtd))
2682 return NULL;
2683 list_add_tail (&qtd->qtd_list, head);
2684 qtd->urb = urb;
2685
2686 token = QTD_STS_ACTIVE;
2687 token |= (FUSBH200_TUNE_CERR << 10);
2688 /* for split transactions, SplitXState initialized to zero */
2689
2690 len = urb->transfer_buffer_length;
2691 is_input = usb_pipein (urb->pipe);
2692 if (usb_pipecontrol (urb->pipe)) {
2693 /* SETUP pid */
2694 qtd_fill(fusbh200, qtd, urb->setup_dma,
2695 sizeof (struct usb_ctrlrequest),
2696 token | (2 /* "setup" */ << 8), 8);
2697
2698 /* ... and always at least one more pid */
2699 token ^= QTD_TOGGLE;
2700 qtd_prev = qtd;
2701 qtd = fusbh200_qtd_alloc (fusbh200, flags);
2702 if (unlikely (!qtd))
2703 goto cleanup;
2704 qtd->urb = urb;
2705 qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
2706 list_add_tail (&qtd->qtd_list, head);
2707
2708 /* for zero length DATA stages, STATUS is always IN */
2709 if (len == 0)
2710 token |= (1 /* "in" */ << 8);
2711 }
2712
2713 /*
2714 * data transfer stage: buffer setup
2715 */
2716 i = urb->num_mapped_sgs;
2717 if (len > 0 && i > 0) {
2718 sg = urb->sg;
2719 buf = sg_dma_address(sg);
2720
2721 /* urb->transfer_buffer_length may be smaller than the
2722 * size of the scatterlist (or vice versa)
2723 */
2724 this_sg_len = min_t(int, sg_dma_len(sg), len);
2725 } else {
2726 sg = NULL;
2727 buf = urb->transfer_dma;
2728 this_sg_len = len;
2729 }
2730
2731 if (is_input)
2732 token |= (1 /* "in" */ << 8);
2733 /* else it's already initted to "out" pid (0 << 8) */
2734
2735 maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
2736
2737 /*
2738 * buffer gets wrapped in one or more qtds;
2739 * last one may be "short" (including zero len)
2740 * and may serve as a control status ack
2741 */
2742 for (;;) {
2743 int this_qtd_len;
2744
2745 this_qtd_len = qtd_fill(fusbh200, qtd, buf, this_sg_len, token,
2746 maxpacket);
2747 this_sg_len -= this_qtd_len;
2748 len -= this_qtd_len;
2749 buf += this_qtd_len;
2750
2751 /*
2752 * short reads advance to a "magic" dummy instead of the next
2753 * qtd ... that forces the queue to stop, for manual cleanup.
2754 * (this will usually be overridden later.)
2755 */
2756 if (is_input)
2757 qtd->hw_alt_next = fusbh200->async->hw->hw_alt_next;
2758
2759 /* qh makes control packets use qtd toggle; maybe switch it */
2760 if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
2761 token ^= QTD_TOGGLE;
2762
2763 if (likely(this_sg_len <= 0)) {
2764 if (--i <= 0 || len <= 0)
2765 break;
2766 sg = sg_next(sg);
2767 buf = sg_dma_address(sg);
2768 this_sg_len = min_t(int, sg_dma_len(sg), len);
2769 }
2770
2771 qtd_prev = qtd;
2772 qtd = fusbh200_qtd_alloc (fusbh200, flags);
2773 if (unlikely (!qtd))
2774 goto cleanup;
2775 qtd->urb = urb;
2776 qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
2777 list_add_tail (&qtd->qtd_list, head);
2778 }
2779
2780 /*
2781 * unless the caller requires manual cleanup after short reads,
2782 * have the alt_next mechanism keep the queue running after the
2783 * last data qtd (the only one, for control and most other cases).
2784 */
2785 if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
2786 || usb_pipecontrol (urb->pipe)))
2787 qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200);
2788
2789 /*
2790 * control requests may need a terminating data "status" ack;
2791 * other OUT ones may need a terminating short packet
2792 * (zero length).
2793 */
2794 if (likely (urb->transfer_buffer_length != 0)) {
2795 int one_more = 0;
2796
2797 if (usb_pipecontrol (urb->pipe)) {
2798 one_more = 1;
2799 token ^= 0x0100; /* "in" <--> "out" */
2800 token |= QTD_TOGGLE; /* force DATA1 */
2801 } else if (usb_pipeout(urb->pipe)
2802 && (urb->transfer_flags & URB_ZERO_PACKET)
2803 && !(urb->transfer_buffer_length % maxpacket)) {
2804 one_more = 1;
2805 }
2806 if (one_more) {
2807 qtd_prev = qtd;
2808 qtd = fusbh200_qtd_alloc (fusbh200, flags);
2809 if (unlikely (!qtd))
2810 goto cleanup;
2811 qtd->urb = urb;
2812 qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
2813 list_add_tail (&qtd->qtd_list, head);
2814
2815 /* never any data in such packets */
2816 qtd_fill(fusbh200, qtd, 0, 0, token, 0);
2817 }
2818 }
2819
2820 /* by default, enable interrupt on urb completion */
2821 if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
2822 qtd->hw_token |= cpu_to_hc32(fusbh200, QTD_IOC);
2823 return head;
2824
2825cleanup:
2826 qtd_list_free (fusbh200, urb, head);
2827 return NULL;
2828}
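
/*
 * Illustration (editorial, not code): for an 8-byte control IN request the
 * list built above holds three qtds: a SETUP qtd carrying the
 * usb_ctrlrequest, one DATA qtd with the IN token and DATA1 toggle, and a
 * zero-length OUT status qtd (direction flipped, toggle forced to DATA1);
 * the final qtd also gets QTD_IOC unless URB_NO_INTERRUPT was set.
 */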
2829
2830/*-------------------------------------------------------------------------*/
2831
2832// Would be best to create all qh's from config descriptors,
2833// when each interface/altsetting is established. Unlink
2834// any previous qh and cancel its urbs first; endpoints are
2835// implicitly reset then (data toggle too).
2836// That'd mean updating how usbcore talks to HCDs. (2.7?)
2837
2838
2839/*
2840 * Each QH holds a qtd list; a QH is used for everything except iso.
2841 *
2842 * For interrupt urbs, the scheduler must set the microframe scheduling
2843 * mask(s) each time the QH gets scheduled. For highspeed, that's
2844 * just one microframe in the s-mask. For split interrupt transactions
2845 * there are additional complications: c-mask, maybe FSTNs.
2846 */
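/*
 * Sketch of the masks mentioned above (see also periodic_usecs() further
 * down): in hw_info2 the S-mask occupies bits 7:0 and the C-mask bits 15:8,
 * one bit per microframe.  A high-speed interrupt QH polled in microframe 2
 * of each frame would carry (1 << 2) in its S-mask; a split transaction
 * adds complete-split bits such as (1 << (8 + 4)) for a CSPLIT in
 * microframe 4.
 */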
2847static struct fusbh200_qh *
2848qh_make (
2849 struct fusbh200_hcd *fusbh200,
2850 struct urb *urb,
2851 gfp_t flags
2852) {
2853 struct fusbh200_qh *qh = fusbh200_qh_alloc (fusbh200, flags);
2854 u32 info1 = 0, info2 = 0;
2855 int is_input, type;
2856 int maxp = 0;
2857 struct usb_tt *tt = urb->dev->tt;
2858 struct fusbh200_qh_hw *hw;
2859
2860 if (!qh)
2861 return qh;
2862
2863 /*
2864 * init endpoint/device data for this QH
2865 */
2866 info1 |= usb_pipeendpoint (urb->pipe) << 8;
2867 info1 |= usb_pipedevice (urb->pipe) << 0;
2868
2869 is_input = usb_pipein (urb->pipe);
2870 type = usb_pipetype (urb->pipe);
2871 maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
2872
2873 /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
2874 * acts like up to 3KB, but is built from smaller packets.
2875 */
2876 if (max_packet(maxp) > 1024) {
2877 fusbh200_dbg(fusbh200, "bogus qh maxpacket %d\n", max_packet(maxp));
2878 goto done;
2879 }
2880
2881 /* Compute interrupt scheduling parameters just once, and save.
2882 * - allowing for high bandwidth, how many nsec/uframe are used?
2883 * - split transactions need a second CSPLIT uframe; same question
2884 * - splits also need a schedule gap (for full/low speed I/O)
2885 * - qh has a polling interval
2886 *
2887 * For control/bulk requests, the HC or TT handles these.
2888 */
2889 if (type == PIPE_INTERRUPT) {
2890 qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
2891 is_input, 0,
2892 hb_mult(maxp) * max_packet(maxp)));
2893 qh->start = NO_FRAME;
2894
2895 if (urb->dev->speed == USB_SPEED_HIGH) {
2896 qh->c_usecs = 0;
2897 qh->gap_uf = 0;
2898
2899 qh->period = urb->interval >> 3;
2900 if (qh->period == 0 && urb->interval != 1) {
2901 /* NOTE interval 2 or 4 uframes could work.
2902 * But interval 1 scheduling is simpler, and
2903 * includes high bandwidth.
2904 */
2905 urb->interval = 1;
2906 } else if (qh->period > fusbh200->periodic_size) {
2907 qh->period = fusbh200->periodic_size;
2908 urb->interval = qh->period << 3;
2909 }
2910 } else {
2911 int think_time;
2912
2913 /* gap is f(FS/LS transfer times) */
2914 qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
2915 is_input, 0, maxp) / (125 * 1000);
2916
2917 /* FIXME this just approximates SPLIT/CSPLIT times */
2918 if (is_input) { // SPLIT, gap, CSPLIT+DATA
2919 qh->c_usecs = qh->usecs + HS_USECS (0);
2920 qh->usecs = HS_USECS (1);
2921 } else { // SPLIT+DATA, gap, CSPLIT
2922 qh->usecs += HS_USECS (1);
2923 qh->c_usecs = HS_USECS (0);
2924 }
2925
2926 think_time = tt ? tt->think_time : 0;
2927 qh->tt_usecs = NS_TO_US (think_time +
2928 usb_calc_bus_time (urb->dev->speed,
2929 is_input, 0, max_packet (maxp)));
2930 qh->period = urb->interval;
2931 if (qh->period > fusbh200->periodic_size) {
2932 qh->period = fusbh200->periodic_size;
2933 urb->interval = qh->period;
2934 }
2935 }
2936 }
2937
2938 /* support for tt scheduling, and access to toggles */
2939 qh->dev = urb->dev;
2940
2941 /* using TT? */
2942 switch (urb->dev->speed) {
2943 case USB_SPEED_LOW:
2944 info1 |= QH_LOW_SPEED;
2945 /* FALL THROUGH */
2946
2947 case USB_SPEED_FULL:
2948 /* EPS 0 means "full" */
2949 if (type != PIPE_INTERRUPT)
2950 info1 |= (FUSBH200_TUNE_RL_TT << 28);
2951 if (type == PIPE_CONTROL) {
2952 info1 |= QH_CONTROL_EP; /* for TT */
2953 info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
2954 }
2955 info1 |= maxp << 16;
2956
2957 info2 |= (FUSBH200_TUNE_MULT_TT << 30);
2958
2959 /* Some Freescale processors have an erratum in which the
2960 * port number in the queue head was 0..N-1 instead of 1..N.
2961 */
2962 if (fusbh200_has_fsl_portno_bug(fusbh200))
2963 info2 |= (urb->dev->ttport-1) << 23;
2964 else
2965 info2 |= urb->dev->ttport << 23;
2966
2967 /* set the address of the TT; for TDI's integrated
2968 * root hub tt, leave it zeroed.
2969 */
2970 if (tt && tt->hub != fusbh200_to_hcd(fusbh200)->self.root_hub)
2971 info2 |= tt->hub->devnum << 16;
2972
2973 /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
2974
2975 break;
2976
2977 case USB_SPEED_HIGH: /* no TT involved */
2978 info1 |= QH_HIGH_SPEED;
2979 if (type == PIPE_CONTROL) {
2980 info1 |= (FUSBH200_TUNE_RL_HS << 28);
2981 info1 |= 64 << 16; /* usb2 fixed maxpacket */
2982 info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
2983 info2 |= (FUSBH200_TUNE_MULT_HS << 30);
2984 } else if (type == PIPE_BULK) {
2985 info1 |= (FUSBH200_TUNE_RL_HS << 28);
2986 /* The USB spec says that high speed bulk endpoints
2987 * always use 512 byte maxpacket. But some device
2988 * vendors decided to ignore that, and MSFT is happy
2989 * to help them do so. So now people expect to use
2990 * such nonconformant devices with Linux too; sigh.
2991 */
2992 info1 |= max_packet(maxp) << 16;
2993 info2 |= (FUSBH200_TUNE_MULT_HS << 30);
2994 } else { /* PIPE_INTERRUPT */
2995 info1 |= max_packet (maxp) << 16;
2996 info2 |= hb_mult (maxp) << 30;
2997 }
2998 break;
2999 default:
3000 fusbh200_dbg(fusbh200, "bogus dev %p speed %d\n", urb->dev,
3001 urb->dev->speed);
3002done:
3003 qh_destroy(fusbh200, qh);
3004 return NULL;
3005 }
3006
3007 /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
3008
3009 /* init as live, toggle clear, advance to dummy */
3010 qh->qh_state = QH_STATE_IDLE;
3011 hw = qh->hw;
3012 hw->hw_info1 = cpu_to_hc32(fusbh200, info1);
3013 hw->hw_info2 = cpu_to_hc32(fusbh200, info2);
3014 qh->is_out = !is_input;
3015 usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
3016 qh_refresh (fusbh200, qh);
3017 return qh;
3018}
3019
3020/*-------------------------------------------------------------------------*/
3021
3022static void enable_async(struct fusbh200_hcd *fusbh200)
3023{
3024 if (fusbh200->async_count++)
3025 return;
3026
3027 /* Stop waiting to turn off the async schedule */
3028 fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_ASYNC);
3029
3030 /* Don't start the schedule until ASS is 0 */
3031 fusbh200_poll_ASS(fusbh200);
3032 turn_on_io_watchdog(fusbh200);
3033}
3034
3035static void disable_async(struct fusbh200_hcd *fusbh200)
3036{
3037 if (--fusbh200->async_count)
3038 return;
3039
3040 /* The async schedule and async_unlink list are supposed to be empty */
3041 WARN_ON(fusbh200->async->qh_next.qh || fusbh200->async_unlink);
3042
3043 /* Don't turn off the schedule until ASS is 1 */
3044 fusbh200_poll_ASS(fusbh200);
3045}
3046
3047/* move qh (and its qtds) onto async queue; maybe enable queue. */
3048
3049static void qh_link_async (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
3050{
3051 __hc32 dma = QH_NEXT(fusbh200, qh->qh_dma);
3052 struct fusbh200_qh *head;
3053
3054 /* Don't link a QH if there's a Clear-TT-Buffer pending */
3055 if (unlikely(qh->clearing_tt))
3056 return;
3057
3058 WARN_ON(qh->qh_state != QH_STATE_IDLE);
3059
3060 /* clear halt and/or toggle; and maybe recover from silicon quirk */
3061 qh_refresh(fusbh200, qh);
3062
3063 /* splice right after start */
3064 head = fusbh200->async;
3065 qh->qh_next = head->qh_next;
3066 qh->hw->hw_next = head->hw->hw_next;
3067 wmb ();
3068
3069 head->qh_next.qh = qh;
3070 head->hw->hw_next = dma;
3071
3072 qh->xacterrs = 0;
3073 qh->qh_state = QH_STATE_LINKED;
3074 /* qtd completions reported later by interrupt */
3075
3076 enable_async(fusbh200);
3077}
3078
3079/*-------------------------------------------------------------------------*/
3080
3081/*
3082 * For control/bulk/interrupt, return QH with these TDs appended.
3083 * Allocates and initializes the QH if necessary.
3084 * Returns null if it can't allocate a QH it needs to.
3085 * If the QH has TDs (urbs) already, that's great.
3086 */
3087static struct fusbh200_qh *qh_append_tds (
3088 struct fusbh200_hcd *fusbh200,
3089 struct urb *urb,
3090 struct list_head *qtd_list,
3091 int epnum,
3092 void **ptr
3093)
3094{
3095 struct fusbh200_qh *qh = NULL;
3096 __hc32 qh_addr_mask = cpu_to_hc32(fusbh200, 0x7f);
3097
3098 qh = (struct fusbh200_qh *) *ptr;
3099 if (unlikely (qh == NULL)) {
3100 /* can't sleep here, we have fusbh200->lock... */
3101 qh = qh_make (fusbh200, urb, GFP_ATOMIC);
3102 *ptr = qh;
3103 }
3104 if (likely (qh != NULL)) {
3105 struct fusbh200_qtd *qtd;
3106
3107 if (unlikely (list_empty (qtd_list)))
3108 qtd = NULL;
3109 else
3110 qtd = list_entry (qtd_list->next, struct fusbh200_qtd,
3111 qtd_list);
3112
3113 /* control qh may need patching ... */
3114 if (unlikely (epnum == 0)) {
3115
3116 /* usb_reset_device() briefly reverts to address 0 */
3117 if (usb_pipedevice (urb->pipe) == 0)
3118 qh->hw->hw_info1 &= ~qh_addr_mask;
3119 }
3120
3121 /* just one way to queue requests: swap with the dummy qtd.
3122 * only hc or qh_refresh() ever modify the overlay.
3123 */
3124 if (likely (qtd != NULL)) {
3125 struct fusbh200_qtd *dummy;
3126 dma_addr_t dma;
3127 __hc32 token;
3128
3129 /* to avoid racing the HC, use the dummy td instead of
3130 * the first td of our list (becomes new dummy). both
3131 * tds stay deactivated until we're done, when the
3132 * HC is allowed to fetch the old dummy (4.10.2).
3133 */
3134 token = qtd->hw_token;
3135 qtd->hw_token = HALT_BIT(fusbh200);
3136
3137 dummy = qh->dummy;
3138
3139 dma = dummy->qtd_dma;
3140 *dummy = *qtd;
3141 dummy->qtd_dma = dma;
3142
3143 list_del (&qtd->qtd_list);
3144 list_add (&dummy->qtd_list, qtd_list);
3145 list_splice_tail(qtd_list, &qh->qtd_list);
3146
3147 fusbh200_qtd_init(fusbh200, qtd, qtd->qtd_dma);
3148 qh->dummy = qtd;
3149
3150 /* hc must see the new dummy at list end */
3151 dma = qtd->qtd_dma;
3152 qtd = list_entry (qh->qtd_list.prev,
3153 struct fusbh200_qtd, qtd_list);
3154 qtd->hw_next = QTD_NEXT(fusbh200, dma);
3155
3156 /* let the hc process these next qtds */
3157 wmb ();
3158 dummy->hw_token = token;
3159
3160 urb->hcpriv = qh;
3161 }
3162 }
3163 return qh;
3164}
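
/*
 * Editorial note on the dummy swap above: the queue is extended by copying
 * the first new qtd into the old (inactive) dummy and only storing its
 * token after the wmb(), so the HC can never fetch a half-initialized qtd;
 * the caller's first qtd is then recycled as the new dummy at the tail.
 */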
3165
3166/*-------------------------------------------------------------------------*/
3167
3168static int
3169submit_async (
3170 struct fusbh200_hcd *fusbh200,
3171 struct urb *urb,
3172 struct list_head *qtd_list,
3173 gfp_t mem_flags
3174) {
3175 int epnum;
3176 unsigned long flags;
3177 struct fusbh200_qh *qh = NULL;
3178 int rc;
3179
3180 epnum = urb->ep->desc.bEndpointAddress;
3181
3182#ifdef FUSBH200_URB_TRACE
3183 {
3184 struct fusbh200_qtd *qtd;
3185 qtd = list_entry(qtd_list->next, struct fusbh200_qtd, qtd_list);
3186 fusbh200_dbg(fusbh200,
3187 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
3188 __func__, urb->dev->devpath, urb,
3189 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
3190 urb->transfer_buffer_length,
3191 qtd, urb->ep->hcpriv);
3192 }
3193#endif
3194
3195 spin_lock_irqsave (&fusbh200->lock, flags);
3196 if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
3197 rc = -ESHUTDOWN;
3198 goto done;
3199 }
3200 rc = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
3201 if (unlikely(rc))
3202 goto done;
3203
3204 qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv);
3205 if (unlikely(qh == NULL)) {
3206 usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
3207 rc = -ENOMEM;
3208 goto done;
3209 }
3210
3211 /* Control/bulk operations through TTs don't need scheduling,
3212 * the HC and TT handle it when the TT has a buffer ready.
3213 */
3214 if (likely (qh->qh_state == QH_STATE_IDLE))
3215 qh_link_async(fusbh200, qh);
3216 done:
3217 spin_unlock_irqrestore (&fusbh200->lock, flags);
3218 if (unlikely (qh == NULL))
3219 qtd_list_free (fusbh200, urb, qtd_list);
3220 return rc;
3221}
3222
3223/*-------------------------------------------------------------------------*/
3224
3225static void single_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
3226{
3227 struct fusbh200_qh *prev;
3228
3229 /* Add to the end of the list of QHs waiting for the next IAAD */
3230 qh->qh_state = QH_STATE_UNLINK;
3231 if (fusbh200->async_unlink)
3232 fusbh200->async_unlink_last->unlink_next = qh;
3233 else
3234 fusbh200->async_unlink = qh;
3235 fusbh200->async_unlink_last = qh;
3236
3237 /* Unlink it from the schedule */
3238 prev = fusbh200->async;
3239 while (prev->qh_next.qh != qh)
3240 prev = prev->qh_next.qh;
3241
3242 prev->hw->hw_next = qh->hw->hw_next;
3243 prev->qh_next = qh->qh_next;
3244 if (fusbh200->qh_scan_next == qh)
3245 fusbh200->qh_scan_next = qh->qh_next.qh;
3246}
3247
3248static void start_iaa_cycle(struct fusbh200_hcd *fusbh200, bool nested)
3249{
3250 /*
3251 * Do nothing if an IAA cycle is already running or
3252 * if one will be started shortly.
3253 */
3254 if (fusbh200->async_iaa || fusbh200->async_unlinking)
3255 return;
3256
3257 /* Do all the waiting QHs at once */
3258 fusbh200->async_iaa = fusbh200->async_unlink;
3259 fusbh200->async_unlink = NULL;
3260
3261 /* If the controller isn't running, we don't have to wait for it */
3262 if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING)) {
3263 if (!nested) /* Avoid recursion */
3264 end_unlink_async(fusbh200);
3265
3266 /* Otherwise start a new IAA cycle */
3267 } else if (likely(fusbh200->rh_state == FUSBH200_RH_RUNNING)) {
3268 /* Make sure the unlinks are all visible to the hardware */
3269 wmb();
3270
3271 fusbh200_writel(fusbh200, fusbh200->command | CMD_IAAD,
3272 &fusbh200->regs->command);
3273 fusbh200_readl(fusbh200, &fusbh200->regs->command);
3274 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IAA_WATCHDOG, true);
3275 }
3276}
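
/*
 * Summary of the IAA handshake above (for reference): setting CMD_IAAD asks
 * the controller to raise an "Interrupt on Async Advance" once it has moved
 * past the QHs just unlinked; only then is it safe to recycle them in
 * end_unlink_async().  The IAA watchdog timer covers controllers that never
 * deliver that interrupt.
 */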
3277
3278/* the async QHs for the qtds being unlinked are now gone from the HC */
3279
3280static void end_unlink_async(struct fusbh200_hcd *fusbh200)
3281{
3282 struct fusbh200_qh *qh;
3283
3284 /* Process the idle QHs */
3285 restart:
3286 fusbh200->async_unlinking = true;
3287 while (fusbh200->async_iaa) {
3288 qh = fusbh200->async_iaa;
3289 fusbh200->async_iaa = qh->unlink_next;
3290 qh->unlink_next = NULL;
3291
3292 qh->qh_state = QH_STATE_IDLE;
3293 qh->qh_next.qh = NULL;
3294
3295 qh_completions(fusbh200, qh);
3296 if (!list_empty(&qh->qtd_list) &&
3297 fusbh200->rh_state == FUSBH200_RH_RUNNING)
3298 qh_link_async(fusbh200, qh);
3299 disable_async(fusbh200);
3300 }
3301 fusbh200->async_unlinking = false;
3302
3303 /* Start a new IAA cycle if any QHs are waiting for it */
3304 if (fusbh200->async_unlink) {
3305 start_iaa_cycle(fusbh200, true);
3306 if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING))
3307 goto restart;
3308 }
3309}
3310
3311static void unlink_empty_async(struct fusbh200_hcd *fusbh200)
3312{
3313 struct fusbh200_qh *qh, *next;
3314 bool stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING);
3315 bool check_unlinks_later = false;
3316
3317 /* Unlink all the async QHs that have been empty for a timer cycle */
3318 next = fusbh200->async->qh_next.qh;
3319 while (next) {
3320 qh = next;
3321 next = qh->qh_next.qh;
3322
3323 if (list_empty(&qh->qtd_list) &&
3324 qh->qh_state == QH_STATE_LINKED) {
3325 if (!stopped && qh->unlink_cycle ==
3326 fusbh200->async_unlink_cycle)
3327 check_unlinks_later = true;
3328 else
3329 single_unlink_async(fusbh200, qh);
3330 }
3331 }
3332
3333 /* Start a new IAA cycle if any QHs are waiting for it */
3334 if (fusbh200->async_unlink)
3335 start_iaa_cycle(fusbh200, false);
3336
3337 /* QHs that haven't been empty for long enough will be handled later */
3338 if (check_unlinks_later) {
3339 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true);
3340 ++fusbh200->async_unlink_cycle;
3341 }
3342}
3343
3344/* makes sure the async qh will become idle */
3345/* caller must own fusbh200->lock */
3346
3347static void start_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
3348{
3349 /*
3350 * If the QH isn't linked then there's nothing we can do
3351 * unless we were called during a giveback, in which case
3352 * qh_completions() has to deal with it.
3353 */
3354 if (qh->qh_state != QH_STATE_LINKED) {
3355 if (qh->qh_state == QH_STATE_COMPLETING)
3356 qh->needs_rescan = 1;
3357 return;
3358 }
3359
3360 single_unlink_async(fusbh200, qh);
3361 start_iaa_cycle(fusbh200, false);
3362}
3363
3364/*-------------------------------------------------------------------------*/
3365
3366static void scan_async (struct fusbh200_hcd *fusbh200)
3367{
3368 struct fusbh200_qh *qh;
3369 bool check_unlinks_later = false;
3370
3371 fusbh200->qh_scan_next = fusbh200->async->qh_next.qh;
3372 while (fusbh200->qh_scan_next) {
3373 qh = fusbh200->qh_scan_next;
3374 fusbh200->qh_scan_next = qh->qh_next.qh;
3375 rescan:
3376 /* clean any finished work for this qh */
3377 if (!list_empty(&qh->qtd_list)) {
3378 int temp;
3379
3380 /*
3381 * Unlinks could happen here; completion reporting
3382 * drops the lock. That's why fusbh200->qh_scan_next
3383 * always holds the next qh to scan; if the next qh
3384 * gets unlinked then fusbh200->qh_scan_next is adjusted
3385 * in single_unlink_async().
3386 */
3387 temp = qh_completions(fusbh200, qh);
3388 if (qh->needs_rescan) {
3389 start_unlink_async(fusbh200, qh);
3390 } else if (list_empty(&qh->qtd_list)
3391 && qh->qh_state == QH_STATE_LINKED) {
3392 qh->unlink_cycle = fusbh200->async_unlink_cycle;
3393 check_unlinks_later = true;
3394 } else if (temp != 0)
3395 goto rescan;
3396 }
3397 }
3398
3399 /*
3400 * Unlink empty entries, reducing DMA usage as well
3401	 * as HCD schedule-scanning costs.  Delay unlinking any qh
3402	 * we just scanned, since it's not unusual for it to become
3403	 * busy again before long.
3404 */
3405 if (check_unlinks_later && fusbh200->rh_state == FUSBH200_RH_RUNNING &&
3406 !(fusbh200->enabled_hrtimer_events &
3407 BIT(FUSBH200_HRTIMER_ASYNC_UNLINKS))) {
3408 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true);
3409 ++fusbh200->async_unlink_cycle;
3410 }
3411}
3412/*-------------------------------------------------------------------------*/
3413/*
3414 * EHCI scheduled transaction support: interrupt, iso, split iso
3415 * These are called "periodic" transactions in the EHCI spec.
3416 *
3417 * Note that for interrupt transfers, the QH/QTD manipulation is shared
3418 * with the "asynchronous" transaction support (control/bulk transfers).
3419 * The only real difference is in how interrupt transfers are scheduled.
3420 *
3421 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
3422 * It keeps track of every ITD that's linked, and holds enough
3423 * pre-calculated schedule data to make appending to the queue be quick.
3424 */
3425
3426static int fusbh200_get_frame (struct usb_hcd *hcd);
3427
3428/*-------------------------------------------------------------------------*/
3429
3430/*
3431 * periodic_next_shadow - return "next" pointer on shadow list
3432 * @periodic: host pointer to qh/itd
3433 * @tag: hardware tag for type of this record
3434 */
3435static union fusbh200_shadow *
3436periodic_next_shadow(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic,
3437 __hc32 tag)
3438{
3439 switch (hc32_to_cpu(fusbh200, tag)) {
3440 case Q_TYPE_QH:
3441 return &periodic->qh->qh_next;
3442 case Q_TYPE_FSTN:
3443 return &periodic->fstn->fstn_next;
3444 default:
3445 return &periodic->itd->itd_next;
3446 }
3447}
3448
3449static __hc32 *
3450shadow_next_periodic(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic,
3451 __hc32 tag)
3452{
3453 switch (hc32_to_cpu(fusbh200, tag)) {
3454 /* our fusbh200_shadow.qh is actually software part */
3455 case Q_TYPE_QH:
3456 return &periodic->qh->hw->hw_next;
3457 /* others are hw parts */
3458 default:
3459 return periodic->hw_next;
3460 }
3461}
3462
3463/* caller must hold fusbh200->lock */
3464static void periodic_unlink (struct fusbh200_hcd *fusbh200, unsigned frame, void *ptr)
3465{
3466 union fusbh200_shadow *prev_p = &fusbh200->pshadow[frame];
3467 __hc32 *hw_p = &fusbh200->periodic[frame];
3468 union fusbh200_shadow here = *prev_p;
3469
3470 /* find predecessor of "ptr"; hw and shadow lists are in sync */
3471 while (here.ptr && here.ptr != ptr) {
3472 prev_p = periodic_next_shadow(fusbh200, prev_p,
3473 Q_NEXT_TYPE(fusbh200, *hw_p));
3474 hw_p = shadow_next_periodic(fusbh200, &here,
3475 Q_NEXT_TYPE(fusbh200, *hw_p));
3476 here = *prev_p;
3477 }
3478 /* an interrupt entry (at list end) could have been shared */
3479 if (!here.ptr)
3480 return;
3481
3482 /* update shadow and hardware lists ... the old "next" pointers
3483 * from ptr may still be in use, the caller updates them.
3484 */
3485 *prev_p = *periodic_next_shadow(fusbh200, &here,
3486 Q_NEXT_TYPE(fusbh200, *hw_p));
3487
3488 *hw_p = *shadow_next_periodic(fusbh200, &here,
3489 Q_NEXT_TYPE(fusbh200, *hw_p));
3490}
3491
3492/* how many of the uframe's 125 usecs are allocated? */
3493static unsigned short
3494periodic_usecs (struct fusbh200_hcd *fusbh200, unsigned frame, unsigned uframe)
3495{
3496 __hc32 *hw_p = &fusbh200->periodic [frame];
3497 union fusbh200_shadow *q = &fusbh200->pshadow [frame];
3498 unsigned usecs = 0;
3499 struct fusbh200_qh_hw *hw;
3500
3501 while (q->ptr) {
3502 switch (hc32_to_cpu(fusbh200, Q_NEXT_TYPE(fusbh200, *hw_p))) {
3503 case Q_TYPE_QH:
3504 hw = q->qh->hw;
3505 /* is it in the S-mask? */
3506 if (hw->hw_info2 & cpu_to_hc32(fusbh200, 1 << uframe))
3507 usecs += q->qh->usecs;
3508 /* ... or C-mask? */
3509 if (hw->hw_info2 & cpu_to_hc32(fusbh200,
3510 1 << (8 + uframe)))
3511 usecs += q->qh->c_usecs;
3512 hw_p = &hw->hw_next;
3513 q = &q->qh->qh_next;
3514 break;
3515 // case Q_TYPE_FSTN:
3516 default:
3517 /* for "save place" FSTNs, count the relevant INTR
3518 * bandwidth from the previous frame
3519 */
3520 if (q->fstn->hw_prev != FUSBH200_LIST_END(fusbh200)) {
3521 fusbh200_dbg (fusbh200, "ignoring FSTN cost ...\n");
3522 }
3523 hw_p = &q->fstn->hw_next;
3524 q = &q->fstn->fstn_next;
3525 break;
3526 case Q_TYPE_ITD:
3527 if (q->itd->hw_transaction[uframe])
3528 usecs += q->itd->stream->usecs;
3529 hw_p = &q->itd->hw_next;
3530 q = &q->itd->itd_next;
3531 break;
3532 }
3533 }
3534#ifdef DEBUG
3535 if (usecs > fusbh200->uframe_periodic_max)
3536 fusbh200_err (fusbh200, "uframe %d sched overrun: %d usecs\n",
3537 frame * 8 + uframe, usecs);
3538#endif
3539 return usecs;
3540}
3541
3542/*-------------------------------------------------------------------------*/
3543
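/*
 * Two devices share a transaction translator only when they sit behind
 * the same TT; for multi-TT hubs the port number matters as well.  Used
 * when budgeting split transactions on the periodic schedule.
 */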
3544static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
3545{
3546 if (!dev1->tt || !dev2->tt)
3547 return 0;
3548 if (dev1->tt != dev2->tt)
3549 return 0;
3550 if (dev1->tt->multi)
3551 return dev1->ttport == dev2->ttport;
3552 else
3553 return 1;
3554}
3555
3556/* return true iff the device's transaction translator is available
3557 * for a periodic transfer starting at the specified frame, using
3558 * all the uframes in the mask.
3559 */
3560static int tt_no_collision (
3561 struct fusbh200_hcd *fusbh200,
3562 unsigned period,
3563 struct usb_device *dev,
3564 unsigned frame,
3565 u32 uf_mask
3566)
3567{
3568 if (period == 0) /* error */
3569 return 0;
3570
3571 /* note bandwidth wastage: split never follows csplit
3572 * (different dev or endpoint) until the next uframe.
3573 * calling convention doesn't make that distinction.
3574 */
3575 for (; frame < fusbh200->periodic_size; frame += period) {
3576 union fusbh200_shadow here;
3577 __hc32 type;
3578 struct fusbh200_qh_hw *hw;
3579
3580 here = fusbh200->pshadow [frame];
3581 type = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [frame]);
3582 while (here.ptr) {
3583 switch (hc32_to_cpu(fusbh200, type)) {
3584 case Q_TYPE_ITD:
3585 type = Q_NEXT_TYPE(fusbh200, here.itd->hw_next);
3586 here = here.itd->itd_next;
3587 continue;
3588 case Q_TYPE_QH:
3589 hw = here.qh->hw;
3590 if (same_tt (dev, here.qh->dev)) {
3591 u32 mask;
3592
3593 mask = hc32_to_cpu(fusbh200,
3594 hw->hw_info2);
3595 /* "knows" no gap is needed */
3596 mask |= mask >> 8;
3597 if (mask & uf_mask)
3598 break;
3599 }
3600 type = Q_NEXT_TYPE(fusbh200, hw->hw_next);
3601 here = here.qh->qh_next;
3602 continue;
3603 // case Q_TYPE_FSTN:
3604 default:
3605 fusbh200_dbg (fusbh200,
3606 "periodic frame %d bogus type %d\n",
3607 frame, type);
3608 }
3609
3610 /* collision or error */
3611 return 0;
3612 }
3613 }
3614
3615 /* no collision */
3616 return 1;
3617}
3618
3619/*-------------------------------------------------------------------------*/
3620
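/*
 * The periodic schedule is reference counted.  The first user cancels any
 * pending "disable periodic" hrtimer event and polls the PSS status bit
 * before the schedule is actually turned on; the last user starts polling
 * PSS again so the schedule can be turned off once the controller has
 * stopped using it.
 */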
3621static void enable_periodic(struct fusbh200_hcd *fusbh200)
3622{
3623 if (fusbh200->periodic_count++)
3624 return;
3625
3626 /* Stop waiting to turn off the periodic schedule */
3627 fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_PERIODIC);
3628
3629 /* Don't start the schedule until PSS is 0 */
3630 fusbh200_poll_PSS(fusbh200);
3631 turn_on_io_watchdog(fusbh200);
3632}
3633
3634static void disable_periodic(struct fusbh200_hcd *fusbh200)
3635{
3636 if (--fusbh200->periodic_count)
3637 return;
3638
3639 /* Don't turn off the schedule until PSS is 1 */
3640 fusbh200_poll_PSS(fusbh200);
3641}
3642
3643/*-------------------------------------------------------------------------*/
3644
3645/* periodic schedule slots have iso tds (normal or split) first, then a
3646 * sparse tree for active interrupt transfers.
3647 *
3648 * this just links in a qh; caller guarantees uframe masks are set right.
3649 * no FSTN support (yet; fusbh200 0.96+)
3650 */
3651static void qh_link_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
3652{
3653 unsigned i;
3654 unsigned period = qh->period;
3655
3656 dev_dbg (&qh->dev->dev,
3657 "link qh%d-%04x/%p start %d [%d/%d us]\n",
3658 period, hc32_to_cpup(fusbh200, &qh->hw->hw_info2)
3659 & (QH_CMASK | QH_SMASK),
3660 qh, qh->start, qh->usecs, qh->c_usecs);
3661
3662 /* high bandwidth, or otherwise every microframe */
3663 if (period == 0)
3664 period = 1;
3665
3666 for (i = qh->start; i < fusbh200->periodic_size; i += period) {
3667 union fusbh200_shadow *prev = &fusbh200->pshadow[i];
3668 __hc32 *hw_p = &fusbh200->periodic[i];
3669 union fusbh200_shadow here = *prev;
3670 __hc32 type = 0;
3671
3672 /* skip the iso nodes at list head */
3673 while (here.ptr) {
3674 type = Q_NEXT_TYPE(fusbh200, *hw_p);
3675 if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH))
3676 break;
3677 prev = periodic_next_shadow(fusbh200, prev, type);
3678 hw_p = shadow_next_periodic(fusbh200, &here, type);
3679 here = *prev;
3680 }
3681
3682 /* sorting each branch by period (slow-->fast)
3683 * enables sharing interior tree nodes
3684 */
3685 while (here.ptr && qh != here.qh) {
3686 if (qh->period > here.qh->period)
3687 break;
3688 prev = &here.qh->qh_next;
3689 hw_p = &here.qh->hw->hw_next;
3690 here = *prev;
3691 }
3692 /* link in this qh, unless some earlier pass did that */
3693 if (qh != here.qh) {
3694 qh->qh_next = here;
3695 if (here.qh)
3696 qh->hw->hw_next = *hw_p;
3697 wmb ();
3698 prev->qh = qh;
3699 *hw_p = QH_NEXT (fusbh200, qh->qh_dma);
3700 }
3701 }
3702 qh->qh_state = QH_STATE_LINKED;
3703 qh->xacterrs = 0;
3704
3705 /* update per-qh bandwidth for usbfs */
3706 fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated += qh->period
3707 ? ((qh->usecs + qh->c_usecs) / qh->period)
3708 : (qh->usecs * 8);
3709
3710 list_add(&qh->intr_node, &fusbh200->intr_qh_list);
3711
3712 /* maybe enable periodic schedule processing */
3713 ++fusbh200->intr_count;
3714 enable_periodic(fusbh200);
3715}
3716
3717static void qh_unlink_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
3718{
3719 unsigned i;
3720 unsigned period;
3721
3722 /*
3723 * If qh is for a low/full-speed device, simply unlinking it
3724 * could interfere with an ongoing split transaction. To unlink
3725 * it safely would require setting the QH_INACTIVATE bit and
3726 * waiting at least one frame, as described in EHCI 4.12.2.5.
3727 *
3728 * We won't bother with any of this. Instead, we assume that the
3729 * only reason for unlinking an interrupt QH while the current URB
3730 * is still active is to dequeue all the URBs (flush the whole
3731 * endpoint queue).
3732 *
3733 * If rebalancing the periodic schedule is ever implemented, this
3734 * approach will no longer be valid.
3735 */
3736
3737 /* high bandwidth, or otherwise part of every microframe */
3738 if ((period = qh->period) == 0)
3739 period = 1;
3740
3741 for (i = qh->start; i < fusbh200->periodic_size; i += period)
3742 periodic_unlink (fusbh200, i, qh);
3743
3744 /* update per-qh bandwidth for usbfs */
3745 fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated -= qh->period
3746 ? ((qh->usecs + qh->c_usecs) / qh->period)
3747 : (qh->usecs * 8);
3748
3749 dev_dbg (&qh->dev->dev,
3750 "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
3751 qh->period,
3752 hc32_to_cpup(fusbh200, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
3753 qh, qh->start, qh->usecs, qh->c_usecs);
3754
3755 /* qh->qh_next still "live" to HC */
3756 qh->qh_state = QH_STATE_UNLINK;
3757 qh->qh_next.ptr = NULL;
3758
3759 if (fusbh200->qh_scan_next == qh)
3760 fusbh200->qh_scan_next = list_entry(qh->intr_node.next,
3761 struct fusbh200_qh, intr_node);
3762 list_del(&qh->intr_node);
3763}
3764
3765static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
3766{
3767 /* If the QH isn't linked then there's nothing we can do
3768 * unless we were called during a giveback, in which case
3769 * qh_completions() has to deal with it.
3770 */
3771 if (qh->qh_state != QH_STATE_LINKED) {
3772 if (qh->qh_state == QH_STATE_COMPLETING)
3773 qh->needs_rescan = 1;
3774 return;
3775 }
3776
3777 qh_unlink_periodic (fusbh200, qh);
3778
3779 /* Make sure the unlinks are visible before starting the timer */
3780 wmb();
3781
3782 /*
3783 * The EHCI spec doesn't say how long it takes the controller to
3784 * stop accessing an unlinked interrupt QH. The timer delay is
3785 * 9 uframes; presumably that will be long enough.
3786 */
3787 qh->unlink_cycle = fusbh200->intr_unlink_cycle;
3788
3789 /* New entries go at the end of the intr_unlink list */
3790 if (fusbh200->intr_unlink)
3791 fusbh200->intr_unlink_last->unlink_next = qh;
3792 else
3793 fusbh200->intr_unlink = qh;
3794 fusbh200->intr_unlink_last = qh;
3795
3796 if (fusbh200->intr_unlinking)
3797 ; /* Avoid recursive calls */
3798 else if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
3799 fusbh200_handle_intr_unlinks(fusbh200);
3800 else if (fusbh200->intr_unlink == qh) {
3801 fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true);
3802 ++fusbh200->intr_unlink_cycle;
3803 }
3804}
3805
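/*
 * Called once the controller is presumed to have stopped using an
 * unlinked interrupt QH: mark it idle, give back any completed URBs, and
 * reschedule the QH if more requests are still queued.
 */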
3806static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
3807{
3808 struct fusbh200_qh_hw *hw = qh->hw;
3809 int rc;
3810
3811 qh->qh_state = QH_STATE_IDLE;
3812 hw->hw_next = FUSBH200_LIST_END(fusbh200);
3813
3814 qh_completions(fusbh200, qh);
3815
3816 /* reschedule QH iff another request is queued */
3817 if (!list_empty(&qh->qtd_list) && fusbh200->rh_state == FUSBH200_RH_RUNNING) {
3818 rc = qh_schedule(fusbh200, qh);
3819
3820 /* An error here likely indicates handshake failure
3821 * or no space left in the schedule. Neither fault
3822 * should happen often ...
3823 *
3824 * FIXME kill the now-dysfunctional queued urbs
3825 */
3826 if (rc != 0)
3827 fusbh200_err(fusbh200, "can't reschedule qh %p, err %d\n",
3828 qh, rc);
3829 }
3830
3831 /* maybe turn off periodic schedule */
3832 --fusbh200->intr_count;
3833 disable_periodic(fusbh200);
3834}
3835
3836/*-------------------------------------------------------------------------*/
3837
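/*
 * Return true when every (u)frame visited at this period still has at
 * least "usecs" of periodic budget left (out of uframe_periodic_max).
 * A period of zero means the transfer needs every microframe.
 */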
3838static int check_period (
3839 struct fusbh200_hcd *fusbh200,
3840 unsigned frame,
3841 unsigned uframe,
3842 unsigned period,
3843 unsigned usecs
3844) {
3845 int claimed;
3846
3847 /* complete split running into next frame?
3848 * given FSTN support, we could sometimes check...
3849 */
3850 if (uframe >= 8)
3851 return 0;
3852
3853 /* convert "usecs we need" to "max already claimed" */
3854 usecs = fusbh200->uframe_periodic_max - usecs;
3855
3856 /* we "know" 2 and 4 uframe intervals were rejected; so
3857 * for period 0, check _every_ microframe in the schedule.
3858 */
3859 if (unlikely (period == 0)) {
3860 do {
3861 for (uframe = 0; uframe < 7; uframe++) {
3862 claimed = periodic_usecs (fusbh200, frame, uframe);
3863 if (claimed > usecs)
3864 return 0;
3865 }
3866 } while ((frame += 1) < fusbh200->periodic_size);
3867
3868 /* just check the specified uframe, at that period */
3869 } else {
3870 do {
3871 claimed = periodic_usecs (fusbh200, frame, uframe);
3872 if (claimed > usecs)
3873 return 0;
3874 } while ((frame += period) < fusbh200->periodic_size);
3875 }
3876
3877 // success!
3878 return 1;
3879}
3880
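/*
 * Check whether this interrupt QH fits at the given frame/uframe.
 * Returns 0 on success; for full/low speed endpoints (qh->c_usecs
 * nonzero) *c_maskp is also filled with the complete-split mask.
 */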
3881static int check_intr_schedule (
3882 struct fusbh200_hcd *fusbh200,
3883 unsigned frame,
3884 unsigned uframe,
3885 const struct fusbh200_qh *qh,
3886 __hc32 *c_maskp
3887)
3888{
3889 int retval = -ENOSPC;
3890 u8 mask = 0;
3891
3892 if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
3893 goto done;
3894
3895 if (!check_period (fusbh200, frame, uframe, qh->period, qh->usecs))
3896 goto done;
3897 if (!qh->c_usecs) {
3898 retval = 0;
3899 *c_maskp = 0;
3900 goto done;
3901 }
3902
3903 /* Make sure this tt's buffer is also available for CSPLITs.
3904 * We pessimize a bit; probably the typical full speed case
3905 * doesn't need the second CSPLIT.
3906 *
3907 * NOTE: both SPLIT and CSPLIT could be checked in just
3908 * one smart pass...
3909 */
3910 mask = 0x03 << (uframe + qh->gap_uf);
3911 *c_maskp = cpu_to_hc32(fusbh200, mask << 8);
3912
3913 mask |= 1 << uframe;
3914 if (tt_no_collision (fusbh200, qh->period, qh->dev, frame, mask)) {
3915 if (!check_period (fusbh200, frame, uframe + qh->gap_uf + 1,
3916 qh->period, qh->c_usecs))
3917 goto done;
3918 if (!check_period (fusbh200, frame, uframe + qh->gap_uf,
3919 qh->period, qh->c_usecs))
3920 goto done;
3921 retval = 0;
3922 }
3923done:
3924 return retval;
3925}
3926
3927/* "first fit" scheduling policy used the first time through,
3928 * or when the previous schedule slot can't be re-used.
3929 */
3930static int qh_schedule(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
3931{
3932 int status;
3933 unsigned uframe;
3934 __hc32 c_mask;
3935 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
3936 struct fusbh200_qh_hw *hw = qh->hw;
3937
3938 qh_refresh(fusbh200, qh);
3939 hw->hw_next = FUSBH200_LIST_END(fusbh200);
3940 frame = qh->start;
3941
3942 /* reuse the previous schedule slots, if we can */
3943 if (frame < qh->period) {
3944 uframe = ffs(hc32_to_cpup(fusbh200, &hw->hw_info2) & QH_SMASK);
3945 status = check_intr_schedule (fusbh200, frame, --uframe,
3946 qh, &c_mask);
3947 } else {
3948 uframe = 0;
3949 c_mask = 0;
3950 status = -ENOSPC;
3951 }
3952
3953 /* else scan the schedule to find a group of slots such that all
3954 * uframes have enough periodic bandwidth available.
3955 */
3956 if (status) {
3957 /* "normal" case, uframing flexible except with splits */
3958 if (qh->period) {
3959 int i;
3960
3961 for (i = qh->period; status && i > 0; --i) {
3962 frame = ++fusbh200->random_frame % qh->period;
3963 for (uframe = 0; uframe < 8; uframe++) {
3964 status = check_intr_schedule (fusbh200,
3965 frame, uframe, qh,
3966 &c_mask);
3967 if (status == 0)
3968 break;
3969 }
3970 }
3971
3972 /* qh->period == 0 means every uframe */
3973 } else {
3974 frame = 0;
3975 status = check_intr_schedule (fusbh200, 0, 0, qh, &c_mask);
3976 }
3977 if (status)
3978 goto done;
3979 qh->start = frame;
3980
3981 /* reset S-frame and (maybe) C-frame masks */
3982 hw->hw_info2 &= cpu_to_hc32(fusbh200, ~(QH_CMASK | QH_SMASK));
3983 hw->hw_info2 |= qh->period
3984 ? cpu_to_hc32(fusbh200, 1 << uframe)
3985 : cpu_to_hc32(fusbh200, QH_SMASK);
3986 hw->hw_info2 |= c_mask;
3987 } else
3988 fusbh200_dbg (fusbh200, "reused qh %p schedule\n", qh);
3989
3990 /* stuff into the periodic schedule */
3991 qh_link_periodic(fusbh200, qh);
3992done:
3993 return status;
3994}
3995
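/*
 * Submit an interrupt URB: link it to the endpoint, schedule the QH if it
 * isn't already on the periodic list, then queue the qTDs.  The first
 * qh_append_tds() call uses an empty list purely to create the QH and
 * surface any scheduling errors early.
 */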
3996static int intr_submit (
3997 struct fusbh200_hcd *fusbh200,
3998 struct urb *urb,
3999 struct list_head *qtd_list,
4000 gfp_t mem_flags
4001) {
4002 unsigned epnum;
4003 unsigned long flags;
4004 struct fusbh200_qh *qh;
4005 int status;
4006 struct list_head empty;
4007
4008 /* get endpoint and transfer/schedule data */
4009 epnum = urb->ep->desc.bEndpointAddress;
4010
4011 spin_lock_irqsave (&fusbh200->lock, flags);
4012
4013 if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
4014 status = -ESHUTDOWN;
4015 goto done_not_linked;
4016 }
4017 status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
4018 if (unlikely(status))
4019 goto done_not_linked;
4020
4021 /* get qh and force any scheduling errors */
4022 INIT_LIST_HEAD (&empty);
4023 qh = qh_append_tds(fusbh200, urb, &empty, epnum, &urb->ep->hcpriv);
4024 if (qh == NULL) {
4025 status = -ENOMEM;
4026 goto done;
4027 }
4028 if (qh->qh_state == QH_STATE_IDLE) {
4029 if ((status = qh_schedule (fusbh200, qh)) != 0)
4030 goto done;
4031 }
4032
4033 /* then queue the urb's tds to the qh */
4034 qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv);
4035 BUG_ON (qh == NULL);
4036
4037 /* ... update usbfs periodic stats */
4038 fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs++;
4039
4040done:
4041 if (unlikely(status))
4042 usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
4043done_not_linked:
4044 spin_unlock_irqrestore (&fusbh200->lock, flags);
4045 if (status)
4046 qtd_list_free (fusbh200, urb, qtd_list);
4047
4048 return status;
4049}
4050
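/*
 * Scan the linked interrupt QHs for completions; the periodic-schedule
 * counterpart of scan_async().  Idle or rescan-flagged QHs are handed to
 * start_unlink_intr().
 */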
4051static void scan_intr(struct fusbh200_hcd *fusbh200)
4052{
4053 struct fusbh200_qh *qh;
4054
4055 list_for_each_entry_safe(qh, fusbh200->qh_scan_next, &fusbh200->intr_qh_list,
4056 intr_node) {
4057 rescan:
4058 /* clean any finished work for this qh */
4059 if (!list_empty(&qh->qtd_list)) {
4060 int temp;
4061
4062 /*
4063 * Unlinks could happen here; completion reporting
4064 * drops the lock. That's why fusbh200->qh_scan_next
4065 * always holds the next qh to scan; if the next qh
4066 * gets unlinked then fusbh200->qh_scan_next is adjusted
4067 * in qh_unlink_periodic().
4068 */
4069 temp = qh_completions(fusbh200, qh);
4070 if (unlikely(qh->needs_rescan ||
4071 (list_empty(&qh->qtd_list) &&
4072 qh->qh_state == QH_STATE_LINKED)))
4073 start_unlink_intr(fusbh200, qh);
4074 else if (temp != 0)
4075 goto rescan;
4076 }
4077 }
4078}
4079
4080/*-------------------------------------------------------------------------*/
4081
4082/* fusbh200_iso_stream ops work with both ITD and SITD */
4083
4084static struct fusbh200_iso_stream *
4085iso_stream_alloc (gfp_t mem_flags)
4086{
4087 struct fusbh200_iso_stream *stream;
4088
4089 stream = kzalloc(sizeof *stream, mem_flags);
4090 if (likely (stream != NULL)) {
4091 INIT_LIST_HEAD(&stream->td_list);
4092 INIT_LIST_HEAD(&stream->free_list);
4093 stream->next_uframe = -1;
4094 }
4095 return stream;
4096}
4097
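/*
 * Precompute the per-stream iTD buffer words (device address, endpoint,
 * max packet size, high-bandwidth multiplier, direction) plus the
 * usecs-per-frame and bandwidth numbers used for schedule budgeting.
 */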
4098static void
4099iso_stream_init (
4100 struct fusbh200_hcd *fusbh200,
4101 struct fusbh200_iso_stream *stream,
4102 struct usb_device *dev,
4103 int pipe,
4104 unsigned interval
4105)
4106{
4107 u32 buf1;
4108 unsigned epnum, maxp;
4109 int is_input;
4110 long bandwidth;
4111 unsigned multi;
4112
4113 /*
4114 * this might be a "high bandwidth" highspeed endpoint,
4115	 * as encoded in the ep descriptor's wMaxPacketSize field
4116 */
4117 epnum = usb_pipeendpoint (pipe);
4118 is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
4119 maxp = usb_maxpacket(dev, pipe, !is_input);
4120 if (is_input) {
4121 buf1 = (1 << 11);
4122 } else {
4123 buf1 = 0;
4124 }
4125
4126 maxp = max_packet(maxp);
4127 multi = hb_mult(maxp);
4128 buf1 |= maxp;
4129 maxp *= multi;
4130
4131 stream->buf0 = cpu_to_hc32(fusbh200, (epnum << 8) | dev->devnum);
4132 stream->buf1 = cpu_to_hc32(fusbh200, buf1);
4133 stream->buf2 = cpu_to_hc32(fusbh200, multi);
4134
4135 /* usbfs wants to report the average usecs per frame tied up
4136 * when transfers on this endpoint are scheduled ...
4137 */
4138 if (dev->speed == USB_SPEED_FULL) {
4139 interval <<= 3;
4140 stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
4141 is_input, 1, maxp));
4142 stream->usecs /= 8;
4143 } else {
4144 stream->highspeed = 1;
4145 stream->usecs = HS_USECS_ISO (maxp);
4146 }
4147 bandwidth = stream->usecs * 8;
4148 bandwidth /= interval;
4149
4150 stream->bandwidth = bandwidth;
4151 stream->udev = dev;
4152 stream->bEndpointAddress = is_input | epnum;
4153 stream->interval = interval;
4154 stream->maxp = maxp;
4155}
4156
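/*
 * Find the iso_stream hanging off this URB's endpoint, allocating and
 * initializing one on first use.  Endpoints whose hcpriv already holds a
 * QH (i.e. not an iso endpoint) are rejected.
 */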
4157static struct fusbh200_iso_stream *
4158iso_stream_find (struct fusbh200_hcd *fusbh200, struct urb *urb)
4159{
4160 unsigned epnum;
4161 struct fusbh200_iso_stream *stream;
4162 struct usb_host_endpoint *ep;
4163 unsigned long flags;
4164
4165 epnum = usb_pipeendpoint (urb->pipe);
4166 if (usb_pipein(urb->pipe))
4167 ep = urb->dev->ep_in[epnum];
4168 else
4169 ep = urb->dev->ep_out[epnum];
4170
4171 spin_lock_irqsave (&fusbh200->lock, flags);
4172 stream = ep->hcpriv;
4173
4174 if (unlikely (stream == NULL)) {
4175 stream = iso_stream_alloc(GFP_ATOMIC);
4176 if (likely (stream != NULL)) {
4177 ep->hcpriv = stream;
4178 stream->ep = ep;
4179 iso_stream_init(fusbh200, stream, urb->dev, urb->pipe,
4180 urb->interval);
4181 }
4182
4183 /* if dev->ep [epnum] is a QH, hw is set */
4184 } else if (unlikely (stream->hw != NULL)) {
4185 fusbh200_dbg (fusbh200, "dev %s ep%d%s, not iso??\n",
4186 urb->dev->devpath, epnum,
4187 usb_pipein(urb->pipe) ? "in" : "out");
4188 stream = NULL;
4189 }
4190
4191 spin_unlock_irqrestore (&fusbh200->lock, flags);
4192 return stream;
4193}
4194
4195/*-------------------------------------------------------------------------*/
4196
4197/* fusbh200_iso_sched ops can be ITD-only or SITD-only */
4198
4199static struct fusbh200_iso_sched *
4200iso_sched_alloc (unsigned packets, gfp_t mem_flags)
4201{
4202 struct fusbh200_iso_sched *iso_sched;
4203 int size = sizeof *iso_sched;
4204
4205 size += packets * sizeof (struct fusbh200_iso_packet);
4206 iso_sched = kzalloc(size, mem_flags);
4207 if (likely (iso_sched != NULL)) {
4208 INIT_LIST_HEAD (&iso_sched->td_list);
4209 }
4210 return iso_sched;
4211}
4212
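/*
 * Fill in one fusbh200_iso_packet per iso frame descriptor: the
 * precomputed transaction word (status, length, optional IOC on the last
 * packet), the buffer page, and a flag for packets that cross a 4K page
 * boundary.
 */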
4213static inline void
4214itd_sched_init(
4215 struct fusbh200_hcd *fusbh200,
4216 struct fusbh200_iso_sched *iso_sched,
4217 struct fusbh200_iso_stream *stream,
4218 struct urb *urb
4219)
4220{
4221 unsigned i;
4222 dma_addr_t dma = urb->transfer_dma;
4223
4224 /* how many uframes are needed for these transfers */
4225 iso_sched->span = urb->number_of_packets * stream->interval;
4226
4227 /* figure out per-uframe itd fields that we'll need later
4228 * when we fit new itds into the schedule.
4229 */
4230 for (i = 0; i < urb->number_of_packets; i++) {
4231 struct fusbh200_iso_packet *uframe = &iso_sched->packet [i];
4232 unsigned length;
4233 dma_addr_t buf;
4234 u32 trans;
4235
4236 length = urb->iso_frame_desc [i].length;
4237 buf = dma + urb->iso_frame_desc [i].offset;
4238
4239 trans = FUSBH200_ISOC_ACTIVE;
4240 trans |= buf & 0x0fff;
4241 if (unlikely (((i + 1) == urb->number_of_packets))
4242 && !(urb->transfer_flags & URB_NO_INTERRUPT))
4243 trans |= FUSBH200_ITD_IOC;
4244 trans |= length << 16;
4245 uframe->transaction = cpu_to_hc32(fusbh200, trans);
4246
4247 /* might need to cross a buffer page within a uframe */
4248 uframe->bufp = (buf & ~(u64)0x0fff);
4249 buf += length;
4250 if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
4251 uframe->cross = 1;
4252 }
4253}
4254
4255static void
4256iso_sched_free (
4257 struct fusbh200_iso_stream *stream,
4258 struct fusbh200_iso_sched *iso_sched
4259)
4260{
4261 if (!iso_sched)
4262 return;
4263 // caller must hold fusbh200->lock!
4264 list_splice (&iso_sched->td_list, &stream->free_list);
4265 kfree (iso_sched);
4266}
4267
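/*
 * Allocate the schedule data and enough iTDs for this URB, reusing iTDs
 * from the stream's free list when the hardware can no longer be using
 * them.  The schedule is parked in urb->hcpriv until iso_stream_schedule()
 * and itd_link_urb() consume it.
 */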
4268static int
4269itd_urb_transaction (
4270 struct fusbh200_iso_stream *stream,
4271 struct fusbh200_hcd *fusbh200,
4272 struct urb *urb,
4273 gfp_t mem_flags
4274)
4275{
4276 struct fusbh200_itd *itd;
4277 dma_addr_t itd_dma;
4278 int i;
4279 unsigned num_itds;
4280 struct fusbh200_iso_sched *sched;
4281 unsigned long flags;
4282
4283 sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
4284 if (unlikely (sched == NULL))
4285 return -ENOMEM;
4286
4287 itd_sched_init(fusbh200, sched, stream, urb);
4288
4289 if (urb->interval < 8)
4290 num_itds = 1 + (sched->span + 7) / 8;
4291 else
4292 num_itds = urb->number_of_packets;
4293
4294 /* allocate/init ITDs */
4295 spin_lock_irqsave (&fusbh200->lock, flags);
4296 for (i = 0; i < num_itds; i++) {
4297
4298 /*
4299 * Use iTDs from the free list, but not iTDs that may
4300 * still be in use by the hardware.
4301 */
4302 if (likely(!list_empty(&stream->free_list))) {
4303 itd = list_first_entry(&stream->free_list,
4304 struct fusbh200_itd, itd_list);
4305 if (itd->frame == fusbh200->now_frame)
4306 goto alloc_itd;
4307 list_del (&itd->itd_list);
4308 itd_dma = itd->itd_dma;
4309 } else {
4310 alloc_itd:
4311 spin_unlock_irqrestore (&fusbh200->lock, flags);
4312 itd = dma_pool_alloc (fusbh200->itd_pool, mem_flags,
4313 &itd_dma);
4314 spin_lock_irqsave (&fusbh200->lock, flags);
4315 if (!itd) {
4316 iso_sched_free(stream, sched);
4317 spin_unlock_irqrestore(&fusbh200->lock, flags);
4318 return -ENOMEM;
4319 }
4320 }
4321
4322 memset (itd, 0, sizeof *itd);
4323 itd->itd_dma = itd_dma;
4324 list_add (&itd->itd_list, &sched->td_list);
4325 }
4326 spin_unlock_irqrestore (&fusbh200->lock, flags);
4327
4328 /* temporarily store schedule info in hcpriv */
4329 urb->hcpriv = sched;
4330 urb->error_count = 0;
4331 return 0;
4332}
4333
4334/*-------------------------------------------------------------------------*/
4335
4336static inline int
4337itd_slot_ok (
4338 struct fusbh200_hcd *fusbh200,
4339 u32 mod,
4340 u32 uframe,
4341 u8 usecs,
4342 u32 period
4343)
4344{
4345 uframe %= period;
4346 do {
4347 /* can't commit more than uframe_periodic_max usec */
4348 if (periodic_usecs (fusbh200, uframe >> 3, uframe & 0x7)
4349 > (fusbh200->uframe_periodic_max - usecs))
4350 return 0;
4351
4352 /* we know urb->interval is 2^N uframes */
4353 uframe += period;
4354 } while (uframe < mod);
4355 return 1;
4356}
4357
4358/*
4359 * This scheduler plans almost as far into the future as it has actual
4360 * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
4361 * "as small as possible" to be cache-friendlier.) That limits the size
4362 * transfers you can stream reliably; avoid more than 64 msec per urb.
4363 * Also avoid queue depths of less than fusbh200's worst irq latency (affected
4364 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
4365 * and other factors); or more than about 230 msec total (for portability,
4366 * given FUSBH200_TUNE_FLS and the slop). Or, write a smarter scheduler!
4367 */
4368
4369#define SCHEDULE_SLOP 80 /* microframes */
4370
4371static int
4372iso_stream_schedule (
4373 struct fusbh200_hcd *fusbh200,
4374 struct urb *urb,
4375 struct fusbh200_iso_stream *stream
4376)
4377{
4378 u32 now, next, start, period, span;
4379 int status;
4380 unsigned mod = fusbh200->periodic_size << 3;
4381 struct fusbh200_iso_sched *sched = urb->hcpriv;
4382
4383 period = urb->interval;
4384 span = sched->span;
4385
4386 if (span > mod - SCHEDULE_SLOP) {
4387 fusbh200_dbg (fusbh200, "iso request %p too long\n", urb);
4388 status = -EFBIG;
4389 goto fail;
4390 }
4391
4392 now = fusbh200_read_frame_index(fusbh200) & (mod - 1);
4393
4394 /* Typical case: reuse current schedule, stream is still active.
4395 * Hopefully there are no gaps from the host falling behind
4396 * (irq delays etc), but if there are we'll take the next
4397 * slot in the schedule, implicitly assuming URB_ISO_ASAP.
4398 */
4399 if (likely (!list_empty (&stream->td_list))) {
4400 u32 excess;
4401
4402 /* For high speed devices, allow scheduling within the
4403 * isochronous scheduling threshold. For full speed devices
4404		 * and Intel PCI-based controllers, don't (workaround for
4405		 * an Intel ICH9 bug).
4406 */
4407 if (!stream->highspeed && fusbh200->fs_i_thresh)
4408 next = now + fusbh200->i_thresh;
4409 else
4410 next = now;
4411
4412 /* Fell behind (by up to twice the slop amount)?
4413 * We decide based on the time of the last currently-scheduled
4414 * slot, not the time of the next available slot.
4415 */
4416 excess = (stream->next_uframe - period - next) & (mod - 1);
4417 if (excess >= mod - 2 * SCHEDULE_SLOP)
4418 start = next + excess - mod + period *
4419 DIV_ROUND_UP(mod - excess, period);
4420 else
4421 start = next + excess + period;
4422 if (start - now >= mod) {
4423 fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n",
4424 urb, start - now - period, period,
4425 mod);
4426 status = -EFBIG;
4427 goto fail;
4428 }
4429 }
4430
4431 /* need to schedule; when's the next (u)frame we could start?
4432 * this is bigger than fusbh200->i_thresh allows; scheduling itself
4433 * isn't free, the slop should handle reasonably slow cpus. it
4434 * can also help high bandwidth if the dma and irq loads don't
4435 * jump until after the queue is primed.
4436 */
4437 else {
4438 int done = 0;
4439 start = SCHEDULE_SLOP + (now & ~0x07);
4440
4441 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
4442
4443 /* find a uframe slot with enough bandwidth.
4444 * Early uframes are more precious because full-speed
4445 * iso IN transfers can't use late uframes,
4446 * and therefore they should be allocated last.
4447 */
4448 next = start;
4449 start += period;
4450 do {
4451 start--;
4452 /* check schedule: enough space? */
4453 if (itd_slot_ok(fusbh200, mod, start,
4454 stream->usecs, period))
4455 done = 1;
4456 } while (start > next && !done);
4457
4458 /* no room in the schedule */
4459 if (!done) {
4460 fusbh200_dbg(fusbh200, "iso resched full %p (now %d max %d)\n",
4461 urb, now, now + mod);
4462 status = -ENOSPC;
4463 goto fail;
4464 }
4465 }
4466
4467 /* Tried to schedule too far into the future? */
4468 if (unlikely(start - now + span - period
4469 >= mod - 2 * SCHEDULE_SLOP)) {
4470 fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n",
4471 urb, start - now, span - period,
4472 mod - 2 * SCHEDULE_SLOP);
4473 status = -EFBIG;
4474 goto fail;
4475 }
4476
4477 stream->next_uframe = start & (mod - 1);
4478
4479 /* report high speed start in uframes; full speed, in frames */
4480 urb->start_frame = stream->next_uframe;
4481 if (!stream->highspeed)
4482 urb->start_frame >>= 3;
4483
4484 /* Make sure scan_isoc() sees these */
4485 if (fusbh200->isoc_count == 0)
4486 fusbh200->next_frame = now >> 3;
4487 return 0;
4488
4489 fail:
4490 iso_sched_free(stream, sched);
4491 urb->hcpriv = NULL;
4492 return status;
4493}
4494
4495/*-------------------------------------------------------------------------*/
4496
4497static inline void
4498itd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_iso_stream *stream,
4499 struct fusbh200_itd *itd)
4500{
4501 int i;
4502
4503 /* it's been recently zeroed */
4504 itd->hw_next = FUSBH200_LIST_END(fusbh200);
4505 itd->hw_bufp [0] = stream->buf0;
4506 itd->hw_bufp [1] = stream->buf1;
4507 itd->hw_bufp [2] = stream->buf2;
4508
4509 for (i = 0; i < 8; i++)
4510 itd->index[i] = -1;
4511
4512 /* All other fields are filled when scheduling */
4513}
4514
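/*
 * Copy one packet's precomputed transaction into the iTD slot for its
 * uframe, point the slot at the right buffer page, and advance to the
 * next page when the packet crosses a 4K boundary.
 */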
4515static inline void
4516itd_patch(
4517 struct fusbh200_hcd *fusbh200,
4518 struct fusbh200_itd *itd,
4519 struct fusbh200_iso_sched *iso_sched,
4520 unsigned index,
4521 u16 uframe
4522)
4523{
4524 struct fusbh200_iso_packet *uf = &iso_sched->packet [index];
4525 unsigned pg = itd->pg;
4526
4527 // BUG_ON (pg == 6 && uf->cross);
4528
4529 uframe &= 0x07;
4530 itd->index [uframe] = index;
4531
4532 itd->hw_transaction[uframe] = uf->transaction;
4533 itd->hw_transaction[uframe] |= cpu_to_hc32(fusbh200, pg << 12);
4534 itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, uf->bufp & ~(u32)0);
4535 itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(uf->bufp >> 32));
4536
4537 /* iso_frame_desc[].offset must be strictly increasing */
4538 if (unlikely (uf->cross)) {
4539 u64 bufp = uf->bufp + 4096;
4540
4541 itd->pg = ++pg;
4542 itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, bufp & ~(u32)0);
4543 itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(bufp >> 32));
4544 }
4545}
4546
4547static inline void
4548itd_link (struct fusbh200_hcd *fusbh200, unsigned frame, struct fusbh200_itd *itd)
4549{
4550 union fusbh200_shadow *prev = &fusbh200->pshadow[frame];
4551 __hc32 *hw_p = &fusbh200->periodic[frame];
4552 union fusbh200_shadow here = *prev;
4553 __hc32 type = 0;
4554
4555 /* skip any iso nodes which might belong to previous microframes */
4556 while (here.ptr) {
4557 type = Q_NEXT_TYPE(fusbh200, *hw_p);
4558 if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH))
4559 break;
4560 prev = periodic_next_shadow(fusbh200, prev, type);
4561 hw_p = shadow_next_periodic(fusbh200, &here, type);
4562 here = *prev;
4563 }
4564
4565 itd->itd_next = here;
4566 itd->hw_next = *hw_p;
4567 prev->itd = itd;
4568 itd->frame = frame;
4569 wmb ();
4570 *hw_p = cpu_to_hc32(fusbh200, itd->itd_dma | Q_TYPE_ITD);
4571}
4572
4573/* fit urb's itds into the selected schedule slot; activate as needed */
4574static void itd_link_urb(
4575 struct fusbh200_hcd *fusbh200,
4576 struct urb *urb,
4577 unsigned mod,
4578 struct fusbh200_iso_stream *stream
4579)
4580{
4581 int packet;
4582 unsigned next_uframe, uframe, frame;
4583 struct fusbh200_iso_sched *iso_sched = urb->hcpriv;
4584 struct fusbh200_itd *itd;
4585
4586 next_uframe = stream->next_uframe & (mod - 1);
4587
4588 if (unlikely (list_empty(&stream->td_list))) {
4589 fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
4590 += stream->bandwidth;
4591 fusbh200_vdbg (fusbh200,
4592 "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
4593 urb->dev->devpath, stream->bEndpointAddress & 0x0f,
4594 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
4595 urb->interval,
4596 next_uframe >> 3, next_uframe & 0x7);
4597 }
4598
4599 /* fill iTDs uframe by uframe */
4600 for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
4601 if (itd == NULL) {
4602 /* ASSERT: we have all necessary itds */
4603 // BUG_ON (list_empty (&iso_sched->td_list));
4604
4605 /* ASSERT: no itds for this endpoint in this uframe */
4606
4607 itd = list_entry (iso_sched->td_list.next,
4608 struct fusbh200_itd, itd_list);
4609 list_move_tail (&itd->itd_list, &stream->td_list);
4610 itd->stream = stream;
4611 itd->urb = urb;
4612 itd_init (fusbh200, stream, itd);
4613 }
4614
4615 uframe = next_uframe & 0x07;
4616 frame = next_uframe >> 3;
4617
4618 itd_patch(fusbh200, itd, iso_sched, packet, uframe);
4619
4620 next_uframe += stream->interval;
4621 next_uframe &= mod - 1;
4622 packet++;
4623
4624 /* link completed itds into the schedule */
4625 if (((next_uframe >> 3) != frame)
4626 || packet == urb->number_of_packets) {
4627 itd_link(fusbh200, frame & (fusbh200->periodic_size - 1), itd);
4628 itd = NULL;
4629 }
4630 }
4631 stream->next_uframe = next_uframe;
4632
4633 /* don't need that schedule data any more */
4634 iso_sched_free (stream, iso_sched);
4635 urb->hcpriv = NULL;
4636
4637 ++fusbh200->isoc_count;
4638 enable_periodic(fusbh200);
4639}
4640
4641#define ISO_ERRS (FUSBH200_ISOC_BUF_ERR | FUSBH200_ISOC_BABBLE | FUSBH200_ISOC_XACTERR)
4642
4643/* Process and recycle a completed ITD. Return true iff its urb completed,
4644 * and hence its completion callback probably added things to the hardware
4645 * schedule.
4646 *
4647 * Note that we carefully avoid recycling this descriptor until after any
4648 * completion callback runs, so that it won't be reused quickly. That is,
4649 * assuming (a) no more than two urbs per frame on this endpoint, and also
4650 * (b) only this endpoint's completions submit URBs. It seems some silicon
4651 * corrupts things if you reuse completed descriptors very quickly...
4652 */
4653static bool itd_complete(struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd)
4654{
4655 struct urb *urb = itd->urb;
4656 struct usb_iso_packet_descriptor *desc;
4657 u32 t;
4658 unsigned uframe;
4659 int urb_index = -1;
4660 struct fusbh200_iso_stream *stream = itd->stream;
4661 struct usb_device *dev;
4662 bool retval = false;
4663
4664 /* for each uframe with a packet */
4665 for (uframe = 0; uframe < 8; uframe++) {
4666 if (likely (itd->index[uframe] == -1))
4667 continue;
4668 urb_index = itd->index[uframe];
4669 desc = &urb->iso_frame_desc [urb_index];
4670
4671 t = hc32_to_cpup(fusbh200, &itd->hw_transaction [uframe]);
4672 itd->hw_transaction [uframe] = 0;
4673
4674 /* report transfer status */
4675 if (unlikely (t & ISO_ERRS)) {
4676 urb->error_count++;
4677 if (t & FUSBH200_ISOC_BUF_ERR)
4678 desc->status = usb_pipein (urb->pipe)
4679 ? -ENOSR /* hc couldn't read */
4680 : -ECOMM; /* hc couldn't write */
4681 else if (t & FUSBH200_ISOC_BABBLE)
4682 desc->status = -EOVERFLOW;
4683 else /* (t & FUSBH200_ISOC_XACTERR) */
4684 desc->status = -EPROTO;
4685
4686 /* HC need not update length with this error */
4687 if (!(t & FUSBH200_ISOC_BABBLE)) {
4688 desc->actual_length = fusbh200_itdlen(urb, desc, t);
4689 urb->actual_length += desc->actual_length;
4690 }
4691 } else if (likely ((t & FUSBH200_ISOC_ACTIVE) == 0)) {
4692 desc->status = 0;
4693 desc->actual_length = fusbh200_itdlen(urb, desc, t);
4694 urb->actual_length += desc->actual_length;
4695 } else {
4696 /* URB was too late */
4697 desc->status = -EXDEV;
4698 }
4699 }
4700
4701 /* handle completion now? */
4702 if (likely ((urb_index + 1) != urb->number_of_packets))
4703 goto done;
4704
4705 /* ASSERT: it's really the last itd for this urb
4706 list_for_each_entry (itd, &stream->td_list, itd_list)
4707 BUG_ON (itd->urb == urb);
4708 */
4709
4710 /* give urb back to the driver; completion often (re)submits */
4711 dev = urb->dev;
4712 fusbh200_urb_done(fusbh200, urb, 0);
4713 retval = true;
4714 urb = NULL;
4715
4716 --fusbh200->isoc_count;
4717 disable_periodic(fusbh200);
4718
4719 if (unlikely(list_is_singular(&stream->td_list))) {
4720 fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
4721 -= stream->bandwidth;
4722 fusbh200_vdbg (fusbh200,
4723 "deschedule devp %s ep%d%s-iso\n",
4724 dev->devpath, stream->bEndpointAddress & 0x0f,
4725 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
4726 }
4727
4728done:
4729 itd->urb = NULL;
4730
4731 /* Add to the end of the free list for later reuse */
4732 list_move_tail(&itd->itd_list, &stream->free_list);
4733
4734 /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
4735 if (list_empty(&stream->td_list)) {
4736 list_splice_tail_init(&stream->free_list,
4737 &fusbh200->cached_itd_list);
4738 start_free_itds(fusbh200);
4739 }
4740
4741 return retval;
4742}
4743
4744/*-------------------------------------------------------------------------*/
4745
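/*
 * Entry point for iso URBs: look up (or build) the stream, allocate the
 * iTDs without holding the lock, then schedule the stream and link the
 * iTDs into the periodic list under fusbh200->lock.
 */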
4746static int itd_submit (struct fusbh200_hcd *fusbh200, struct urb *urb,
4747 gfp_t mem_flags)
4748{
4749 int status = -EINVAL;
4750 unsigned long flags;
4751 struct fusbh200_iso_stream *stream;
4752
4753 /* Get iso_stream head */
4754 stream = iso_stream_find (fusbh200, urb);
4755 if (unlikely (stream == NULL)) {
4756 fusbh200_dbg (fusbh200, "can't get iso stream\n");
4757 return -ENOMEM;
4758 }
4759 if (unlikely (urb->interval != stream->interval &&
4760 fusbh200_port_speed(fusbh200, 0) == USB_PORT_STAT_HIGH_SPEED)) {
4761 fusbh200_dbg (fusbh200, "can't change iso interval %d --> %d\n",
4762 stream->interval, urb->interval);
4763 goto done;
4764 }
4765
4766#ifdef FUSBH200_URB_TRACE
4767 fusbh200_dbg (fusbh200,
4768 "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
4769 __func__, urb->dev->devpath, urb,
4770 usb_pipeendpoint (urb->pipe),
4771 usb_pipein (urb->pipe) ? "in" : "out",
4772 urb->transfer_buffer_length,
4773 urb->number_of_packets, urb->interval,
4774 stream);
4775#endif
4776
4777 /* allocate ITDs w/o locking anything */
4778 status = itd_urb_transaction (stream, fusbh200, urb, mem_flags);
4779 if (unlikely (status < 0)) {
4780 fusbh200_dbg (fusbh200, "can't init itds\n");
4781 goto done;
4782 }
4783
4784 /* schedule ... need to lock */
4785 spin_lock_irqsave (&fusbh200->lock, flags);
4786 if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
4787 status = -ESHUTDOWN;
4788 goto done_not_linked;
4789 }
4790 status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
4791 if (unlikely(status))
4792 goto done_not_linked;
4793 status = iso_stream_schedule(fusbh200, urb, stream);
4794 if (likely (status == 0))
4795 itd_link_urb (fusbh200, urb, fusbh200->periodic_size << 3, stream);
4796 else
4797 usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
4798 done_not_linked:
4799 spin_unlock_irqrestore (&fusbh200->lock, flags);
4800 done:
4801 return status;
4802}
4803
4804/*-------------------------------------------------------------------------*/
4805
4806static void scan_isoc(struct fusbh200_hcd *fusbh200)
4807{
4808 unsigned uf, now_frame, frame;
4809 unsigned fmask = fusbh200->periodic_size - 1;
4810 bool modified, live;
4811
4812 /*
4813 * When running, scan from last scan point up to "now"
4814 * else clean up by scanning everything that's left.
4815 * Touches as few pages as possible: cache-friendly.
4816 */
4817 if (fusbh200->rh_state >= FUSBH200_RH_RUNNING) {
4818 uf = fusbh200_read_frame_index(fusbh200);
4819 now_frame = (uf >> 3) & fmask;
4820 live = true;
4821 } else {
4822 now_frame = (fusbh200->next_frame - 1) & fmask;
4823 live = false;
4824 }
4825 fusbh200->now_frame = now_frame;
4826
4827 frame = fusbh200->next_frame;
4828 for (;;) {
4829 union fusbh200_shadow q, *q_p;
4830 __hc32 type, *hw_p;
4831
4832restart:
4833 /* scan each element in frame's queue for completions */
4834 q_p = &fusbh200->pshadow [frame];
4835 hw_p = &fusbh200->periodic [frame];
4836 q.ptr = q_p->ptr;
4837 type = Q_NEXT_TYPE(fusbh200, *hw_p);
4838 modified = false;
4839
4840 while (q.ptr != NULL) {
4841 switch (hc32_to_cpu(fusbh200, type)) {
4842 case Q_TYPE_ITD:
4843 /* If this ITD is still active, leave it for
4844 * later processing ... check the next entry.
4845 * No need to check for activity unless the
4846 * frame is current.
4847 */
4848 if (frame == now_frame && live) {
4849 rmb();
4850 for (uf = 0; uf < 8; uf++) {
4851 if (q.itd->hw_transaction[uf] &
4852 ITD_ACTIVE(fusbh200))
4853 break;
4854 }
4855 if (uf < 8) {
4856 q_p = &q.itd->itd_next;
4857 hw_p = &q.itd->hw_next;
4858 type = Q_NEXT_TYPE(fusbh200,
4859 q.itd->hw_next);
4860 q = *q_p;
4861 break;
4862 }
4863 }
4864
4865 /* Take finished ITDs out of the schedule
4866 * and process them: recycle, maybe report
4867 * URB completion. HC won't cache the
4868 * pointer for much longer, if at all.
4869 */
4870 *q_p = q.itd->itd_next;
4871 *hw_p = q.itd->hw_next;
4872 type = Q_NEXT_TYPE(fusbh200, q.itd->hw_next);
4873 wmb();
4874 modified = itd_complete (fusbh200, q.itd);
4875 q = *q_p;
4876 break;
4877 default:
4878 fusbh200_dbg(fusbh200, "corrupt type %d frame %d shadow %p\n",
4879 type, frame, q.ptr);
4880 // BUG ();
4881 /* FALL THROUGH */
4882 case Q_TYPE_QH:
4883 case Q_TYPE_FSTN:
4884 /* End of the iTDs and siTDs */
4885 q.ptr = NULL;
4886 break;
4887 }
4888
4889 /* assume completion callbacks modify the queue */
4890 if (unlikely(modified && fusbh200->isoc_count > 0))
4891 goto restart;
4892 }
4893
4894 /* Stop when we have reached the current frame */
4895 if (frame == now_frame)
4896 break;
4897 frame = (frame + 1) & fmask;
4898 }
4899 fusbh200->next_frame = now_frame;
4900}
4901/*-------------------------------------------------------------------------*/
4902/*
4903 * Display / Set uframe_periodic_max
4904 */
4905static ssize_t show_uframe_periodic_max(struct device *dev,
4906 struct device_attribute *attr,
4907 char *buf)
4908{
4909 struct fusbh200_hcd *fusbh200;
4910 int n;
4911
4912 fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev)));
4913 n = scnprintf(buf, PAGE_SIZE, "%d\n", fusbh200->uframe_periodic_max);
4914 return n;
4915}
4916
4917
4918static ssize_t store_uframe_periodic_max(struct device *dev,
4919 struct device_attribute *attr,
4920 const char *buf, size_t count)
4921{
4922 struct fusbh200_hcd *fusbh200;
4923 unsigned uframe_periodic_max;
4924 unsigned frame, uframe;
4925 unsigned short allocated_max;
4926 unsigned long flags;
4927 ssize_t ret;
4928
4929 fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev)));
4930 if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
4931 return -EINVAL;
4932
4933 if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
4934 fusbh200_info(fusbh200, "rejecting invalid request for "
4935 "uframe_periodic_max=%u\n", uframe_periodic_max);
4936 return -EINVAL;
4937 }
4938
4939 ret = -EINVAL;
4940
4941 /*
4942 * lock, so that our checking does not race with possible periodic
4943 * bandwidth allocation through submitting new urbs.
4944 */
4945 spin_lock_irqsave (&fusbh200->lock, flags);
4946
4947 /*
4948 * for request to decrease max periodic bandwidth, we have to check
4949 * every microframe in the schedule to see whether the decrease is
4950 * possible.
4951 */
4952 if (uframe_periodic_max < fusbh200->uframe_periodic_max) {
4953 allocated_max = 0;
4954
4955 for (frame = 0; frame < fusbh200->periodic_size; ++frame)
4956 for (uframe = 0; uframe < 7; ++uframe)
4957 allocated_max = max(allocated_max,
4958 periodic_usecs (fusbh200, frame, uframe));
4959
4960 if (allocated_max > uframe_periodic_max) {
4961 fusbh200_info(fusbh200,
4962				"cannot decrease uframe_periodic_max because "
4963 "periodic bandwidth is already allocated "
4964 "(%u > %u)\n",
4965 allocated_max, uframe_periodic_max);
4966 goto out_unlock;
4967 }
4968 }
4969
4970 /* increasing is always ok */
4971
4972 fusbh200_info(fusbh200, "setting max periodic bandwidth to %u%% "
4973 "(== %u usec/uframe)\n",
4974 100*uframe_periodic_max/125, uframe_periodic_max);
4975
4976 if (uframe_periodic_max != 100)
4977 fusbh200_warn(fusbh200, "max periodic bandwidth set is non-standard\n");
4978
4979 fusbh200->uframe_periodic_max = uframe_periodic_max;
4980 ret = count;
4981
4982out_unlock:
4983 spin_unlock_irqrestore (&fusbh200->lock, flags);
4984 return ret;
4985}
4986static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max);
4987
4988
4989static inline int create_sysfs_files(struct fusbh200_hcd *fusbh200)
4990{
4991 struct device *controller = fusbh200_to_hcd(fusbh200)->self.controller;
4992 int i = 0;
4993
4994 if (i)
4995 goto out;
4996
4997 i = device_create_file(controller, &dev_attr_uframe_periodic_max);
4998out:
4999 return i;
5000}
5001
5002static inline void remove_sysfs_files(struct fusbh200_hcd *fusbh200)
5003{
5004 struct device *controller = fusbh200_to_hcd(fusbh200)->self.controller;
5005
5006 device_remove_file(controller, &dev_attr_uframe_periodic_max);
5007}
5008/*-------------------------------------------------------------------------*/
5009
5010/* On some systems, leaving remote wakeup enabled prevents system shutdown.
5011 * The firmware seems to think that powering off is a wakeup event!
5012 * This routine turns off remote wakeup and everything else, on all ports.
5013 */
5014static void fusbh200_turn_off_all_ports(struct fusbh200_hcd *fusbh200)
5015{
5016 u32 __iomem *status_reg = &fusbh200->regs->port_status;
5017
5018 fusbh200_writel(fusbh200, PORT_RWC_BITS, status_reg);
5019}
5020
5021/*
5022 * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
5023 * Must be called with interrupts enabled and the lock not held.
5024 */
5025static void fusbh200_silence_controller(struct fusbh200_hcd *fusbh200)
5026{
5027 fusbh200_halt(fusbh200);
5028
5029 spin_lock_irq(&fusbh200->lock);
5030 fusbh200->rh_state = FUSBH200_RH_HALTED;
5031 fusbh200_turn_off_all_ports(fusbh200);
5032 spin_unlock_irq(&fusbh200->lock);
5033}
5034
5035 /* fusbh200_shutdown kicks in for silicon on any bus (not just pci, etc).
5036 * This forcibly disables dma and IRQs, helping kexec and other cases
5037 * where the next system software may expect clean state.
5038 */
5039static void fusbh200_shutdown(struct usb_hcd *hcd)
5040{
5041 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
5042
5043 spin_lock_irq(&fusbh200->lock);
5044 fusbh200->shutdown = true;
5045 fusbh200->rh_state = FUSBH200_RH_STOPPING;
5046 fusbh200->enabled_hrtimer_events = 0;
5047 spin_unlock_irq(&fusbh200->lock);
5048
5049 fusbh200_silence_controller(fusbh200);
5050
5051 hrtimer_cancel(&fusbh200->hrtimer);
5052}
5053
5054/*-------------------------------------------------------------------------*/
5055
5056/*
5057 * fusbh200_work is called from some interrupts, timers, and so on.
5058 * it calls driver completion functions, after dropping fusbh200->lock.
5059 */
5060static void fusbh200_work (struct fusbh200_hcd *fusbh200)
5061{
5062 /* another CPU may drop fusbh200->lock during a schedule scan while
5063 * it reports urb completions. this flag guards against bogus
5064 * attempts at re-entrant schedule scanning.
5065 */
5066 if (fusbh200->scanning) {
5067 fusbh200->need_rescan = true;
5068 return;
5069 }
5070 fusbh200->scanning = true;
5071
5072 rescan:
5073 fusbh200->need_rescan = false;
5074 if (fusbh200->async_count)
5075 scan_async(fusbh200);
5076 if (fusbh200->intr_count > 0)
5077 scan_intr(fusbh200);
5078 if (fusbh200->isoc_count > 0)
5079 scan_isoc(fusbh200);
5080 if (fusbh200->need_rescan)
5081 goto rescan;
5082 fusbh200->scanning = false;
5083
5084 /* the IO watchdog guards against hardware or driver bugs that
5085 * misplace IRQs, and should let us run completely without IRQs.
5086 * such lossage has been observed on both VT6202 and VT8235.
5087 */
5088 turn_on_io_watchdog(fusbh200);
5089}
5090
5091/*
5092 * Called when the fusbh200_hcd module is removed.
5093 */
5094static void fusbh200_stop (struct usb_hcd *hcd)
5095{
5096 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
5097
5098 fusbh200_dbg (fusbh200, "stop\n");
5099
5100 /* no more interrupts ... */
5101
5102 spin_lock_irq(&fusbh200->lock);
5103 fusbh200->enabled_hrtimer_events = 0;
5104 spin_unlock_irq(&fusbh200->lock);
5105
5106 fusbh200_quiesce(fusbh200);
5107 fusbh200_silence_controller(fusbh200);
5108 fusbh200_reset (fusbh200);
5109
5110 hrtimer_cancel(&fusbh200->hrtimer);
5111 remove_sysfs_files(fusbh200);
5112 remove_debug_files (fusbh200);
5113
5114 /* root hub is shut down separately (first, when possible) */
5115 spin_lock_irq (&fusbh200->lock);
5116 end_free_itds(fusbh200);
5117 spin_unlock_irq (&fusbh200->lock);
5118 fusbh200_mem_cleanup (fusbh200);
5119
5120#ifdef FUSBH200_STATS
5121 fusbh200_dbg(fusbh200, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
5122 fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
5123 fusbh200->stats.lost_iaa);
5124 fusbh200_dbg (fusbh200, "complete %ld unlink %ld\n",
5125 fusbh200->stats.complete, fusbh200->stats.unlink);
5126#endif
5127
5128 dbg_status (fusbh200, "fusbh200_stop completed",
5129 fusbh200_readl(fusbh200, &fusbh200->regs->status));
5130}
5131
5132/* one-time init, only for memory state */
5133static int hcd_fusbh200_init(struct usb_hcd *hcd)
5134{
5135 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
5136 u32 temp;
5137 int retval;
5138 u32 hcc_params;
5139 struct fusbh200_qh_hw *hw;
5140
5141 spin_lock_init(&fusbh200->lock);
5142
5143 /*
5144	 * keep the io watchdog enabled by default; well-behaved HCDs can turn it off later
5145 */
5146 fusbh200->need_io_watchdog = 1;
5147
5148 hrtimer_init(&fusbh200->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
5149 fusbh200->hrtimer.function = fusbh200_hrtimer_func;
5150 fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT;
5151
5152 hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
5153
5154 /*
5155 * by default set standard 80% (== 100 usec/uframe) max periodic
5156 * bandwidth as required by USB 2.0
5157 */
5158 fusbh200->uframe_periodic_max = 100;
5159
5160 /*
5161 * hw default: 1K periodic list heads, one per frame.
5162 * periodic_size can shrink by USBCMD update if hcc_params allows.
5163 */
5164 fusbh200->periodic_size = DEFAULT_I_TDPS;
5165 INIT_LIST_HEAD(&fusbh200->intr_qh_list);
5166 INIT_LIST_HEAD(&fusbh200->cached_itd_list);
5167
5168 if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
5169 /* periodic schedule size can be smaller than default */
5170 switch (FUSBH200_TUNE_FLS) {
5171 case 0: fusbh200->periodic_size = 1024; break;
5172 case 1: fusbh200->periodic_size = 512; break;
5173 case 2: fusbh200->periodic_size = 256; break;
5174 default: BUG();
5175 }
5176 }
5177 if ((retval = fusbh200_mem_init(fusbh200, GFP_KERNEL)) < 0)
5178 return retval;
5179
5180 /* controllers may cache some of the periodic schedule ... */
5181 fusbh200->i_thresh = 2;
5182
5183 /*
5184 * dedicate a qh for the async ring head, since we couldn't unlink
5185 * a 'real' qh without stopping the async schedule [4.8]. use it
5186 * as the 'reclamation list head' too.
5187 * its dummy is used in hw_alt_next of many tds, to prevent the qh
5188 * from automatically advancing to the next td after short reads.
5189 */
5190 fusbh200->async->qh_next.qh = NULL;
5191 hw = fusbh200->async->hw;
5192 hw->hw_next = QH_NEXT(fusbh200, fusbh200->async->qh_dma);
5193 hw->hw_info1 = cpu_to_hc32(fusbh200, QH_HEAD);
5194 hw->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT);
5195 hw->hw_qtd_next = FUSBH200_LIST_END(fusbh200);
5196 fusbh200->async->qh_state = QH_STATE_LINKED;
5197 hw->hw_alt_next = QTD_NEXT(fusbh200, fusbh200->async->dummy->qtd_dma);
5198
5199 /* clear interrupt enables, set irq latency */
5200 if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
5201 log2_irq_thresh = 0;
5202 temp = 1 << (16 + log2_irq_thresh);
5203 if (HCC_CANPARK(hcc_params)) {
5204 /* HW default park == 3, on hardware that supports it (like
5205 * NVidia and ALI silicon), maximizes throughput on the async
5206 * schedule by avoiding QH fetches between transfers.
5207 *
5208 * With fast usb storage devices and NForce2, "park" seems to
5209 * make problems: throughput reduction (!), data errors...
5210 */
5211 if (park) {
5212 park = min(park, (unsigned) 3);
5213 temp |= CMD_PARK;
5214 temp |= park << 8;
5215 }
5216 fusbh200_dbg(fusbh200, "park %d\n", park);
5217 }
5218 if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
5219 /* periodic schedule size can be smaller than default */
5220 temp &= ~(3 << 2);
5221 temp |= (FUSBH200_TUNE_FLS << 2);
5222 }
5223 fusbh200->command = temp;
5224
5225 /* Accept arbitrarily long scatter-gather lists */
5226 if (!(hcd->driver->flags & HCD_LOCAL_MEM))
5227 hcd->self.sg_tablesize = ~0;
5228 return 0;
5229}
5230
5231/* start HC running; it's halted, hcd_fusbh200_init() has been run (once) */
5232static int fusbh200_run (struct usb_hcd *hcd)
5233{
5234 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
5235 u32 temp;
5236 u32 hcc_params;
5237
5238 hcd->uses_new_polling = 1;
5239
5240 /* EHCI spec section 4.1 */
5241
5242 fusbh200_writel(fusbh200, fusbh200->periodic_dma, &fusbh200->regs->frame_list);
5243 fusbh200_writel(fusbh200, (u32)fusbh200->async->qh_dma, &fusbh200->regs->async_next);
5244
5245 /*
5246 * hcc_params controls whether fusbh200->regs->segment must (!!!)
5247 * be used; it constrains QH/ITD/SITD and QTD locations.
5248 * pci_pool consistent memory always uses segment zero.
5249 * streaming mappings for I/O buffers, like pci_map_single(),
5250 * can return segments above 4GB, if the device allows.
5251 *
5252 * NOTE: the dma mask is visible through dma_supported(), so
5253 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
5254 * Scsi_Host.highmem_io, and so forth. It's readonly to all
5255 * host side drivers though.
5256 */
5257 hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
5258
5259 // Philips, Intel, and maybe others need CMD_RUN before the
5260 // root hub will detect new devices (why?); NEC doesn't
5261 fusbh200->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
5262 fusbh200->command |= CMD_RUN;
5263 fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
5264 dbg_cmd (fusbh200, "init", fusbh200->command);
5265
5266 /*
5267 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
5268 * are explicitly handed to companion controller(s), so no TT is
5269 * involved with the root hub. (Except where one is integrated,
5270 * and there's no companion controller unless maybe for USB OTG.)
5271 *
5272 * Turning on the CF flag will transfer ownership of all ports
5273 * from the companions to the EHCI controller. If any of the
5274 * companions are in the middle of a port reset at the time, it
5275 * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
5276 * guarantees that no resets are in progress. After we set CF,
5277 * a short delay lets the hardware catch up; new resets shouldn't
5278 * be started before the port switching actions could complete.
5279 */
5280 down_write(&ehci_cf_port_reset_rwsem);
5281 fusbh200->rh_state = FUSBH200_RH_RUNNING;
5282 fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted writes */
5283 msleep(5);
5284 up_write(&ehci_cf_port_reset_rwsem);
5285 fusbh200->last_periodic_enable = ktime_get_real();
5286
5287 temp = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
5288 fusbh200_info (fusbh200,
5289 "USB %x.%x started, EHCI %x.%02x\n",
5290 ((fusbh200->sbrn & 0xf0)>>4), (fusbh200->sbrn & 0x0f),
5291 temp >> 8, temp & 0xff);
5292
5293 fusbh200_writel(fusbh200, INTR_MASK,
5294 &fusbh200->regs->intr_enable); /* Turn On Interrupts */
5295
5296	/* GRR this is run-once init(), being done every time the HC starts.
5297	 * So long as these files are part of class devices, we can't do it
5298	 * in init() since the class device isn't created that early.
5299	 */
5300 create_debug_files(fusbh200);
5301 create_sysfs_files(fusbh200);
5302
5303 return 0;
5304}
5305
5306static int fusbh200_setup(struct usb_hcd *hcd)
5307{
5308 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
5309 int retval;
5310
5311 fusbh200->regs = (void __iomem *)fusbh200->caps +
5312 HC_LENGTH(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
5313 dbg_hcs_params(fusbh200, "reset");
5314 dbg_hcc_params(fusbh200, "reset");
5315
5316 /* cache this readonly data; minimize chip reads */
5317 fusbh200->hcs_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
5318
5319 fusbh200->sbrn = HCD_USB2;
5320
5321 /* data structure init */
5322 retval = hcd_fusbh200_init(hcd);
5323 if (retval)
5324 return retval;
5325
5326 retval = fusbh200_halt(fusbh200);
5327 if (retval)
5328 return retval;
5329
5330 fusbh200_reset(fusbh200);
5331
5332 return 0;
5333}
5334
5335/*-------------------------------------------------------------------------*/
5336
5337static irqreturn_t fusbh200_irq (struct usb_hcd *hcd)
5338{
5339 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
5340 u32 status, masked_status, pcd_status = 0, cmd;
5341 int bh;
5342
5343 spin_lock (&fusbh200->lock);
5344
5345 status = fusbh200_readl(fusbh200, &fusbh200->regs->status);
5346
5347 /* e.g. cardbus physical eject */
5348 if (status == ~(u32) 0) {
5349 fusbh200_dbg (fusbh200, "device removed\n");
5350 goto dead;
5351 }
5352
5353 /*
5354 * We don't use STS_FLR, but some controllers don't like it to
5355 * remain on, so mask it out along with the other status bits.
5356 */
5357 masked_status = status & (INTR_MASK | STS_FLR);
5358
5359 /* Shared IRQ? */
5360 if (!masked_status || unlikely(fusbh200->rh_state == FUSBH200_RH_HALTED)) {
5361 spin_unlock(&fusbh200->lock);
5362 return IRQ_NONE;
5363 }
5364
5365 /* clear (just) interrupts */
5366 fusbh200_writel(fusbh200, masked_status, &fusbh200->regs->status);
5367 cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
5368 bh = 0;
5369
5370#ifdef VERBOSE_DEBUG
5371 /* unrequested/ignored: Frame List Rollover */
5372 dbg_status (fusbh200, "irq", status);
5373#endif
5374
5375 /* INT, ERR, and IAA interrupt rates can be throttled */
5376
5377 /* normal [4.15.1.2] or error [4.15.1.1] completion */
5378 if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
5379 if (likely ((status & STS_ERR) == 0))
5380 COUNT (fusbh200->stats.normal);
5381 else
5382 COUNT (fusbh200->stats.error);
5383 bh = 1;
5384 }
5385
5386 /* complete the unlinking of some qh [4.15.2.3] */
5387 if (status & STS_IAA) {
5388
5389 /* Turn off the IAA watchdog */
5390 fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_IAA_WATCHDOG);
5391
5392 /*
5393 * Mild optimization: Allow another IAAD to reset the
5394 * hrtimer, if one occurs before the next expiration.
5395 * In theory we could always cancel the hrtimer, but
5396 * tests show that about half the time it will be reset
5397 * for some other event anyway.
5398 */
5399 if (fusbh200->next_hrtimer_event == FUSBH200_HRTIMER_IAA_WATCHDOG)
5400 ++fusbh200->next_hrtimer_event;
5401
5402 /* guard against (alleged) silicon errata */
5403 if (cmd & CMD_IAAD)
5404 fusbh200_dbg(fusbh200, "IAA with IAAD still set?\n");
5405 if (fusbh200->async_iaa) {
5406 COUNT(fusbh200->stats.iaa);
5407 end_unlink_async(fusbh200);
5408 } else
5409 fusbh200_dbg(fusbh200, "IAA with nothing unlinked?\n");
5410 }
5411
5412 /* remote wakeup [4.3.1] */
5413 if (status & STS_PCD) {
5414 int pstatus;
5415 u32 __iomem *status_reg = &fusbh200->regs->port_status;
5416
5417 /* kick root hub later */
5418 pcd_status = status;
5419
5420 /* resume root hub? */
5421 if (fusbh200->rh_state == FUSBH200_RH_SUSPENDED)
5422 usb_hcd_resume_root_hub(hcd);
5423
5424 pstatus = fusbh200_readl(fusbh200, status_reg);
5425
5426 if (test_bit(0, &fusbh200->suspended_ports) &&
5427 ((pstatus & PORT_RESUME) ||
5428 !(pstatus & PORT_SUSPEND)) &&
5429 (pstatus & PORT_PE) &&
5430 fusbh200->reset_done[0] == 0) {
5431
5432 /* start 20 msec resume signaling from this port,
5433 * and make khubd collect PORT_STAT_C_SUSPEND to
5434 * stop that signaling. Use 5 ms extra for safety,
5435 * like usb_port_resume() does.
5436 */
5437 fusbh200->reset_done[0] = jiffies + msecs_to_jiffies(25);
5438 set_bit(0, &fusbh200->resuming_ports);
5439 fusbh200_dbg (fusbh200, "port 1 remote wakeup\n");
5440 mod_timer(&hcd->rh_timer, fusbh200->reset_done[0]);
5441 }
5442 }
5443
5444 /* PCI errors [4.15.2.4] */
5445 if (unlikely ((status & STS_FATAL) != 0)) {
5446 fusbh200_err(fusbh200, "fatal error\n");
5447 dbg_cmd(fusbh200, "fatal", cmd);
5448 dbg_status(fusbh200, "fatal", status);
5449dead:
5450 usb_hc_died(hcd);
5451
5452 /* Don't let the controller do anything more */
5453 fusbh200->shutdown = true;
5454 fusbh200->rh_state = FUSBH200_RH_STOPPING;
5455 fusbh200->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
5456 fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
5457 fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
5458 fusbh200_handle_controller_death(fusbh200);
5459
5460 /* Handle completions when the controller stops */
5461 bh = 0;
5462 }
5463
5464 if (bh)
5465 fusbh200_work (fusbh200);
5466 spin_unlock (&fusbh200->lock);
5467 if (pcd_status)
5468 usb_hcd_poll_rh_status(hcd);
5469 return IRQ_HANDLED;
5470}
5471
5472/*-------------------------------------------------------------------------*/
5473
5474/*
5475 * non-error returns are a promise to giveback() the urb later
5476 * we drop ownership so next owner (or urb unlink) can get it
5477 *
5478 * urb + dev is in hcd.self.controller.urb_list
5479 * we're queueing TDs onto software and hardware lists
5480 *
5481 * hcd-specific init for hcpriv hasn't been done yet
5482 *
5483 * NOTE: control, bulk, and interrupt share the same code to append TDs
5484 * to a (possibly active) QH, and the same QH scanning code.
5485 */
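/*
 * Illustrative caller-side sketch (not part of this driver): class
 * drivers reach urb_enqueue() only indirectly, through usbcore.  The
 * endpoint number, buffer and completion handler below are made-up
 * placeholders.
 *
 *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
 *			buf, len, my_complete, ctx);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	// usbcore then calls hcd->driver->urb_enqueue(), i.e. the
 *	// function below, passing the same mem_flags.
 */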
5486static int fusbh200_urb_enqueue (
5487 struct usb_hcd *hcd,
5488 struct urb *urb,
5489 gfp_t mem_flags
5490) {
5491 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
5492 struct list_head qtd_list;
5493
5494 INIT_LIST_HEAD (&qtd_list);
5495
5496 switch (usb_pipetype (urb->pipe)) {
5497 case PIPE_CONTROL:
5498 /* qh_completions() code doesn't handle all the fault cases
5499 * in multi-TD control transfers. Even 1KB is rare anyway.
5500 */
5501 if (urb->transfer_buffer_length > (16 * 1024))
5502 return -EMSGSIZE;
5503 /* FALLTHROUGH */
5504 /* case PIPE_BULK: */
5505 default:
5506 if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags))
5507 return -ENOMEM;
5508 return submit_async(fusbh200, urb, &qtd_list, mem_flags);
5509
5510 case PIPE_INTERRUPT:
5511 if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags))
5512 return -ENOMEM;
5513 return intr_submit(fusbh200, urb, &qtd_list, mem_flags);
5514
5515 case PIPE_ISOCHRONOUS:
5516 return itd_submit (fusbh200, urb, mem_flags);
5517 }
5518}
5519
5520/* remove from hardware lists
5521 * completions normally happen asynchronously
5522 */
5523
5524static int fusbh200_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
5525{
5526 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
5527 struct fusbh200_qh *qh;
5528 unsigned long flags;
5529 int rc;
5530
5531 spin_lock_irqsave (&fusbh200->lock, flags);
5532 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
5533 if (rc)
5534 goto done;
5535
5536 switch (usb_pipetype (urb->pipe)) {
5537 // case PIPE_CONTROL:
5538 // case PIPE_BULK:
5539 default:
5540 qh = (struct fusbh200_qh *) urb->hcpriv;
5541 if (!qh)
5542 break;
5543 switch (qh->qh_state) {
5544 case QH_STATE_LINKED:
5545 case QH_STATE_COMPLETING:
5546 start_unlink_async(fusbh200, qh);
5547 break;
5548 case QH_STATE_UNLINK:
5549 case QH_STATE_UNLINK_WAIT:
5550 /* already started */
5551 break;
5552 case QH_STATE_IDLE:
5553 /* QH might be waiting for a Clear-TT-Buffer */
5554 qh_completions(fusbh200, qh);
5555 break;
5556 }
5557 break;
5558
5559 case PIPE_INTERRUPT:
5560 qh = (struct fusbh200_qh *) urb->hcpriv;
5561 if (!qh)
5562 break;
5563 switch (qh->qh_state) {
5564 case QH_STATE_LINKED:
5565 case QH_STATE_COMPLETING:
5566 start_unlink_intr(fusbh200, qh);
5567 break;
5568 case QH_STATE_IDLE:
5569 qh_completions (fusbh200, qh);
5570 break;
5571 default:
5572 fusbh200_dbg (fusbh200, "bogus qh %p state %d\n",
5573 qh, qh->qh_state);
5574 goto done;
5575 }
5576 break;
5577
5578 case PIPE_ISOCHRONOUS:
5579 // itd...
5580
5581 // wait till next completion, do it then.
5582 // completion irqs can wait up to 1024 msec,
5583 break;
5584 }
5585done:
5586 spin_unlock_irqrestore (&fusbh200->lock, flags);
5587 return rc;
5588}
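/*
 * Editorial note: urb_dequeue() above is reached through usbcore when a
 * driver calls usb_unlink_urb() (asynchronous cancel) or usb_kill_urb()
 * (synchronous cancel).  The actual giveback still happens later, from
 * qh_completions() or the iso scan code, which is why the comment above
 * says completions normally happen asynchronously.
 */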
5589
5590/*-------------------------------------------------------------------------*/
5591
5592// bulk qh holds the data toggle
5593
5594static void
5595fusbh200_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
5596{
5597 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
5598 unsigned long flags;
5599 struct fusbh200_qh *qh, *tmp;
5600
5601 /* ASSERT: any requests/urbs are being unlinked */
5602 /* ASSERT: nobody can be submitting urbs for this any more */
5603
5604rescan:
5605 spin_lock_irqsave (&fusbh200->lock, flags);
5606 qh = ep->hcpriv;
5607 if (!qh)
5608 goto done;
5609
5610 /* endpoints can be iso streams. for now, we don't
5611 * accelerate iso completions ... so spin a while.
5612 */
5613 if (qh->hw == NULL) {
5614 struct fusbh200_iso_stream *stream = ep->hcpriv;
5615
5616 if (!list_empty(&stream->td_list))
5617 goto idle_timeout;
5618
5619 /* BUG_ON(!list_empty(&stream->free_list)); */
5620 kfree(stream);
5621 goto done;
5622 }
5623
5624 if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
5625 qh->qh_state = QH_STATE_IDLE;
5626 switch (qh->qh_state) {
5627 case QH_STATE_LINKED:
5628 case QH_STATE_COMPLETING:
5629 for (tmp = fusbh200->async->qh_next.qh;
5630 tmp && tmp != qh;
5631 tmp = tmp->qh_next.qh)
5632 continue;
5633 /* periodic qh self-unlinks on empty, and a COMPLETING qh
5634 * may already be unlinked.
5635 */
5636 if (tmp)
5637 start_unlink_async(fusbh200, qh);
5638 /* FALL THROUGH */
5639 case QH_STATE_UNLINK: /* wait for hw to finish? */
5640 case QH_STATE_UNLINK_WAIT:
5641idle_timeout:
5642 spin_unlock_irqrestore (&fusbh200->lock, flags);
5643 schedule_timeout_uninterruptible(1);
5644 goto rescan;
5645 case QH_STATE_IDLE: /* fully unlinked */
5646 if (qh->clearing_tt)
5647 goto idle_timeout;
5648 if (list_empty (&qh->qtd_list)) {
5649 qh_destroy(fusbh200, qh);
5650 break;
5651 }
5652 /* else FALL THROUGH */
5653 default:
5654 /* caller was supposed to have unlinked any requests;
5655 * that's not our job. just leak this memory.
5656 */
5657 fusbh200_err (fusbh200, "qh %p (#%02x) state %d%s\n",
5658 qh, ep->desc.bEndpointAddress, qh->qh_state,
5659 list_empty (&qh->qtd_list) ? "" : "(has tds)");
5660 break;
5661 }
5662 done:
5663 ep->hcpriv = NULL;
5664 spin_unlock_irqrestore (&fusbh200->lock, flags);
5665}
5666
5667static void
5668fusbh200_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
5669{
5670 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
5671 struct fusbh200_qh *qh;
5672 int eptype = usb_endpoint_type(&ep->desc);
5673 int epnum = usb_endpoint_num(&ep->desc);
5674 int is_out = usb_endpoint_dir_out(&ep->desc);
5675 unsigned long flags;
5676
5677 if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
5678 return;
5679
5680 spin_lock_irqsave(&fusbh200->lock, flags);
5681 qh = ep->hcpriv;
5682
5683 /* For Bulk and Interrupt endpoints we maintain the toggle state
5684 * in the hardware; the toggle bits in udev aren't used at all.
5685 * When an endpoint is reset by usb_clear_halt() we must reset
5686 * the toggle bit in the QH.
5687 */
5688 if (qh) {
5689 usb_settoggle(qh->dev, epnum, is_out, 0);
5690 if (!list_empty(&qh->qtd_list)) {
5691 WARN_ONCE(1, "clear_halt for a busy endpoint\n");
5692 } else if (qh->qh_state == QH_STATE_LINKED ||
5693 qh->qh_state == QH_STATE_COMPLETING) {
5694
5695 /* The toggle value in the QH can't be updated
5696 * while the QH is active. Unlink it now;
5697 * re-linking will call qh_refresh().
5698 */
5699 if (eptype == USB_ENDPOINT_XFER_BULK)
5700 start_unlink_async(fusbh200, qh);
5701 else
5702 start_unlink_intr(fusbh200, qh);
5703 }
5704 }
5705 spin_unlock_irqrestore(&fusbh200->lock, flags);
5706}
5707
5708static int fusbh200_get_frame (struct usb_hcd *hcd)
5709{
5710 struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
5711 return (fusbh200_read_frame_index(fusbh200) >> 3) % fusbh200->periodic_size;
5712}
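/*
 * Editorial note: FRINDEX counts microframes (8 per 1 ms frame), so the
 * ">> 3" above yields a frame number and the modulo wraps it into the
 * periodic schedule.  For example, FRINDEX == 0x2008 gives frame 0x401,
 * which with the default 1024-entry schedule is slot 1.
 */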
5713
5714/*-------------------------------------------------------------------------*/
5715
5716/*
5717 * The EHCI in ChipIdea HDRC cannot be a separate module or device,
5718 * because its registers (and irq) are shared between host/gadget/otg
5719 * functions and in order to facilitate role switching we cannot
5720 * give the fusbh200 driver exclusive access to those.
5721 */
5722MODULE_DESCRIPTION(DRIVER_DESC);
5723MODULE_AUTHOR (DRIVER_AUTHOR);
5724MODULE_LICENSE ("GPL");
5725
5726static const struct hc_driver fusbh200_fusbh200_hc_driver = {
5727 .description = hcd_name,
5728 .product_desc = "Faraday USB2.0 Host Controller",
5729 .hcd_priv_size = sizeof(struct fusbh200_hcd),
5730
5731 /*
5732 * generic hardware linkage
5733 */
5734 .irq = fusbh200_irq,
5735 .flags = HCD_MEMORY | HCD_USB2,
5736
5737 /*
5738 * basic lifecycle operations
5739 */
5740 .reset = hcd_fusbh200_init,
5741 .start = fusbh200_run,
5742 .stop = fusbh200_stop,
5743 .shutdown = fusbh200_shutdown,
5744
5745 /*
5746 * managing i/o requests and associated device resources
5747 */
5748 .urb_enqueue = fusbh200_urb_enqueue,
5749 .urb_dequeue = fusbh200_urb_dequeue,
5750 .endpoint_disable = fusbh200_endpoint_disable,
5751 .endpoint_reset = fusbh200_endpoint_reset,
5752
5753 /*
5754 * scheduling support
5755 */
5756 .get_frame_number = fusbh200_get_frame,
5757
5758 /*
5759 * root hub support
5760 */
5761 .hub_status_data = fusbh200_hub_status_data,
5762 .hub_control = fusbh200_hub_control,
5763 .bus_suspend = fusbh200_bus_suspend,
5764 .bus_resume = fusbh200_bus_resume,
5765
5766 .relinquish_port = fusbh200_relinquish_port,
5767 .port_handed_over = fusbh200_port_handed_over,
5768
5769 .clear_tt_buffer_complete = fusbh200_clear_tt_buffer_complete,
5770};
5771
5772void fusbh200_init(struct fusbh200_hcd *fusbh200)
5773{
5774 u32 reg;
5775
5776 reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmcsr);
5777 reg |= BMCSR_INT_POLARITY;
5778 reg &= ~BMCSR_VBUS_OFF;
5779 fusbh200_writel(fusbh200, reg, &fusbh200->regs->bmcsr);
5780
5781 reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmier);
5782 fusbh200_writel(fusbh200, reg | BMIER_OVC_EN | BMIER_VBUS_ERR_EN,
5783 &fusbh200->regs->bmier);
5784}
5785
5786/**
5787 * fusbh200_hcd_fusbh200_probe - initialize faraday FUSBH200 HCDs
5788 *
5789 * Allocates basic resources for this USB host controller, and
5790 * then invokes the start() method for the HCD associated with it
5791 * via usb_add_hcd().
5792 */
5793static int fusbh200_hcd_fusbh200_probe(struct platform_device *pdev)
5794{
5795 struct device *dev = &pdev->dev;
5796 struct usb_hcd *hcd;
5797 struct resource *res;
5798 int irq;
5799 int retval = -ENODEV;
5800 struct fusbh200_hcd *fusbh200;
5801
5802 if (usb_disabled())
5803 return -ENODEV;
5804
5805 pdev->dev.power.power_state = PMSG_ON;
5806
5807 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
5808 if (!res) {
5809 dev_err(dev,
5810 "Found HC with no IRQ. Check %s setup!\n",
5811 dev_name(dev));
5812 return -ENODEV;
5813 }
5814
5815 irq = res->start;
5816
5817 hcd = usb_create_hcd(&fusbh200_fusbh200_hc_driver, dev,
5818 dev_name(dev));
5819 if (!hcd) {
5820		retval = -ENOMEM;
5821		dev_err(dev, "failed to create hcd with err %d\n", retval);
5822 goto fail_create_hcd;
5823 }
5824
5825 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5826 if (!res) {
5827 dev_err(dev,
5828 "Found HC with no register addr. Check %s setup!\n",
5829 dev_name(dev));
5830 retval = -ENODEV;
5831 goto fail_request_resource;
5832 }
5833
5834 hcd->rsrc_start = res->start;
5835 hcd->rsrc_len = resource_size(res);
5836 hcd->has_tt = 1;
5837
5838 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
5839 fusbh200_fusbh200_hc_driver.description)) {
5840 dev_dbg(dev, "controller already in use\n");
5841 retval = -EBUSY;
5842 goto fail_request_resource;
5843 }
5844
5845 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
5846 if (!res) {
5847		dev_err(dev,
5848			"Found HC with no IO register window. Check %s setup!\n",
5849			dev_name(dev));
5850 retval = -ENODEV;
5851 goto fail_request_resource;
5852 }
5853
5854 hcd->regs = ioremap_nocache(res->start, resource_size(res));
5855 if (hcd->regs == NULL) {
5856 dev_dbg(dev, "error mapping memory\n");
5857 retval = -EFAULT;
5858 goto fail_ioremap;
5859 }
5860
5861 fusbh200 = hcd_to_fusbh200(hcd);
5862
5863 fusbh200->caps = hcd->regs;
5864
5865	retval = fusbh200_setup(hcd);
5866	if (retval)
5867		goto fail_add_hcd;
5868
5869 fusbh200_init(fusbh200);
5870
5871 retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
5872 if (retval) {
5873 dev_err(dev, "failed to add hcd with err %d\n", retval);
5874 goto fail_add_hcd;
5875 }
5876
5877 return retval;
5878
5879fail_add_hcd:
5880 iounmap(hcd->regs);
5881fail_ioremap:
5882 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
5883fail_request_resource:
5884 usb_put_hcd(hcd);
5885fail_create_hcd:
5886 dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
5887 return retval;
5888}
5889
5890/**
5891 * fusbh200_hcd_fusbh200_remove - shutdown processing for EHCI HCDs
5892 * @dev: USB Host Controller being removed
5893 *
5894 * Reverses the effect of fusbh200_hcd_fusbh200_probe(), first invoking
5895 * the HCD's stop() method. It is always called from a thread
5896 * context, normally "rmmod", "apmd", or something similar.
5897 */
5898int fusbh200_hcd_fusbh200_remove(struct platform_device *pdev)
5899{
5900 struct device *dev = &pdev->dev;
5901 struct usb_hcd *hcd = dev_get_drvdata(dev);
5902
5903 if (!hcd)
5904 return 0;
5905
5906 usb_remove_hcd(hcd);
5907 iounmap(hcd->regs);
5908 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
5909 usb_put_hcd(hcd);
5910 platform_set_drvdata(pdev, NULL);
5911
5912 return 0;
5913}
5914
5915struct platform_driver fusbh200_hcd_fusbh200_driver = {
5916 .driver = {
5917 .name = "fusbh200",
5918 },
5919 .probe = fusbh200_hcd_fusbh200_probe,
5920 .remove = fusbh200_hcd_fusbh200_remove,
5921};
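/*
 * Illustrative board-code sketch (not part of this driver): the probe
 * routine above expects a platform device named "fusbh200" that carries
 * an IRQ, an IORESOURCE_MEM region and an IORESOURCE_IO entry for the
 * register window it maps.  All addresses and the IRQ number below are
 * made-up placeholders.
 *
 *	static struct resource fusbh200_resources[] = {
 *		DEFINE_RES_MEM(0x90000000, 0x1000),
 *		DEFINE_RES_IO(0x90000000, 0x1000),
 *		DEFINE_RES_IRQ(29),
 *	};
 *
 *	platform_device_register_simple("fusbh200", -1,
 *			fusbh200_resources, ARRAY_SIZE(fusbh200_resources));
 */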
5922
5923static int __init fusbh200_hcd_init(void)
5924{
5925 int retval = 0;
5926
5927 if (usb_disabled())
5928 return -ENODEV;
5929
5930 printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
5931 set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
5932 if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
5933 test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
5934 printk(KERN_WARNING "Warning! fusbh200_hcd should always be loaded"
5935 " before uhci_hcd and ohci_hcd, not after\n");
5936
5937 pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n",
5938 hcd_name,
5939 sizeof(struct fusbh200_qh), sizeof(struct fusbh200_qtd),
5940 sizeof(struct fusbh200_itd));
5941
5942#ifdef DEBUG
5943 fusbh200_debug_root = debugfs_create_dir("fusbh200", usb_debug_root);
5944 if (!fusbh200_debug_root) {
5945 retval = -ENOENT;
5946 goto err_debug;
5947 }
5948#endif
5949
5950 retval = platform_driver_register(&fusbh200_hcd_fusbh200_driver);
5951 if (retval < 0)
5952 goto clean;
5953 return retval;
5954
5956clean:
5957#ifdef DEBUG
5958 debugfs_remove(fusbh200_debug_root);
5959 fusbh200_debug_root = NULL;
5960err_debug:
5961#endif
5962 clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
5963 return retval;
5964}
5965module_init(fusbh200_hcd_init);
5966
5967static void __exit fusbh200_hcd_cleanup(void)
5968{
5969 platform_driver_unregister(&fusbh200_hcd_fusbh200_driver);
5970#ifdef DEBUG
5971 debugfs_remove(fusbh200_debug_root);
5972#endif
5973 clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
5974}
5975module_exit(fusbh200_hcd_cleanup);
diff --git a/drivers/usb/host/fusbh200.h b/drivers/usb/host/fusbh200.h
new file mode 100644
index 000000000000..797c9e855270
--- /dev/null
+++ b/drivers/usb/host/fusbh200.h
@@ -0,0 +1,743 @@
1#ifndef __LINUX_FUSBH200_H
2#define __LINUX_FUSBH200_H
3
4/* definitions used for the EHCI driver */
5
6/*
7 * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
8 * __leXX (normally) or __beXX (given FUSBH200_BIG_ENDIAN_DESC), depending on
9 * the host controller implementation.
10 *
11 * To facilitate the strongest possible byte-order checking from "sparse"
12 * and so on, we use __leXX unless that's not practical.
13 */
14#define __hc32 __le32
15#define __hc16 __le16
16
17/* statistics can be kept for tuning/monitoring */
18struct fusbh200_stats {
19 /* irq usage */
20 unsigned long normal;
21 unsigned long error;
22 unsigned long iaa;
23 unsigned long lost_iaa;
24
25 /* termination of urbs from core */
26 unsigned long complete;
27 unsigned long unlink;
28};
29
30/* fusbh200_hcd->lock guards shared data against other CPUs:
31 * fusbh200_hcd: async, unlink, periodic (and shadow), ...
32 * usb_host_endpoint: hcpriv
33 * fusbh200_qh: qh_next, qtd_list
34 * fusbh200_qtd: qtd_list
35 *
36 * Also, hold this lock when talking to HC registers or
37 * when updating hw_* fields in shared qh/qtd/... structures.
38 */
39
40#define FUSBH200_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */
41
42/*
43 * fusbh200_rh_state values of FUSBH200_RH_RUNNING or above mean that the
44 * controller may be doing DMA. Lower values mean there's no DMA.
45 */
46enum fusbh200_rh_state {
47 FUSBH200_RH_HALTED,
48 FUSBH200_RH_SUSPENDED,
49 FUSBH200_RH_RUNNING,
50 FUSBH200_RH_STOPPING
51};
52
53/*
54 * Timer events, ordered by increasing delay length.
55 * Always update event_delays_ns[] and event_handlers[] (defined in
56 * ehci-timer.c) in parallel with this list.
57 */
58enum fusbh200_hrtimer_event {
59 FUSBH200_HRTIMER_POLL_ASS, /* Poll for async schedule off */
60 FUSBH200_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
61 FUSBH200_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
62 FUSBH200_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
63 FUSBH200_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
64 FUSBH200_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
65 FUSBH200_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
66 FUSBH200_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
67 FUSBH200_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
68 FUSBH200_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
69 FUSBH200_HRTIMER_NUM_EVENTS /* Must come last */
70};
71#define FUSBH200_HRTIMER_NO_EVENT 99
72
73struct fusbh200_hcd { /* one per controller */
74 /* timing support */
75 enum fusbh200_hrtimer_event next_hrtimer_event;
76 unsigned enabled_hrtimer_events;
77 ktime_t hr_timeouts[FUSBH200_HRTIMER_NUM_EVENTS];
78 struct hrtimer hrtimer;
79
80 int PSS_poll_count;
81 int ASS_poll_count;
82 int died_poll_count;
83
84 /* glue to PCI and HCD framework */
85 struct fusbh200_caps __iomem *caps;
86 struct fusbh200_regs __iomem *regs;
87 struct fusbh200_dbg_port __iomem *debug;
88
89 __u32 hcs_params; /* cached register copy */
90 spinlock_t lock;
91 enum fusbh200_rh_state rh_state;
92
93 /* general schedule support */
94 bool scanning:1;
95 bool need_rescan:1;
96 bool intr_unlinking:1;
97 bool async_unlinking:1;
98 bool shutdown:1;
99 struct fusbh200_qh *qh_scan_next;
100
101 /* async schedule support */
102 struct fusbh200_qh *async;
103 struct fusbh200_qh *dummy; /* For AMD quirk use */
104 struct fusbh200_qh *async_unlink;
105 struct fusbh200_qh *async_unlink_last;
106 struct fusbh200_qh *async_iaa;
107 unsigned async_unlink_cycle;
108 unsigned async_count; /* async activity count */
109
110 /* periodic schedule support */
111#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
112 unsigned periodic_size;
113 __hc32 *periodic; /* hw periodic table */
114 dma_addr_t periodic_dma;
115 struct list_head intr_qh_list;
116 unsigned i_thresh; /* uframes HC might cache */
117
118 union fusbh200_shadow *pshadow; /* mirror hw periodic table */
119 struct fusbh200_qh *intr_unlink;
120 struct fusbh200_qh *intr_unlink_last;
121 unsigned intr_unlink_cycle;
122 unsigned now_frame; /* frame from HC hardware */
123 unsigned next_frame; /* scan periodic, start here */
124 unsigned intr_count; /* intr activity count */
125 unsigned isoc_count; /* isoc activity count */
126 unsigned periodic_count; /* periodic activity count */
127 unsigned uframe_periodic_max; /* max periodic time per uframe */
128
129
130 /* list of itds completed while now_frame was still active */
131 struct list_head cached_itd_list;
132 struct fusbh200_itd *last_itd_to_free;
133
134 /* per root hub port */
135 unsigned long reset_done [FUSBH200_MAX_ROOT_PORTS];
136
137 /* bit vectors (one bit per port) */
138 unsigned long bus_suspended; /* which ports were
139 already suspended at the start of a bus suspend */
140 unsigned long companion_ports; /* which ports are
141 dedicated to the companion controller */
142 unsigned long owned_ports; /* which ports are
143 owned by the companion during a bus suspend */
144 unsigned long port_c_suspend; /* which ports have
145 the change-suspend feature turned on */
146 unsigned long suspended_ports; /* which ports are
147 suspended */
148 unsigned long resuming_ports; /* which ports have
149 started to resume */
150
151 /* per-HC memory pools (could be per-bus, but ...) */
152 struct dma_pool *qh_pool; /* qh per active urb */
153 struct dma_pool *qtd_pool; /* one or more per qh */
154 struct dma_pool *itd_pool; /* itd per iso urb */
155
156 unsigned random_frame;
157 unsigned long next_statechange;
158 ktime_t last_periodic_enable;
159 u32 command;
160
161 /* SILICON QUIRKS */
162 unsigned need_io_watchdog:1;
163 unsigned fs_i_thresh:1; /* Intel iso scheduling */
164
165 u8 sbrn; /* packed release number */
166
167 /* irq statistics */
168#ifdef FUSBH200_STATS
169 struct fusbh200_stats stats;
170# define COUNT(x) do { (x)++; } while (0)
171#else
172# define COUNT(x) do {} while (0)
173#endif
174
175 /* debug files */
176#ifdef DEBUG
177 struct dentry *debug_dir;
178#endif
179};
180
181/* convert between an HCD pointer and the corresponding FUSBH200_HCD */
182static inline struct fusbh200_hcd *hcd_to_fusbh200 (struct usb_hcd *hcd)
183{
184 return (struct fusbh200_hcd *) (hcd->hcd_priv);
185}
186static inline struct usb_hcd *fusbh200_to_hcd (struct fusbh200_hcd *fusbh200)
187{
188 return container_of ((void *) fusbh200, struct usb_hcd, hcd_priv);
189}
190
191/*-------------------------------------------------------------------------*/
192
193/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
194
195/* Section 2.2 Host Controller Capability Registers */
196struct fusbh200_caps {
197 /* these fields are specified as 8 and 16 bit registers,
198 * but some hosts can't perform 8 or 16 bit PCI accesses.
199	 * some hosts treat caplength and hciversion as parts of a 32-bit
200	 * register, while others treat them as two separate registers; this
201	 * affects the memory map for big endian controllers.
202 */
203 u32 hc_capbase;
204#define HC_LENGTH(fusbh200, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
205 (fusbh200_big_endian_capbase(fusbh200) ? 24 : 0)))
206#define HC_VERSION(fusbh200, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
207 (fusbh200_big_endian_capbase(fusbh200) ? 0 : 16)))
208 u32 hcs_params; /* HCSPARAMS - offset 0x4 */
209#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
210
211 u32 hcc_params; /* HCCPARAMS - offset 0x8 */
212#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
213#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
214 u8 portroute[8]; /* nibbles for routing - offset 0xC */
215};
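/* Illustrative decode (assumed register value, little-endian capbase):
 * hc_capbase == 0x01000010 gives HC_LENGTH() == 0x10, so the operational
 * registers start 0x10 bytes past the capability base, and
 * HC_VERSION() == 0x0100, i.e. an EHCI 1.0 compatible interface.
 */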
216
217
218/* Section 2.3 Host Controller Operational Registers */
219struct fusbh200_regs {
220
221 /* USBCMD: offset 0x00 */
222 u32 command;
223
224/* EHCI 1.1 addendum */
225/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
226#define CMD_PARK (1<<11) /* enable "park" on async qh */
227#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
228#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
229#define CMD_ASE (1<<5) /* async schedule enable */
230#define CMD_PSE (1<<4) /* periodic schedule enable */
231/* 3:2 is periodic frame list size */
232#define CMD_RESET (1<<1) /* reset HC not bus */
233#define CMD_RUN (1<<0) /* start/stop HC */
234
235 /* USBSTS: offset 0x04 */
236 u32 status;
237#define STS_ASS (1<<15) /* Async Schedule Status */
238#define STS_PSS (1<<14) /* Periodic Schedule Status */
239#define STS_RECL (1<<13) /* Reclamation */
240#define STS_HALT (1<<12) /* Not running (any reason) */
241/* some bits reserved */
242 /* these STS_* flags are also intr_enable bits (USBINTR) */
243#define STS_IAA (1<<5) /* Interrupted on async advance */
244#define STS_FATAL (1<<4) /* such as some PCI access errors */
245#define STS_FLR (1<<3) /* frame list rolled over */
246#define STS_PCD (1<<2) /* port change detect */
247#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
248#define STS_INT (1<<0) /* "normal" completion (short, ...) */
249
250 /* USBINTR: offset 0x08 */
251 u32 intr_enable;
252
253 /* FRINDEX: offset 0x0C */
254 u32 frame_index; /* current microframe number */
255 /* CTRLDSSEGMENT: offset 0x10 */
256 u32 segment; /* address bits 63:32 if needed */
257 /* PERIODICLISTBASE: offset 0x14 */
258 u32 frame_list; /* points to periodic list */
259 /* ASYNCLISTADDR: offset 0x18 */
260 u32 async_next; /* address of next async queue head */
261
262 u32 reserved1;
263 /* PORTSC: offset 0x20 */
264 u32 port_status;
265/* 31:23 reserved */
266#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
267#define PORT_RESET (1<<8) /* reset port */
268#define PORT_SUSPEND (1<<7) /* suspend port */
269#define PORT_RESUME (1<<6) /* resume it */
270#define PORT_PEC (1<<3) /* port enable change */
271#define PORT_PE (1<<2) /* port enable */
272#define PORT_CSC (1<<1) /* connect status change */
273#define PORT_CONNECT (1<<0) /* device connected */
274#define PORT_RWC_BITS (PORT_CSC | PORT_PEC)
275
276 u32 reserved2[3];
277
278 /* BMCSR: offset 0x30 */
279	u32		bmcsr; /* Bus Monitor Control/Status Register */
280#define BMCSR_HOST_SPD_TYP (3<<9)
281#define BMCSR_VBUS_OFF (1<<4)
282#define BMCSR_INT_POLARITY (1<<3)
283
284 /* BMISR: offset 0x34 */
285	u32		bmisr; /* Bus Monitor Interrupt Status Register */
286#define BMISR_OVC (1<<1)
287
288 /* BMIER: offset 0x38 */
289	u32		bmier; /* Bus Monitor Interrupt Enable Register */
290#define BMIER_OVC_EN (1<<1)
291#define BMIER_VBUS_ERR_EN (1<<0)
292};
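/* Illustrative usage sketch (assumes a valid fusbh200_hcd pointer; hold
 * fusbh200->lock when touching these registers, per the comment above):
 *
 *	u32 portsc = fusbh200_readl(fusbh200, &fusbh200->regs->port_status);
 *
 *	if (portsc & PORT_CONNECT)
 *		...		// a device is attached to the root-hub port
 *	if (portsc & PORT_RWC_BITS)
 *		...		// write-1-to-clear change bits are pending
 */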
293
294/* Appendix C, Debug port ... intended for use with special "debug devices"
295 * that can help if there's no serial console. (nonstandard enumeration.)
296 */
297struct fusbh200_dbg_port {
298 u32 control;
299#define DBGP_OWNER (1<<30)
300#define DBGP_ENABLED (1<<28)
301#define DBGP_DONE (1<<16)
302#define DBGP_INUSE (1<<10)
303#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
304# define DBGP_ERR_BAD 1
305# define DBGP_ERR_SIGNAL 2
306#define DBGP_ERROR (1<<6)
307#define DBGP_GO (1<<5)
308#define DBGP_OUT (1<<4)
309#define DBGP_LEN(x) (((x)>>0)&0x0f)
310 u32 pids;
311#define DBGP_PID_GET(x) (((x)>>16)&0xff)
312#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
313 u32 data03;
314 u32 data47;
315 u32 address;
316#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
317};
318
319#ifdef CONFIG_EARLY_PRINTK_DBGP
320#include <linux/init.h>
321extern int __init early_dbgp_init(char *s);
322extern struct console early_dbgp_console;
323#endif /* CONFIG_EARLY_PRINTK_DBGP */
324
325struct usb_hcd;
326
327static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
328{
329 return 1; /* Shouldn't this be 0? */
330}
331
332static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
333{
334 return -1;
335}
336
337#ifdef CONFIG_EARLY_PRINTK_DBGP
338/* Call backs from fusbh200 host driver to fusbh200 debug driver */
339extern int dbgp_external_startup(struct usb_hcd *);
340extern int dbgp_reset_prep(struct usb_hcd *hcd);
341#else
342static inline int dbgp_reset_prep(struct usb_hcd *hcd)
343{
344 return xen_dbgp_reset_prep(hcd);
345}
346static inline int dbgp_external_startup(struct usb_hcd *hcd)
347{
348 return xen_dbgp_external_startup(hcd);
349}
350#endif
351
352/*-------------------------------------------------------------------------*/
353
354#define QTD_NEXT(fusbh200, dma) cpu_to_hc32(fusbh200, (u32)dma)
355
356/*
357 * EHCI Specification 0.95 Section 3.5
358 * QTD: describe data transfer components (buffer, direction, ...)
359 * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
360 *
361 * These are associated only with "QH" (Queue Head) structures,
362 * used with control, bulk, and interrupt transfers.
363 */
364struct fusbh200_qtd {
365 /* first part defined by EHCI spec */
366 __hc32 hw_next; /* see EHCI 3.5.1 */
367 __hc32 hw_alt_next; /* see EHCI 3.5.2 */
368 __hc32 hw_token; /* see EHCI 3.5.3 */
369#define QTD_TOGGLE (1 << 31) /* data toggle */
370#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
371#define QTD_IOC (1 << 15) /* interrupt on complete */
372#define QTD_CERR(tok) (((tok)>>10) & 0x3)
373#define QTD_PID(tok) (((tok)>>8) & 0x3)
374#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
375#define QTD_STS_HALT (1 << 6) /* halted on error */
376#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
377#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
378#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
379#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
380#define QTD_STS_STS (1 << 1) /* split transaction state */
381#define QTD_STS_PING (1 << 0) /* issue PING? */
382
383#define ACTIVE_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_ACTIVE)
384#define HALT_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_HALT)
385#define STATUS_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_STS)
386
387 __hc32 hw_buf [5]; /* see EHCI 3.5.4 */
388 __hc32 hw_buf_hi [5]; /* Appendix B */
389
390 /* the rest is HCD-private */
391 dma_addr_t qtd_dma; /* qtd address */
392 struct list_head qtd_list; /* sw qtd list */
393 struct urb *urb; /* qtd's urb */
394 size_t length; /* length of buffer */
395} __attribute__ ((aligned (32)));
396
397/* mask NakCnt+T in qh->hw_alt_next */
398#define QTD_MASK(fusbh200) cpu_to_hc32 (fusbh200, ~0x1f)
399
400#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
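/* Illustrative token decode (assumed value): tok == 0x02008d80 has
 * QTD_STS_ACTIVE and QTD_IOC set, QTD_CERR(tok) == 3, QTD_PID(tok) == 1
 * (IN) and QTD_LENGTH(tok) == 512 bytes still to transfer; a retired
 * token with those length/PID values would make IS_SHORT_READ() true.
 */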
401
402/*-------------------------------------------------------------------------*/
403
404/* type tag from {qh,itd,fstn}->hw_next */
405#define Q_NEXT_TYPE(fusbh200,dma) ((dma) & cpu_to_hc32(fusbh200, 3 << 1))
406
407/*
408 * Now the following defines are not converted using the
409 * cpu_to_le32() macro anymore, since we have to support
410 * "dynamic" switching between be and le support, so that the driver
411 * can be used on one system with SoC EHCI controller using big-endian
412 * descriptors as well as a normal little-endian PCI EHCI controller.
413 */
414/* values for that type tag */
415#define Q_TYPE_ITD (0 << 1)
416#define Q_TYPE_QH (1 << 1)
417#define Q_TYPE_SITD (2 << 1)
418#define Q_TYPE_FSTN (3 << 1)
419
420/* next async queue entry, or pointer to interrupt/periodic QH */
421#define QH_NEXT(fusbh200,dma) (cpu_to_hc32(fusbh200, (((u32)dma)&~0x01f)|Q_TYPE_QH))
422
423/* for periodic/async schedules and qtd lists, mark end of list */
424#define FUSBH200_LIST_END(fusbh200) cpu_to_hc32(fusbh200, 1) /* "null pointer" to hw */
425
426/*
427 * Entries in periodic shadow table are pointers to one of four kinds
428 * of data structure. That's dictated by the hardware; a type tag is
429 * encoded in the low bits of the hardware's periodic schedule. Use
430 * Q_NEXT_TYPE to get the tag.
431 *
432 * For entries in the async schedule, the type tag always says "qh".
433 */
434union fusbh200_shadow {
435 struct fusbh200_qh *qh; /* Q_TYPE_QH */
436 struct fusbh200_itd *itd; /* Q_TYPE_ITD */
437 struct fusbh200_fstn *fstn; /* Q_TYPE_FSTN */
438 __hc32 *hw_next; /* (all types) */
439 void *ptr;
440};
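/* Illustrative traversal sketch (not part of the driver): the hardware
 * periodic table and its shadow are walked in parallel, with
 * Q_NEXT_TYPE() selecting which union member is valid.  'fusbh200' and
 * 'frame' are assumed to be in scope.
 *
 *	__hc32 here = fusbh200->periodic[frame];
 *	union fusbh200_shadow *shadow = &fusbh200->pshadow[frame];
 *
 *	if (here != FUSBH200_LIST_END(fusbh200))
 *		switch (hc32_to_cpu(fusbh200, Q_NEXT_TYPE(fusbh200, here))) {
 *		case Q_TYPE_QH:
 *			// use shadow->qh
 *			break;
 *		case Q_TYPE_ITD:
 *			// use shadow->itd
 *			break;
 *		}
 */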
441
442/*-------------------------------------------------------------------------*/
443
444/*
445 * EHCI Specification 0.95 Section 3.6
446 * QH: describes control/bulk/interrupt endpoints
447 * See Fig 3-7 "Queue Head Structure Layout".
448 *
449 * These appear in both the async and (for interrupt) periodic schedules.
450 */
451
452/* first part defined by EHCI spec */
453struct fusbh200_qh_hw {
454 __hc32 hw_next; /* see EHCI 3.6.1 */
455 __hc32 hw_info1; /* see EHCI 3.6.2 */
456#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
457#define QH_HEAD (1 << 15) /* Head of async reclamation list */
458#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
459#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
460#define QH_LOW_SPEED (1 << 12)
461#define QH_FULL_SPEED (0 << 12)
462#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
463 __hc32 hw_info2; /* see EHCI 3.6.2 */
464#define QH_SMASK 0x000000ff
465#define QH_CMASK 0x0000ff00
466#define QH_HUBADDR 0x007f0000
467#define QH_HUBPORT 0x3f800000
468#define QH_MULT 0xc0000000
469 __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
470
471 /* qtd overlay (hardware parts of a struct fusbh200_qtd) */
472 __hc32 hw_qtd_next;
473 __hc32 hw_alt_next;
474 __hc32 hw_token;
475 __hc32 hw_buf [5];
476 __hc32 hw_buf_hi [5];
477} __attribute__ ((aligned(32)));
478
479struct fusbh200_qh {
480 struct fusbh200_qh_hw *hw; /* Must come first */
481 /* the rest is HCD-private */
482 dma_addr_t qh_dma; /* address of qh */
483 union fusbh200_shadow qh_next; /* ptr to qh; or periodic */
484 struct list_head qtd_list; /* sw qtd list */
485 struct list_head intr_node; /* list of intr QHs */
486 struct fusbh200_qtd *dummy;
487 struct fusbh200_qh *unlink_next; /* next on unlink list */
488
489 unsigned unlink_cycle;
490
491 u8 needs_rescan; /* Dequeue during giveback */
492 u8 qh_state;
493#define QH_STATE_LINKED 1 /* HC sees this */
494#define QH_STATE_UNLINK 2 /* HC may still see this */
495#define QH_STATE_IDLE 3 /* HC doesn't see this */
496#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
497#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
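/* Editorial note (rough lifecycle, as implemented by the queue code in
 * fusbh200-hcd.c): a qh starts out IDLE, becomes LINKED when put onto
 * the async or periodic schedule, moves through the UNLINK states while
 * being taken off it, and returns to IDLE once the hardware can no
 * longer reach it; COMPLETING marks a qh whose qtd list is currently
 * being scanned by qh_completions().
 */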
498
499 u8 xacterrs; /* XactErr retry counter */
500#define QH_XACTERR_MAX 32 /* XactErr retry limit */
501
502 /* periodic schedule info */
503 u8 usecs; /* intr bandwidth */
504 u8 gap_uf; /* uframes split/csplit gap */
505 u8 c_usecs; /* ... split completion bw */
506 u16 tt_usecs; /* tt downstream bandwidth */
507 unsigned short period; /* polling interval */
508 unsigned short start; /* where polling starts */
509#define NO_FRAME ((unsigned short)~0) /* pick new start */
510
511 struct usb_device *dev; /* access to TT */
512 unsigned is_out:1; /* bulk or intr OUT */
513 unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
514};
515
516/*-------------------------------------------------------------------------*/
517
518/* description of one iso transaction (up to 3 KB data if highspeed) */
519struct fusbh200_iso_packet {
520 /* These will be copied to iTD when scheduling */
521 u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
522 __hc32 transaction; /* itd->hw_transaction[i] |= */
523 u8 cross; /* buf crosses pages */
524 /* for full speed OUT splits */
525 u32 buf1;
526};
527
528/* temporary schedule data for packets from iso urbs (both speeds)
529 * each packet is one logical usb transaction to the device (not TT),
530 * beginning at stream->next_uframe
531 */
532struct fusbh200_iso_sched {
533 struct list_head td_list;
534 unsigned span;
535 struct fusbh200_iso_packet packet [0];
536};
537
538/*
539 * fusbh200_iso_stream - groups all (s)itds for this endpoint.
540 * acts like a qh would, if EHCI had them for ISO.
541 */
542struct fusbh200_iso_stream {
543	/* first field matches fusbh200_qh, but is NULL */
544 struct fusbh200_qh_hw *hw;
545
546 u8 bEndpointAddress;
547 u8 highspeed;
548 struct list_head td_list; /* queued itds */
549 struct list_head free_list; /* list of unused itds */
550 struct usb_device *udev;
551 struct usb_host_endpoint *ep;
552
553 /* output of (re)scheduling */
554 int next_uframe;
555 __hc32 splits;
556
557 /* the rest is derived from the endpoint descriptor,
558 * trusting urb->interval == f(epdesc->bInterval) and
559 * including the extra info for hw_bufp[0..2]
560 */
561 u8 usecs, c_usecs;
562 u16 interval;
563 u16 tt_usecs;
564 u16 maxp;
565 u16 raw_mask;
566 unsigned bandwidth;
567
568 /* This is used to initialize iTD's hw_bufp fields */
569 __hc32 buf0;
570 __hc32 buf1;
571 __hc32 buf2;
572
573 /* this is used to initialize sITD's tt info */
574 __hc32 address;
575};
576
577/*-------------------------------------------------------------------------*/
578
579/*
580 * EHCI Specification 0.95 Section 3.3
581 * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
582 *
583 * Schedule records for high speed iso xfers
584 */
585struct fusbh200_itd {
586 /* first part defined by EHCI spec */
587 __hc32 hw_next; /* see EHCI 3.3.1 */
588 __hc32 hw_transaction [8]; /* see EHCI 3.3.2 */
589#define FUSBH200_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
590#define FUSBH200_ISOC_BUF_ERR (1<<30) /* Data buffer error */
591#define FUSBH200_ISOC_BABBLE (1<<29) /* babble detected */
592#define FUSBH200_ISOC_XACTERR (1<<28) /* XactErr - transaction error */
593#define FUSBH200_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
594#define FUSBH200_ITD_IOC (1 << 15) /* interrupt on complete */
595
596#define ITD_ACTIVE(fusbh200) cpu_to_hc32(fusbh200, FUSBH200_ISOC_ACTIVE)
597
598 __hc32 hw_bufp [7]; /* see EHCI 3.3.3 */
599 __hc32 hw_bufp_hi [7]; /* Appendix B */
600
601 /* the rest is HCD-private */
602 dma_addr_t itd_dma; /* for this itd */
603 union fusbh200_shadow itd_next; /* ptr to periodic q entry */
604
605 struct urb *urb;
606 struct fusbh200_iso_stream *stream; /* endpoint's queue */
607 struct list_head itd_list; /* list of stream's itds */
608
609 /* any/all hw_transactions here may be used by that urb */
610 unsigned frame; /* where scheduled */
611 unsigned pg;
612 unsigned index[8]; /* in urb->iso_frame_desc */
613} __attribute__ ((aligned (32)));
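/* Illustrative transaction decode (assumed value): hw_transaction[i] ==
 * cpu_to_hc32(fusbh200, 0x84008000) describes a still-active slot
 * (FUSBH200_ISOC_ACTIVE) with interrupt-on-complete set and
 * FUSBH200_ITD_LENGTH() == 1024 bytes; the buffer-error, babble and
 * XactErr bits are all clear.
 */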
614
615/*-------------------------------------------------------------------------*/
616
617/*
618 * EHCI Specification 0.96 Section 3.7
619 * Periodic Frame Span Traversal Node (FSTN)
620 *
621 * Manages split interrupt transactions (using TT) that span frame boundaries
622 * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN
623 * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
624 * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
625 */
626struct fusbh200_fstn {
627 __hc32 hw_next; /* any periodic q entry */
628 __hc32 hw_prev; /* qh or FUSBH200_LIST_END */
629
630 /* the rest is HCD-private */
631 dma_addr_t fstn_dma;
632 union fusbh200_shadow fstn_next; /* ptr to periodic q entry */
633} __attribute__ ((aligned (32)));
634
635/*-------------------------------------------------------------------------*/
636
637/* Prepare the PORTSC wakeup flags during controller suspend/resume */
638
639#define fusbh200_prepare_ports_for_controller_suspend(fusbh200, do_wakeup) \
640 fusbh200_adjust_port_wakeup_flags(fusbh200, true, do_wakeup);
641
642#define fusbh200_prepare_ports_for_controller_resume(fusbh200) \
643 fusbh200_adjust_port_wakeup_flags(fusbh200, false, false);
644
645/*-------------------------------------------------------------------------*/
646
647/*
648 * Some EHCI controllers have a Transaction Translator built into the
649 * root hub. This is a non-standard feature. Each controller will need
650 * to add code to the following inline functions, and call them as
651 * needed (mostly in root hub code).
652 */
653
654static inline unsigned int
655fusbh200_get_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc)
656{
657 return (readl(&fusbh200->regs->bmcsr)
658 & BMCSR_HOST_SPD_TYP) >> 9;
659}
660
661/* Returns the speed of a device attached to a port on the root hub. */
662static inline unsigned int
663fusbh200_port_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc)
664{
665 switch (fusbh200_get_speed(fusbh200, portsc)) {
666 case 0:
667 return 0;
668 case 1:
669 return USB_PORT_STAT_LOW_SPEED;
670 case 2:
671 default:
672 return USB_PORT_STAT_HIGH_SPEED;
673 }
674}
675
676/*-------------------------------------------------------------------------*/
677
678#define fusbh200_has_fsl_portno_bug(e) (0)
679
680/*
681 * While most USB host controllers implement their registers in
682 * little-endian format, a minority (celleb companion chip) implement
683 * them in big endian format.
684 *
685 * This attempts to support either format at compile time without a
686 * runtime penalty, or both formats with the additional overhead
687 * of checking a flag bit.
688 *
689 */
690
691#define fusbh200_big_endian_mmio(e) 0
692#define fusbh200_big_endian_capbase(e) 0
693
694static inline unsigned int fusbh200_readl(const struct fusbh200_hcd *fusbh200,
695 __u32 __iomem * regs)
696{
697 return readl(regs);
698}
699
700static inline void fusbh200_writel(const struct fusbh200_hcd *fusbh200,
701 const unsigned int val, __u32 __iomem *regs)
702{
703 writel(val, regs);
704}
705
706/* cpu to fusbh200 */
707static inline __hc32 cpu_to_hc32 (const struct fusbh200_hcd *fusbh200, const u32 x)
708{
709 return cpu_to_le32(x);
710}
711
712/* fusbh200 to cpu */
713static inline u32 hc32_to_cpu (const struct fusbh200_hcd *fusbh200, const __hc32 x)
714{
715 return le32_to_cpu(x);
716}
717
718static inline u32 hc32_to_cpup (const struct fusbh200_hcd *fusbh200, const __hc32 *x)
719{
720 return le32_to_cpup(x);
721}
722
723/*-------------------------------------------------------------------------*/
724
725static inline unsigned fusbh200_read_frame_index(struct fusbh200_hcd *fusbh200)
726{
727 return fusbh200_readl(fusbh200, &fusbh200->regs->frame_index);
728}
729
730#define fusbh200_itdlen(urb, desc, t) ({ \
731 usb_pipein((urb)->pipe) ? \
732 (desc)->length - FUSBH200_ITD_LENGTH(t) : \
733 FUSBH200_ITD_LENGTH(t); \
734})
735/*-------------------------------------------------------------------------*/
736
737#ifndef DEBUG
738#define STUB_DEBUG_FILES
739#endif /* DEBUG */
740
741/*-------------------------------------------------------------------------*/
742
743#endif /* __LINUX_FUSBH200_H */