aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb/host
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/usb/host
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--drivers/usb/host/Kconfig126
-rw-r--r--drivers/usb/host/Makefile10
-rw-r--r--drivers/usb/host/ehci-dbg.c755
-rw-r--r--drivers/usb/host/ehci-hcd.c1261
-rw-r--r--drivers/usb/host/ehci-hub.c553
-rw-r--r--drivers/usb/host/ehci-mem.c237
-rw-r--r--drivers/usb/host/ehci-q.c1090
-rw-r--r--drivers/usb/host/ehci-sched.c1999
-rw-r--r--drivers/usb/host/ehci.h637
-rw-r--r--drivers/usb/host/hc_crisv10.c4556
-rw-r--r--drivers/usb/host/hc_crisv10.h289
-rw-r--r--drivers/usb/host/ohci-au1xxx.c284
-rw-r--r--drivers/usb/host/ohci-dbg.c707
-rw-r--r--drivers/usb/host/ohci-hcd.c925
-rw-r--r--drivers/usb/host/ohci-hub.c643
-rw-r--r--drivers/usb/host/ohci-lh7a404.c266
-rw-r--r--drivers/usb/host/ohci-mem.c139
-rw-r--r--drivers/usb/host/ohci-omap.c560
-rw-r--r--drivers/usb/host/ohci-pci.c264
-rw-r--r--drivers/usb/host/ohci-ppc-soc.c234
-rw-r--r--drivers/usb/host/ohci-pxa27x.c383
-rw-r--r--drivers/usb/host/ohci-q.c1107
-rw-r--r--drivers/usb/host/ohci-sa1111.c289
-rw-r--r--drivers/usb/host/ohci.h636
-rw-r--r--drivers/usb/host/sl811-hcd.c1851
-rw-r--r--drivers/usb/host/sl811.h266
-rw-r--r--drivers/usb/host/uhci-debug.c587
-rw-r--r--drivers/usb/host/uhci-hcd.c919
-rw-r--r--drivers/usb/host/uhci-hcd.h454
-rw-r--r--drivers/usb/host/uhci-hub.c299
-rw-r--r--drivers/usb/host/uhci-q.c1539
31 files changed, 23865 insertions, 0 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
new file mode 100644
index 000000000000..3196c3265ff5
--- /dev/null
+++ b/drivers/usb/host/Kconfig
@@ -0,0 +1,126 @@
1#
2# USB Host Controller Drivers
3#
4comment "USB Host Controller Drivers"
5 depends on USB
6
7config USB_EHCI_HCD
8 tristate "EHCI HCD (USB 2.0) support"
9 depends on USB && PCI
10 ---help---
11 The Enhanced Host Controller Interface (EHCI) is standard for USB 2.0
12 "high speed" (480 Mbit/sec, 60 Mbyte/sec) host controller hardware.
13 If your USB host controller supports USB 2.0, you will likely want to
14 configure this Host Controller Driver. At this writing, the primary
15 implementation of EHCI is a chip from NEC, widely available in add-on
16 PCI cards, but implementations are in the works from other vendors
17 including Intel and Philips. Motherboard support is appearing.
18
19 EHCI controllers are packaged with "companion" host controllers (OHCI
20 or UHCI) to handle USB 1.1 devices connected to root hub ports. Ports
21 will connect to EHCI if the device is high speed, otherwise they
22 connect to a companion controller. If you configure EHCI, you should
23 probably configure the OHCI (for NEC and some other vendors) USB Host
24 Controller Driver or UHCI (for Via motherboards) Host Controller
25 Driver too.
26
27 You may want to read <file:Documentation/usb/ehci.txt>.
28
29 To compile this driver as a module, choose M here: the
30 module will be called ehci-hcd.
31
32config USB_EHCI_SPLIT_ISO
33 bool "Full speed ISO transactions (EXPERIMENTAL)"
34 depends on USB_EHCI_HCD && EXPERIMENTAL
35 default n
36 ---help---
37 This code is new and hasn't been used with many different
38 EHCI or USB 2.0 transaction translator implementations.
39 It should work for ISO-OUT transfers, like audio.
40
41config USB_EHCI_ROOT_HUB_TT
42 bool "Root Hub Transaction Translators (EXPERIMENTAL)"
43 depends on USB_EHCI_HCD && EXPERIMENTAL
44 ---help---
45 Some EHCI chips have vendor-specific extensions to integrate
46 transaction translators, so that no OHCI or UHCI companion
47 controller is needed. It's safe to say "y" even if your
48 controller doesn't support this feature.
49
50 This supports the EHCI implementation from TransDimension Inc.
51
52config USB_OHCI_HCD
53 tristate "OHCI HCD support"
54 depends on USB && USB_ARCH_HAS_OHCI
55 select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
56 ---help---
57 The Open Host Controller Interface (OHCI) is a standard for accessing
58 USB 1.1 host controller hardware. It does more in hardware than Intel's
59 UHCI specification. If your USB host controller follows the OHCI spec,
60 say Y. On most non-x86 systems, and on x86 hardware that's not using a
61 USB controller from Intel or VIA, this is appropriate. If your host
62 controller doesn't use PCI, this is probably appropriate. For a PCI
63 based system where you're not sure, the "lspci -v" entry will list the
64 right "prog-if" for your USB controller(s): EHCI, OHCI, or UHCI.
65
66 To compile this driver as a module, choose M here: the
67 module will be called ohci-hcd.
68
69config USB_OHCI_HCD_PPC_SOC
70 bool "OHCI support for on-chip PPC USB controller"
71 depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx)
72 default y
73 select USB_OHCI_BIG_ENDIAN
74 ---help---
75 Enables support for the USB controller on the MPC52xx or
76 STB03xxx processor chip. If unsure, say Y.
77
78config USB_OHCI_HCD_PCI
79 bool "OHCI support for PCI-bus USB controllers"
80 depends on USB_OHCI_HCD && PCI && (STB03xxx || PPC_MPC52xx)
81 default y
82 select USB_OHCI_LITTLE_ENDIAN
83 ---help---
84 Enables support for PCI-bus plug-in USB controller cards.
85 If unsure, say Y.
86
87config USB_OHCI_BIG_ENDIAN
88 bool
89 depends on USB_OHCI_HCD
90 default n
91
92config USB_OHCI_LITTLE_ENDIAN
93 bool
94 depends on USB_OHCI_HCD
95 default n if STB03xxx || PPC_MPC52xx
96 default y
97
98config USB_UHCI_HCD
99 tristate "UHCI HCD (most Intel and VIA) support"
100 depends on USB && PCI
101 ---help---
102 The Universal Host Controller Interface is a standard by Intel for
103 accessing the USB hardware in the PC (which is also called the USB
104 host controller). If your USB host controller conforms to this
105 standard, you may want to say Y, but see below. All recent boards
106 with Intel PCI chipsets (like intel 430TX, 440FX, 440LX, 440BX,
107 i810, i820) conform to this standard. Also all VIA PCI chipsets
108 (like VIA VP2, VP3, MVP3, Apollo Pro, Apollo Pro II or Apollo Pro
109 133). If unsure, say Y.
110
111 To compile this driver as a module, choose M here: the
112 module will be called uhci-hcd.
113
114config USB_SL811_HCD
115 tristate "SL811HS HCD support"
116 depends on USB
117 default n
118 help
119 The SL811HS is a single-port USB controller that supports either
120 host side or peripheral side roles. Enable this option if your
121 board has this chip, and you want to use it as a host controller.
122 If unsure, say N.
123
124 To compile this driver as a module, choose M here: the
125 module will be called sl811-hcd.
126
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
new file mode 100644
index 000000000000..a574ca06cf6b
--- /dev/null
+++ b/drivers/usb/host/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for USB Host Controller Driver
3# framework and drivers
4#
# Each obj-$(CONFIG_...) entry is built in (=y) or as a module (=m)
# according to the matching Kconfig symbol; unset symbols build nothing.
5
6obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
7obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
8obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
9obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
10obj-$(CONFIG_ETRAX_ARCH_V10) += hc_crisv10.o
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
new file mode 100644
index 000000000000..495e2a3ef6f1
--- /dev/null
+++ b/drivers/usb/host/ehci-dbg.c
@@ -0,0 +1,755 @@
1/*
2 * Copyright (c) 2001-2002 by David Brownell
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* this file is part of ehci-hcd.c */
20
/* Per-instance logging wrappers: route messages through the hcd's
 * controller device so each line identifies the right EHCI instance.
 */
21#define ehci_dbg(ehci, fmt, args...) \
22 dev_dbg (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
23#define ehci_err(ehci, fmt, args...) \
24 dev_err (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
25#define ehci_info(ehci, fmt, args...) \
26 dev_info (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
27#define ehci_warn(ehci, fmt, args...) \
28 dev_warn (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
29
/* Verbose variants compile to nothing unless EHCI_VERBOSE_DEBUG is set. */
30#ifdef EHCI_VERBOSE_DEBUG
31# define vdbg dbg
32# define ehci_vdbg ehci_dbg
33#else
34# define vdbg(fmt,args...) do { } while (0)
35# define ehci_vdbg(ehci, fmt, args...) do { } while (0)
36#endif
37
38#ifdef DEBUG
39
40/* check the values in the HCSPARAMS register
41 * (host controller _Structural_ parameters)
42 * see EHCI spec, Table 2-4 for each value
43 */
44static void dbg_hcs_params (struct ehci_hcd *ehci, char *label)
45{
46 u32 params = readl (&ehci->caps->hcs_params);
47
48 ehci_dbg (ehci,
49 "%s hcs_params 0x%x dbg=%d%s cc=%d pcc=%d%s%s ports=%d\n",
50 label, params,
51 HCS_DEBUG_PORT (params),
52 HCS_INDICATOR (params) ? " ind" : "",
53 HCS_N_CC (params),
54 HCS_N_PCC (params),
55 HCS_PORTROUTED (params) ? "" : " ordered",
56 HCS_PPC (params) ? "" : " !ppc",
57 HCS_N_PORTS (params)
58 );
59 /* Port routing, per EHCI 0.95 Spec, Section 2.2.5 */
60 if (HCS_PORTROUTED (params)) {
61 int i;
/* buf: up to 15 ports x "15 " (3 chars) + NUL = 46; tmp holds one entry */
62 char buf [46], tmp [7], byte;
63
64 buf[0] = 0;
65 for (i = 0; i < HCS_N_PORTS (params); i++) {
66 // FIXME MIPS won't readb() ...
/* each portroute byte packs two 4-bit companion-routing nibbles */
67 byte = readb (&ehci->caps->portroute[(i>>1)]);
68 sprintf(tmp, "%d ",
69 ((i & 0x1) ? ((byte)&0xf) : ((byte>>4)&0xf)));
70 strcat(buf, tmp);
71 }
72 ehci_dbg (ehci, "%s portroute %s\n",
73 label, buf);
74 }
75}
76#else
77
78static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {}
79
80#endif
81
82#ifdef DEBUG
83
84/* check the values in the HCCPARAMS register
85 * (host controller _Capability_ parameters)
86 * see EHCI Spec, Table 2-5 for each value
87 * */
88static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
89{
90 u32 params = readl (&ehci->caps->hcc_params);
91
/* two report formats: a frame-caching HC has no isoc scheduling
 * threshold, otherwise the threshold (in uframes) is shown too
 */
92 if (HCC_ISOC_CACHE (params)) {
93 ehci_dbg (ehci,
94 "%s hcc_params %04x caching frame %s%s%s\n",
95 label, params,
96 HCC_PGM_FRAMELISTLEN (params) ? "256/512/1024" : "1024",
97 HCC_CANPARK (params) ? " park" : "",
98 HCC_64BIT_ADDR (params) ? " 64 bit addr" : "");
99 } else {
100 ehci_dbg (ehci,
101 "%s hcc_params %04x thresh %d uframes %s%s%s\n",
102 label,
103 params,
104 HCC_ISOC_THRES (params),
105 HCC_PGM_FRAMELISTLEN (params) ? "256/512/1024" : "1024",
106 HCC_CANPARK (params) ? " park" : "",
107 HCC_64BIT_ADDR (params) ? " 64 bit addr" : "");
108 }
109}
110#else
111
112static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {}
113
114#endif
115
116#ifdef DEBUG
117
/* dump one qTD: link pointers, token, and buffer page 0; pages 1-4 are
 * printed only when page 1 is nonzero (i.e. the transfer spans pages)
 */
118static void __attribute__((__unused__))
119dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
120{
121 ehci_dbg (ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
122 le32_to_cpup (&qtd->hw_next),
123 le32_to_cpup (&qtd->hw_alt_next),
124 le32_to_cpup (&qtd->hw_token),
125 le32_to_cpup (&qtd->hw_buf [0]));
126 if (qtd->hw_buf [1])
127 ehci_dbg (ehci, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
128 le32_to_cpup (&qtd->hw_buf [1]),
129 le32_to_cpup (&qtd->hw_buf [2]),
130 le32_to_cpup (&qtd->hw_buf [3]),
131 le32_to_cpup (&qtd->hw_buf [4]));
132}
133
/* dump a QH header, then its qTD overlay area (reused dbg_qtd layout) */
134static void __attribute__((__unused__))
135dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
136{
137 ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
138 qh, qh->hw_next, qh->hw_info1, qh->hw_info2,
139 qh->hw_current);
/* the overlay starting at hw_qtd_next has the same layout as a qTD */
140 dbg_qtd ("overlay", ehci, (struct ehci_qtd *) &qh->hw_qtd_next);
141}
142
/* dump a high-speed isoc TD: 8 per-uframe transactions, 7 buffer page
 * pointers, and the urb->iso_frame_desc indices they map to
 */
143static void __attribute__((__unused__))
144dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
145{
146 ehci_dbg (ehci, "%s [%d] itd %p, next %08x, urb %p\n",
147 label, itd->frame, itd, le32_to_cpu(itd->hw_next), itd->urb);
148 ehci_dbg (ehci,
149 " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
150 le32_to_cpu(itd->hw_transaction[0]),
151 le32_to_cpu(itd->hw_transaction[1]),
152 le32_to_cpu(itd->hw_transaction[2]),
153 le32_to_cpu(itd->hw_transaction[3]),
154 le32_to_cpu(itd->hw_transaction[4]),
155 le32_to_cpu(itd->hw_transaction[5]),
156 le32_to_cpu(itd->hw_transaction[6]),
157 le32_to_cpu(itd->hw_transaction[7]));
158 ehci_dbg (ehci,
159 " buf: %08x %08x %08x %08x %08x %08x %08x\n",
160 le32_to_cpu(itd->hw_bufp[0]),
161 le32_to_cpu(itd->hw_bufp[1]),
162 le32_to_cpu(itd->hw_bufp[2]),
163 le32_to_cpu(itd->hw_bufp[3]),
164 le32_to_cpu(itd->hw_bufp[4]),
165 le32_to_cpu(itd->hw_bufp[5]),
166 le32_to_cpu(itd->hw_bufp[6]));
167 ehci_dbg (ehci, " index: %d %d %d %d %d %d %d %d\n",
168 itd->index[0], itd->index[1], itd->index[2],
169 itd->index[3], itd->index[4], itd->index[5],
170 itd->index[6], itd->index[7]);
171}
172
/* dump a split-transaction isoc TD (full/low speed behind a hub TT) */
173static void __attribute__((__unused__))
174dbg_sitd (const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
175{
176 ehci_dbg (ehci, "%s [%d] sitd %p, next %08x, urb %p\n",
177 label, sitd->frame, sitd, le32_to_cpu(sitd->hw_next), sitd->urb);
178 ehci_dbg (ehci,
179 " addr %08x sched %04x result %08x buf %08x %08x\n",
180 le32_to_cpu(sitd->hw_fullspeed_ep),
181 le32_to_cpu(sitd->hw_uframe),
182 le32_to_cpu(sitd->hw_results),
183 le32_to_cpu(sitd->hw_buf [0]),
184 le32_to_cpu(sitd->hw_buf [1]));
185}
186
/* decode the USBSTS register into buf; returns bytes written (scnprintf
 * semantics, so the result is bounded by len and always NUL-terminated)
 */
187static int __attribute__((__unused__))
188dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
189{
190 return scnprintf (buf, len,
191 "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
192 label, label [0] ? " " : "", status,
193 (status & STS_ASS) ? " Async" : "",
194 (status & STS_PSS) ? " Periodic" : "",
195 (status & STS_RECL) ? " Recl" : "",
196 (status & STS_HALT) ? " Halt" : "",
197 (status & STS_IAA) ? " IAA" : "",
198 (status & STS_FATAL) ? " FATAL" : "",
199 (status & STS_FLR) ? " FLR" : "",
200 (status & STS_PCD) ? " PCD" : "",
201 (status & STS_ERR) ? " ERR" : "",
202 (status & STS_INT) ? " INT" : ""
203 );
204}
205
/* decode the USBINTR enable register into buf; the STS_* masks double as
 * interrupt-enable bits, so they are reused here. Returns bytes written.
 */
206static int __attribute__((__unused__))
207dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
208{
209 return scnprintf (buf, len,
210 "%s%sintrenable %02x%s%s%s%s%s%s",
211 label, label [0] ? " " : "", enable,
212 (enable & STS_IAA) ? " IAA" : "",
213 (enable & STS_FATAL) ? " FATAL" : "",
214 (enable & STS_FLR) ? " FLR" : "",
215 (enable & STS_PCD) ? " PCD" : "",
216 (enable & STS_ERR) ? " ERR" : "",
217 (enable & STS_INT) ? " INT" : ""
218 );
219}
220
/* frame list sizes selected by USBCMD bits 3:2 ("??" = reserved value) */
221static const char *const fls_strings [] =
222 { "1024", "512", "256", "??" };
223
/* decode the USBCMD register into buf; returns bytes written */
224static int
225dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
226{
227 return scnprintf (buf, len,
228 "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
229 label, label [0] ? " " : "", command,
230 (command & CMD_PARK) ? "park" : "(park)",
231 CMD_PARK_CNT (command),
232 (command >> 16) & 0x3f,
233 (command & CMD_LRESET) ? " LReset" : "",
234 (command & CMD_IAAD) ? " IAAD" : "",
235 (command & CMD_ASE) ? " Async" : "",
236 (command & CMD_PSE) ? " Periodic" : "",
237 fls_strings [(command >> 2) & 0x3],
238 (command & CMD_RESET) ? " Reset" : "",
239 (command & CMD_RUN) ? "RUN" : "HALT"
240 );
241}
242
/* decode one PORTSC register into buf; "port" is 1-based for display.
 * Returns bytes written.
 */
243static int
244dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
245{
246 char *sig;
247
/* line status, PORTSC bits 11:10 */
248 /* signaling state */
249 switch (status & (3 << 10)) {
250 case 0 << 10: sig = "se0"; break;
251 case 1 << 10: sig = "k"; break; /* low speed */
252 case 2 << 10: sig = "j"; break;
253 default: sig = "?"; break;
254 }
255
256 return scnprintf (buf, len,
257 "%s%sport %d status %06x%s%s sig=%s %s%s%s%s%s%s%s%s%s",
258 label, label [0] ? " " : "", port, status,
259 (status & PORT_POWER) ? " POWER" : "",
260 (status & PORT_OWNER) ? " OWNER" : "",
261 sig,
262 (status & PORT_RESET) ? " RESET" : "",
263 (status & PORT_SUSPEND) ? " SUSPEND" : "",
264 (status & PORT_RESUME) ? " RESUME" : "",
265 (status & PORT_OCC) ? " OCC" : "",
266 (status & PORT_OC) ? " OC" : "",
267 (status & PORT_PEC) ? " PEC" : "",
268 (status & PORT_PE) ? " PE" : "",
269 (status & PORT_CSC) ? " CSC" : "",
270 (status & PORT_CONNECT) ? " CONNECT" : ""
271 );
272}
273
274#else
/* !DEBUG: no-op stubs so callers need no #ifdefs; the *_buf variants
 * report zero bytes written.
 * NOTE(review): this dbg_qh takes "char *label" while the DEBUG variant
 * takes "const char *" — confirm callers pass writable or const strings.
 */
275static inline void __attribute__((__unused__))
276dbg_qh (char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
277{}
278
279static inline int __attribute__((__unused__))
280dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
281{ return 0; }
282
283static inline int __attribute__((__unused__))
284dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
285{ return 0; }
286
287static inline int __attribute__((__unused__))
288dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
289{ return 0; }
290
291static inline int __attribute__((__unused__))
292dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
293{ return 0; }
294
295#endif /* DEBUG */
296
297/* functions have the "wrong" filename when they're output... */
/* convenience wrappers: format into a stack buffer and log via ehci_dbg.
 * Statement blocks (not do/while) so _buf stays scoped to the expansion.
 */
298#define dbg_status(ehci, label, status) { \
299 char _buf [80]; \
300 dbg_status_buf (_buf, sizeof _buf, label, status); \
301 ehci_dbg (ehci, "%s\n", _buf); \
302}
303
304#define dbg_cmd(ehci, label, command) { \
305 char _buf [80]; \
306 dbg_command_buf (_buf, sizeof _buf, label, command); \
307 ehci_dbg (ehci, "%s\n", _buf); \
308}
309
310#define dbg_port(ehci, label, port, status) { \
311 char _buf [80]; \
312 dbg_port_buf (_buf, sizeof _buf, label, port, status); \
313 ehci_dbg (ehci, "%s\n", _buf); \
314}
315
316/*-------------------------------------------------------------------------*/
317
318#ifdef STUB_DEBUG_FILES
319
320static inline void create_debug_files (struct ehci_hcd *bus) { }
321static inline void remove_debug_files (struct ehci_hcd *bus) { }
322
323#else
324
325/* troubleshooting help: expose state in driverfs */
326
/* map the EPS field (hw_info1 bits 13:12) to 'f'ull/'l'ow/'h'igh speed;
 * statement-expression so it can be used inline in printf argument lists
 */
327#define speed_char(info1) ({ char tmp; \
328 switch (info1 & (3 << 12)) { \
329 case 0 << 12: tmp = 'f'; break; \
330 case 1 << 12: tmp = 'l'; break; \
331 case 2 << 12: tmp = 'h'; break; \
332 default: tmp = '?'; break; \
333 }; tmp; })
334
/* one-character summary of a qTD token: '*' active, '-' halted,
 * '/' short read (advance via hw_alt_next), ' ' otherwise
 */
335static inline char token_mark (__le32 token)
336{
337 __u32 v = le32_to_cpu (token);
338 if (v & QTD_STS_ACTIVE)
339 return '*';
340 if (v & QTD_STS_HALT)
341 return '-';
342 if (!IS_SHORT_READ (v))
343 return ' ';
344 /* tries to advance through hw_alt_next */
345 return '/';
346}
347
/* emit one text line for a QH plus one per queued qTD, appending at
 * *nextp and decrementing *sizep (snapshot helper for show_async).
 * Extra marks beyond token_mark: '@' empty-next (NEC quirk), '#' blocked
 * on the dummy alt_next, '.' advancing via hw_qtd_next, '+' next qtd.
 */
348static void qh_lines (
349 struct ehci_hcd *ehci,
350 struct ehci_qh *qh,
351 char **nextp,
352 unsigned *sizep
353)
354{
355 u32 scratch;
356 u32 hw_curr;
357 struct list_head *entry;
358 struct ehci_qtd *td;
359 unsigned temp;
360 unsigned size = *sizep;
361 char *next = *nextp;
362 char mark;
363
364 if (qh->hw_qtd_next == EHCI_LIST_END) /* NEC does this */
365 mark = '@';
366 else
367 mark = token_mark (qh->hw_token);
368 if (mark == '/') { /* qh_alt_next controls qh advance? */
369 if ((qh->hw_alt_next & QTD_MASK) == ehci->async->hw_alt_next)
370 mark = '#'; /* blocked */
371 else if (qh->hw_alt_next == EHCI_LIST_END)
372 mark = '.'; /* use hw_qtd_next */
373 /* else alt_next points to some other qtd */
374 }
375 scratch = le32_to_cpup (&qh->hw_info1);
/* hw_current only meaningful while the overlay is active */
376 hw_curr = (mark == '*') ? le32_to_cpup (&qh->hw_current) : 0;
377 temp = scnprintf (next, size,
378 "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
379 qh, scratch & 0x007f,
380 speed_char (scratch),
381 (scratch >> 8) & 0x000f,
382 scratch, le32_to_cpup (&qh->hw_info2),
383 le32_to_cpup (&qh->hw_token), mark,
384 (__constant_cpu_to_le32 (QTD_TOGGLE) & qh->hw_token)
385 ? "data1" : "data0",
386 (le32_to_cpup (&qh->hw_alt_next) >> 1) & 0x0f);
387 size -= temp;
388 next += temp;
389
390 /* hc may be modifying the list as we read it ... */
391 list_for_each (entry, &qh->qtd_list) {
392 td = list_entry (entry, struct ehci_qtd, qtd_list);
393 scratch = le32_to_cpup (&td->hw_token);
394 mark = ' ';
395 if (hw_curr == td->qtd_dma)
396 mark = '*';
397 else if (qh->hw_qtd_next == td->qtd_dma)
398 mark = '+';
399 else if (QTD_LENGTH (scratch)) {
400 if (td->hw_alt_next == ehci->async->hw_alt_next)
401 mark = '#';
402 else if (td->hw_alt_next != EHCI_LIST_END)
403 mark = '/';
404 }
/* snprintf (not scnprintf): may report >size on truncation, clamped below */
405 temp = snprintf (next, size,
406 "\n\t%p%c%s len=%d %08x urb %p",
407 td, mark, ({ char *tmp;
408 switch ((scratch>>8)&0x03) {
409 case 0: tmp = "out"; break;
410 case 1: tmp = "in"; break;
411 case 2: tmp = "setup"; break;
412 default: tmp = "?"; break;
413 } tmp;}),
414 (scratch >> 16) & 0x7fff,
415 scratch,
416 td->urb);
417 if (temp < 0)
418 temp = 0;
419 else if (size < temp)
420 temp = size;
421 size -= temp;
422 next += temp;
423 if (temp == size)
424 goto done;
425 }
426
427 temp = snprintf (next, size, "\n");
428 if (temp < 0)
429 temp = 0;
430 else if (size < temp)
431 temp = size;
432 size -= temp;
433 next += temp;
434
435done:
436 *sizep = size;
437 *nextp = next;
438}
439
/* sysfs "async" attribute: snapshot the async schedule, one QH (plus its
 * qTDs) per qh_lines() call, then any QHs pending unlink on the reclaim
 * list. Returns strlen(buf); *buf is cleared first so an empty schedule
 * reads as zero bytes.
 */
440static ssize_t
441show_async (struct class_device *class_dev, char *buf)
442{
443 struct usb_bus *bus;
444 struct usb_hcd *hcd;
445 struct ehci_hcd *ehci;
446 unsigned long flags;
447 unsigned temp, size;
448 char *next;
449 struct ehci_qh *qh;
450
451 *buf = 0;
452
453 bus = to_usb_bus(class_dev);
454 hcd = bus->hcpriv;
455 ehci = hcd_to_ehci (hcd);
456 next = buf;
457 size = PAGE_SIZE;
458
459 /* dumps a snapshot of the async schedule.
460 * usually empty except for long-term bulk reads, or head.
461 * one QH per line, and TDs we know about
462 */
463 spin_lock_irqsave (&ehci->lock, flags);
464 for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
465 qh_lines (ehci, qh, &next, &size);
466 if (ehci->reclaim && size > 0) {
467 temp = scnprintf (next, size, "\nreclaim =\n");
468 size -= temp;
469 next += temp;
470
471 for (qh = ehci->reclaim; size > 0 && qh; qh = qh->reclaim)
472 qh_lines (ehci, qh, &next, &size);
473 }
474 spin_unlock_irqrestore (&ehci->lock, flags);
475
476 return strlen (buf);
477}
478static CLASS_DEVICE_ATTR (async, S_IRUGO, show_async, NULL);
479
480#define DBG_SCHED_LIMIT 64
481
/* sysfs "periodic" attribute: walk the shadow periodic schedule, one
 * output line per nonempty frame slot, following each slot's chain of
 * QH/FSTN/ITD/SITD entries. Returns bytes written (PAGE_SIZE - size).
 */
482static ssize_t
483show_periodic (struct class_device *class_dev, char *buf)
484{
485 struct usb_bus *bus;
486 struct usb_hcd *hcd;
487 struct ehci_hcd *ehci;
488 unsigned long flags;
489 union ehci_shadow p, *seen;
490 unsigned temp, size, seen_count;
491 char *next;
492 unsigned i;
493 __le32 tag;
494
/* "seen" remembers up to DBG_SCHED_LIMIT QHs so an interrupt QH linked
 * into many frames gets full detail only once, then "..." thereafter
 */
495 if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
496 return 0;
497 seen_count = 0;
498
499 bus = to_usb_bus(class_dev);
500 hcd = bus->hcpriv;
501 ehci = hcd_to_ehci (hcd);
502 next = buf;
503 size = PAGE_SIZE;
504
505 temp = scnprintf (next, size, "size = %d\n", ehci->periodic_size);
506 size -= temp;
507 next += temp;
508
509 /* dump a snapshot of the periodic schedule.
510 * iso changes, interrupt usually doesn't.
511 */
512 spin_lock_irqsave (&ehci->lock, flags);
513 for (i = 0; i < ehci->periodic_size; i++) {
514 p = ehci->pshadow [i];
515 if (likely (!p.ptr))
516 continue;
517 tag = Q_NEXT_TYPE (ehci->periodic [i]);
518
519 temp = scnprintf (next, size, "%4d: ", i);
520 size -= temp;
521 next += temp;
522
523 do {
524 switch (tag) {
525 case Q_TYPE_QH:
526 temp = scnprintf (next, size, " qh%d-%04x/%p",
527 p.qh->period,
528 le32_to_cpup (&p.qh->hw_info2)
529 /* uframe masks */
530 & 0xffff,
531 p.qh);
532 size -= temp;
533 next += temp;
534 /* don't repeat what follows this qh */
535 for (temp = 0; temp < seen_count; temp++) {
536 if (seen [temp].ptr != p.ptr)
537 continue;
538 if (p.qh->qh_next.ptr)
539 temp = scnprintf (next, size,
540 " ...");
541 p.ptr = NULL;
542 break;
543 }
544 /* show more info the first time around */
545 if (temp == seen_count && p.ptr) {
546 u32 scratch = le32_to_cpup (
547 &p.qh->hw_info1);
548 struct ehci_qtd *qtd;
549 char *type = "";
550
551 /* count tds, get ep direction */
552 temp = 0;
553 list_for_each_entry (qtd,
554 &p.qh->qtd_list,
555 qtd_list) {
556 temp++;
557 switch (0x03 & (le32_to_cpu (
558 qtd->hw_token) >> 8)) {
559 case 0: type = "out"; continue;
560 case 1: type = "in"; continue;
561 }
562 }
563
564 temp = scnprintf (next, size,
565 " (%c%d ep%d%s "
566 "[%d/%d] q%d p%d)",
567 speed_char (scratch),
568 scratch & 0x007f,
569 (scratch >> 8) & 0x000f, type,
570 p.qh->usecs, p.qh->c_usecs,
571 temp,
572 0x7ff & (scratch >> 16));
573
574 if (seen_count < DBG_SCHED_LIMIT)
575 seen [seen_count++].qh = p.qh;
576 } else
577 temp = 0;
578 if (p.qh) {
579 tag = Q_NEXT_TYPE (p.qh->hw_next);
580 p = p.qh->qh_next;
581 }
582 break;
583 case Q_TYPE_FSTN:
584 temp = scnprintf (next, size,
585 " fstn-%8x/%p", p.fstn->hw_prev,
586 p.fstn);
587 tag = Q_NEXT_TYPE (p.fstn->hw_next);
588 p = p.fstn->fstn_next;
589 break;
590 case Q_TYPE_ITD:
591 temp = scnprintf (next, size,
592 " itd/%p", p.itd);
593 tag = Q_NEXT_TYPE (p.itd->hw_next);
594 p = p.itd->itd_next;
595 break;
596 case Q_TYPE_SITD:
597 temp = scnprintf (next, size,
598 " sitd%d-%04x/%p",
599 p.sitd->stream->interval,
600 le32_to_cpup (&p.sitd->hw_uframe)
601 & 0x0000ffff,
602 p.sitd);
603 tag = Q_NEXT_TYPE (p.sitd->hw_next);
604 p = p.sitd->sitd_next;
605 break;
606 }
607 size -= temp;
608 next += temp;
609 } while (p.ptr);
610
611 temp = scnprintf (next, size, "\n");
612 size -= temp;
613 next += temp;
614 }
615 spin_unlock_irqrestore (&ehci->lock, flags);
616 kfree (seen);
617
618 return PAGE_SIZE - size;
619}
620static CLASS_DEVICE_ATTR (periodic, S_IRUGO, show_periodic, NULL);
621
622#undef DBG_SCHED_LIMIT
623
624static ssize_t
625show_registers (struct class_device *class_dev, char *buf)
626{
627 struct usb_bus *bus;
628 struct usb_hcd *hcd;
629 struct ehci_hcd *ehci;
630 unsigned long flags;
631 unsigned temp, size, i;
632 char *next, scratch [80];
633 static char fmt [] = "%*s\n";
634 static char label [] = "";
635
636 bus = to_usb_bus(class_dev);
637 hcd = bus->hcpriv;
638 ehci = hcd_to_ehci (hcd);
639 next = buf;
640 size = PAGE_SIZE;
641
642 spin_lock_irqsave (&ehci->lock, flags);
643
644 if (bus->controller->power.power_state) {
645 size = scnprintf (next, size,
646 "bus %s, device %s (driver " DRIVER_VERSION ")\n"
647 "SUSPENDED (no register access)\n",
648 hcd->self.controller->bus->name,
649 hcd->self.controller->bus_id);
650 goto done;
651 }
652
653 /* Capability Registers */
654 i = HC_VERSION(readl (&ehci->caps->hc_capbase));
655 temp = scnprintf (next, size,
656 "bus %s, device %s (driver " DRIVER_VERSION ")\n"
657 "EHCI %x.%02x, hcd state %d\n",
658 hcd->self.controller->bus->name,
659 hcd->self.controller->bus_id,
660 i >> 8, i & 0x0ff, hcd->state);
661 size -= temp;
662 next += temp;
663
664 // FIXME interpret both types of params
665 i = readl (&ehci->caps->hcs_params);
666 temp = scnprintf (next, size, "structural params 0x%08x\n", i);
667 size -= temp;
668 next += temp;
669
670 i = readl (&ehci->caps->hcc_params);
671 temp = scnprintf (next, size, "capability params 0x%08x\n", i);
672 size -= temp;
673 next += temp;
674
675 /* Operational Registers */
676 temp = dbg_status_buf (scratch, sizeof scratch, label,
677 readl (&ehci->regs->status));
678 temp = scnprintf (next, size, fmt, temp, scratch);
679 size -= temp;
680 next += temp;
681
682 temp = dbg_command_buf (scratch, sizeof scratch, label,
683 readl (&ehci->regs->command));
684 temp = scnprintf (next, size, fmt, temp, scratch);
685 size -= temp;
686 next += temp;
687
688 temp = dbg_intr_buf (scratch, sizeof scratch, label,
689 readl (&ehci->regs->intr_enable));
690 temp = scnprintf (next, size, fmt, temp, scratch);
691 size -= temp;
692 next += temp;
693
694 temp = scnprintf (next, size, "uframe %04x\n",
695 readl (&ehci->regs->frame_index));
696 size -= temp;
697 next += temp;
698
699 for (i = 0; i < HCS_N_PORTS (ehci->hcs_params); i++) {
700 temp = dbg_port_buf (scratch, sizeof scratch, label, i + 1,
701 readl (&ehci->regs->port_status [i]));
702 temp = scnprintf (next, size, fmt, temp, scratch);
703 size -= temp;
704 next += temp;
705 }
706
707 if (ehci->reclaim) {
708 temp = scnprintf (next, size, "reclaim qh %p%s\n",
709 ehci->reclaim,
710 ehci->reclaim_ready ? " ready" : "");
711 size -= temp;
712 next += temp;
713 }
714
715#ifdef EHCI_STATS
716 temp = scnprintf (next, size,
717 "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
718 ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
719 ehci->stats.lost_iaa);
720 size -= temp;
721 next += temp;
722
723 temp = scnprintf (next, size, "complete %ld unlink %ld\n",
724 ehci->stats.complete, ehci->stats.unlink);
725 size -= temp;
726 next += temp;
727#endif
728
729done:
730 spin_unlock_irqrestore (&ehci->lock, flags);
731
732 return PAGE_SIZE - size;
733}
734static CLASS_DEVICE_ATTR (registers, S_IRUGO, show_registers, NULL);
735
/* expose the async/periodic/registers attributes on the bus class device;
 * NOTE(review): class_device_create_file() results are ignored here, so a
 * failed attribute creation is silent — confirm that's acceptable.
 */
736static inline void create_debug_files (struct ehci_hcd *ehci)
737{
738 struct class_device *cldev = &ehci_to_hcd(ehci)->self.class_dev;
739
740 class_device_create_file(cldev, &class_device_attr_async);
741 class_device_create_file(cldev, &class_device_attr_periodic);
742 class_device_create_file(cldev, &class_device_attr_registers);
743}
744
/* tear down the attributes created by create_debug_files() */
745static inline void remove_debug_files (struct ehci_hcd *ehci)
746{
747 struct class_device *cldev = &ehci_to_hcd(ehci)->self.class_dev;
748
749 class_device_remove_file(cldev, &class_device_attr_async);
750 class_device_remove_file(cldev, &class_device_attr_periodic);
751 class_device_remove_file(cldev, &class_device_attr_registers);
752}
753
754#endif /* STUB_DEBUG_FILES */
755
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
new file mode 100644
index 000000000000..a63bb792e2c7
--- /dev/null
+++ b/drivers/usb/host/ehci-hcd.c
@@ -0,0 +1,1261 @@
1/*
2 * Copyright (c) 2000-2004 by David Brownell
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#include <linux/config.h>
20
21#ifdef CONFIG_USB_DEBUG
22 #define DEBUG
23#else
24 #undef DEBUG
25#endif
26
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/dmapool.h>
30#include <linux/kernel.h>
31#include <linux/delay.h>
32#include <linux/ioport.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/smp_lock.h>
36#include <linux/errno.h>
37#include <linux/init.h>
38#include <linux/timer.h>
39#include <linux/list.h>
40#include <linux/interrupt.h>
41#include <linux/reboot.h>
42#include <linux/usb.h>
43#include <linux/moduleparam.h>
44#include <linux/dma-mapping.h>
45
46#include "../core/hcd.h"
47
48#include <asm/byteorder.h>
49#include <asm/io.h>
50#include <asm/irq.h>
51#include <asm/system.h>
52#include <asm/unaligned.h>
53
54
55/*-------------------------------------------------------------------------*/
56
57/*
58 * EHCI hc_driver implementation ... experimental, incomplete.
59 * Based on the final 1.0 register interface specification.
60 *
61 * USB 2.0 shows up in upcoming www.pcmcia.org technology.
62 * First was PCMCIA, like ISA; then CardBus, which is PCI.
63 * Next comes "CardBay", using USB 2.0 signals.
64 *
65 * Contains additional contributions by Brad Hards, Rory Bolt, and others.
66 * Special thanks to Intel and VIA for providing host controllers to
67 * test this driver on, and Cypress (including In-System Design) for
68 * providing early devices for those host controllers to talk to!
69 *
70 * HISTORY:
71 *
72 * 2004-05-10 Root hub and PCI suspend/resume support; remote wakeup. (db)
73 * 2004-02-24 Replace pci_* with generic dma_* API calls (dsaxena@plexity.net)
74 * 2003-12-29 Rewritten high speed iso transfer support (by Michal Sojka,
75 * <sojkam@centrum.cz>, updates by DB).
76 *
77 * 2002-11-29 Correct handling for hw async_next register.
78 * 2002-08-06 Handling for bulk and interrupt transfers is mostly shared;
79 * only scheduling is different, no arbitrary limitations.
80 * 2002-07-25 Sanity check PCI reads, mostly for better cardbus support,
81 * clean up HC run state handshaking.
82 * 2002-05-24 Preliminary FS/LS interrupts, using scheduling shortcuts
83 * 2002-05-11 Clear TT errors for FS/LS ctrl/bulk. Fill in some other
84 * missing pieces: enabling 64bit dma, handoff from BIOS/SMM.
85 * 2002-05-07 Some error path cleanups to report better errors; wmb();
86 * use non-CVS version id; better iso bandwidth claim.
87 * 2002-04-19 Control/bulk/interrupt submit no longer uses giveback() on
88 * errors in submit path. Bugfixes to interrupt scheduling/processing.
89 * 2002-03-05 Initial high-speed ISO support; reduce ITD memory; shift
90 * more checking to generic hcd framework (db). Make it work with
91 * Philips EHCI; reduce PCI traffic; shorten IRQ path (Rory Bolt).
92 * 2002-01-14 Minor cleanup; version synch.
93 * 2002-01-08 Fix roothub handoff of FS/LS to companion controllers.
94 * 2002-01-04 Control/Bulk queuing behaves.
95 *
96 * 2001-12-12 Initial patch version for Linux 2.5.1 kernel.
97 * 2001-June Works with usb-storage and NEC EHCI on 2.4
98 */
99
100#define DRIVER_VERSION "10 Dec 2004"
101#define DRIVER_AUTHOR "David Brownell"
102#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
103
104static const char hcd_name [] = "ehci_hcd";
105
106
107#undef EHCI_VERBOSE_DEBUG
108#undef EHCI_URB_TRACE
109
110#ifdef DEBUG
111#define EHCI_STATS
112#endif
113
114/* magic numbers that can affect system performance */
115#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
116#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
117#define EHCI_TUNE_RL_TT 0
118#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
119#define EHCI_TUNE_MULT_TT 1
120#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
121
122#define EHCI_IAA_JIFFIES (HZ/100) /* arbitrary; ~10 msec */
123#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
124#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
125#define EHCI_SHRINK_JIFFIES (HZ/200) /* async qh unlink delay */
126
127/* Initial IRQ latency: faster than hw default */
128static int log2_irq_thresh = 0; // 0 to 6
129module_param (log2_irq_thresh, int, S_IRUGO);
130MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
131
132/* initial park setting: slower than hw default */
133static unsigned park = 0;
134module_param (park, uint, S_IRUGO);
135MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
136
137#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
138
139/*-------------------------------------------------------------------------*/
140
141#include "ehci.h"
142#include "ehci-dbg.c"
143
144/*-------------------------------------------------------------------------*/
145
146/*
147 * handshake - spin reading hc until handshake completes or fails
148 * @ptr: address of hc register to be read
149 * @mask: bits to look at in result of read
150 * @done: value of those bits when handshake succeeds
151 * @usec: timeout in microseconds
152 *
153 * Returns negative errno, or zero on success
154 *
155 * Success happens when the "mask" bits have the specified value (hardware
156 * handshake done). There are two failure modes: "usec" have passed (major
157 * hardware flakeout), or the register reads as all-ones (hardware removed).
158 *
 * That last failure should only happen in cases like physical cardbus eject
160 * before driver shutdown. But it also seems to be caused by bugs in cardbus
161 * bridge shutdown: shutting down the bridge before the devices using it.
162 */
163static int handshake (void __iomem *ptr, u32 mask, u32 done, int usec)
164{
165 u32 result;
166
167 do {
168 result = readl (ptr);
169 if (result == ~(u32)0) /* card removed */
170 return -ENODEV;
171 result &= mask;
172 if (result == done)
173 return 0;
174 udelay (1);
175 usec--;
176 } while (usec > 0);
177 return -ETIMEDOUT;
178}
179
/* force HC to halt state from unknown (EHCI spec section 2.3)
 * Returns 0 once STS_HALT is set, or a negative errno from handshake().
 */
static int ehci_halt (struct ehci_hcd *ehci)
{
	u32	temp = readl (&ehci->regs->status);

	/* already halted? nothing to do */
	if ((temp & STS_HALT) != 0)
		return 0;

	/* clear the Run/Stop bit, then wait up to 16 microframes (2 msec)
	 * for the controller to acknowledge by setting STS_HALT
	 */
	temp = readl (&ehci->regs->command);
	temp &= ~CMD_RUN;
	writel (temp, &ehci->regs->command);
	return handshake (&ehci->regs->status, STS_HALT, STS_HALT, 16 * 125);
}
193
194/* put TDI/ARC silicon into EHCI mode */
195static void tdi_reset (struct ehci_hcd *ehci)
196{
197 u32 __iomem *reg_ptr;
198 u32 tmp;
199
200 reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + 0x68);
201 tmp = readl (reg_ptr);
202 tmp |= 0x3;
203 writel (tmp, reg_ptr);
204}
205
/* reset a non-running (STS_HALT == 1) controller
 * Returns 0 on success or a negative errno if the reset bit never clears.
 */
static int ehci_reset (struct ehci_hcd *ehci)
{
	int	retval;
	u32	command = readl (&ehci->regs->command);

	/* set HCRESET, then wait (up to 250 msec) for hardware to clear it */
	command |= CMD_RESET;
	dbg_cmd (ehci, "reset", command);
	writel (command, &ehci->regs->command);
	ehci_to_hcd(ehci)->state = HC_STATE_HALT;
	ehci->next_statechange = jiffies;
	retval = handshake (&ehci->regs->command, CMD_RESET, 0, 250 * 1000);

	if (retval)
		return retval;

	/* TDI/ARC silicon needs to be put back into EHCI host mode
	 * after every reset
	 */
	if (ehci_is_TDI(ehci))
		tdi_reset (ehci);

	return retval;
}
227
/* idle the controller (from running): disable the async and periodic
 * schedules and wait for the hardware to report them stopped.  On
 * handshake failure the hcd state is marked HC_STATE_HALT.
 */
static void ehci_quiesce (struct ehci_hcd *ehci)
{
	u32	temp;

#ifdef DEBUG
	/* quiescing only makes sense from the running state */
	if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		BUG ();
#endif

	/* wait for any schedule enables/disables to take effect;
	 * the command enable bits sit 10 bits below the corresponding
	 * status bits, so shifting lines them up for the handshake
	 */
	temp = readl (&ehci->regs->command) << 10;
	temp &= STS_ASS | STS_PSS;
	if (handshake (&ehci->regs->status, STS_ASS | STS_PSS,
				temp, 16 * 125) != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return;
	}

	/* then disable anything that's still active */
	temp = readl (&ehci->regs->command);
	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
	writel (temp, &ehci->regs->command);

	/* hardware can take 16 microframes to turn off ... */
	if (handshake (&ehci->regs->status, STS_ASS | STS_PSS,
				0, 16 * 125) != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return;
	}
}
259
260/*-------------------------------------------------------------------------*/
261
262static void ehci_work(struct ehci_hcd *ehci, struct pt_regs *regs);
263
264#include "ehci-hub.c"
265#include "ehci-mem.c"
266#include "ehci-q.c"
267#include "ehci-sched.c"
268
269/*-------------------------------------------------------------------------*/
270
/* Watchdog timer callback (param is the ehci_hcd cast to unsigned long).
 * Recovers lost IAA interrupts, retires an idle async schedule, and lets
 * the driver make progress by timer even when IRQs are misbehaving.
 */
static void ehci_watchdog (unsigned long param)
{
	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
	unsigned long		flags;

	spin_lock_irqsave (&ehci->lock, flags);

	/* lost IAA irqs wedge things badly; seen with a vt8235 */
	if (ehci->reclaim) {
		u32		status = readl (&ehci->regs->status);

		if (status & STS_IAA) {
			ehci_vdbg (ehci, "lost IAA\n");
			COUNT (ehci->stats.lost_iaa);
			/* ack the latched IAA status; ehci_work() below
			 * finishes the deferred qh unlink
			 */
			writel (STS_IAA, &ehci->regs->status);
			ehci->reclaim_ready = 1;
		}
	}

	/* stop async processing after it's idled a bit */
	if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
		start_unlink_async (ehci, ehci->async);

	/* ehci could run by timer, without IRQs ... */
	ehci_work (ehci, NULL);

	spin_unlock_irqrestore (&ehci->lock, flags);
}
299
300#ifdef CONFIG_PCI
301
/* EHCI 0.96 (and later) section 5.1 says how to kick BIOS/SMM/...
 * off the controller (maybe it can boot from highspeed USB disks).
 *
 * @where: PCI config-space offset of the legacy-support capability
 * @cap:   the capability dword already read from that offset
 * Returns 0 (even on BIOS handoff failure, since some BIOSes are buggy).
 */
static int bios_handoff (struct ehci_hcd *ehci, int where, u32 cap)
{
	/* bit 16 is the "BIOS owned" semaphore */
	if (cap & (1 << 16)) {
		int msec = 5000;
		struct pci_dev *pdev =
				to_pci_dev(ehci_to_hcd(ehci)->self.controller);

		/* request handoff to OS: set the "OS owned" semaphore */
		cap |= 1 << 24;
		pci_write_config_dword(pdev, where, cap);

		/* and wait a while (up to ~5 seconds) for it to happen */
		do {
			msleep(10);
			msec -= 10;
			pci_read_config_dword(pdev, where, &cap);
		} while ((cap & (1 << 16)) && msec);
		if (cap & (1 << 16)) {
			ehci_err (ehci, "BIOS handoff failed (%d, %04x)\n",
				where, cap);
			// some BIOS versions seem buggy...
			// return 1;
			ehci_warn (ehci, "continuing after BIOS bug...\n");
			return 0;
		}
		ehci_dbg (ehci, "BIOS handoff succeeded\n");
	}
	return 0;
}
334
335#endif
336
/* Reboot notifier callback: drop the Configured Flag so BIOS/firmware
 * can use a companion (USB 1.1) controller across the reboot.
 */
static int
ehci_reboot (struct notifier_block *self, unsigned long code, void *null)
{
	struct ehci_hcd		*ehci;

	ehci = container_of (self, struct ehci_hcd, reboot_notifier);

	/* make BIOS/etc use companion controller during reboot */
	writel (0, &ehci->regs->configured_flag);
	return 0;
}
348
349
/* called by khubd or root hub init threads */

/* First-stage hardware setup: map capability/operational registers,
 * apply PCI vendor quirks, walk the EHCI extended capability list
 * (BIOS handoff), cache HCSPARAMS, and leave the controller halted.
 * Returns 0 or a negative errno.
 */
static int ehci_hc_reset (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	u32			temp;
	unsigned		count = 256/4;	/* cap list walk limit */

	spin_lock_init (&ehci->lock);

	/* operational registers follow the capability registers by
	 * CAPLENGTH bytes (low byte of HCCAPBASE)
	 */
	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs + HC_LENGTH (readl (&ehci->caps->hc_capbase));
	dbg_hcs_params (ehci, "reset");
	dbg_hcc_params (ehci, "reset");

#ifdef CONFIG_PCI
	/* EHCI 0.96 and later may have "extended capabilities" */
	if (hcd->self.controller->bus == &pci_bus_type) {
		struct pci_dev	*pdev = to_pci_dev(hcd->self.controller);

		switch (pdev->vendor) {
		case PCI_VENDOR_ID_TDI:
			if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
				/* TDI silicon integrates a root-hub TT */
				ehci->is_tdi_rh_tt = 1;
				tdi_reset (ehci);
			}
			break;
		case PCI_VENDOR_ID_AMD:
			/* AMD8111 EHCI doesn't work, according to AMD errata */
			if (pdev->device == 0x7463) {
				ehci_info (ehci, "ignoring AMD8111 (errata)\n");
				return -EIO;
			}
			break;
		}

		temp = HCC_EXT_CAPS (readl (&ehci->caps->hcc_params));
	} else
		temp = 0;
	/* follow the capability chain through PCI config space; "count"
	 * bounds the walk so a broken chain can't loop forever
	 */
	while (temp && count--) {
		u32		cap;

		pci_read_config_dword (to_pci_dev(hcd->self.controller),
				temp, &cap);
		ehci_dbg (ehci, "capability %04x at %02x\n", cap, temp);
		switch (cap & 0xff) {
		case 1:			/* BIOS/SMM/... handoff */
			if (bios_handoff (ehci, temp, cap) != 0)
				return -EOPNOTSUPP;
			break;
		case 0:			/* illegal reserved capability */
			ehci_warn (ehci, "illegal capability!\n");
			cap = 0;
			/* FALLTHROUGH */
		default:		/* unknown */
			break;
		}
		temp = (cap >> 8) & 0xff;
	}
	if (!count) {
		ehci_err (ehci, "bogus capabilities ... PCI problems!\n");
		return -EIO;
	}
	if (ehci_is_TDI(ehci))
		ehci_reset (ehci);
#endif

	/* cache this readonly data; minimize PCI reads */
	ehci->hcs_params = readl (&ehci->caps->hcs_params);

	/* at least the Genesys GL880S needs fixup here */
	temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
	temp &= 0x0f;
	if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
		ehci_dbg (ehci, "bogus port configuration: "
			"cc=%d x pcc=%d < ports=%d\n",
			HCS_N_CC(ehci->hcs_params),
			HCS_N_PCC(ehci->hcs_params),
			HCS_N_PORTS(ehci->hcs_params));

#ifdef CONFIG_PCI
		if (hcd->self.controller->bus == &pci_bus_type) {
			struct pci_dev *pdev;

			pdev = to_pci_dev(hcd->self.controller);
			switch (pdev->vendor) {
			case 0x17a0:		/* GENESYS */
				/* GL880S: should be PORTS=2 */
				temp |= (ehci->hcs_params & ~0xf);
				ehci->hcs_params = temp;
				break;
			case PCI_VENDOR_ID_NVIDIA:
				/* NF4: should be PCC=10 */
				break;
			}
		}
#endif
	}

	/* force HC to halt state */
	return ehci_halt (ehci);
}
452
/* Bring the controller from halted/reset to running: allocate schedule
 * memory (first time only), program schedule pointers and IRQ latency,
 * set the Run bit and Configured Flag, register the root hub, and enable
 * interrupts.  Also used on resume/restart paths; "first" distinguishes
 * initial setup from a restart.  Returns 0 or a negative errno.
 */
static int ehci_start (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	u32			temp;
	struct usb_device	*udev;
	struct usb_bus		*bus;
	int			retval;
	u32			hcc_params;
	u8			sbrn = 0;
	int			first;

	/* skip some things on restart paths; watchdog.data is only ever
	 * set here, so zero means this is the first call
	 */
	first = (ehci->watchdog.data == 0);
	if (first) {
		init_timer (&ehci->watchdog);
		ehci->watchdog.function = ehci_watchdog;
		ehci->watchdog.data = (unsigned long) ehci;
	}

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	ehci->periodic_size = DEFAULT_I_TDPS;
	if (first && (retval = ehci_mem_init (ehci, GFP_KERNEL)) < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = readl (&ehci->caps->hcc_params);
	if (HCC_ISOC_CACHE (hcc_params)) 	// full frame cache
		ehci->i_thresh = 8;
	else					// N microframes cached
		ehci->i_thresh = 2 + HCC_ISOC_THRES (hcc_params);

	ehci->reclaim = NULL;
	ehci->reclaim_ready = 0;
	ehci->next_uframe = -1;

	/* controller state: unknown --> reset */

	/* EHCI spec section 4.1 */
	if ((retval = ehci_reset (ehci)) != 0) {
		ehci_mem_cleanup (ehci);
		return retval;
	}
	writel (ehci->periodic_dma, &ehci->regs->frame_list);

#ifdef CONFIG_PCI
	if (hcd->self.controller->bus == &pci_bus_type) {
		struct pci_dev		*pdev;
		u16			port_wake;

		pdev = to_pci_dev(hcd->self.controller);

		/* Serial Bus Release Number is at PCI 0x60 offset */
		pci_read_config_byte(pdev, 0x60, &sbrn);

		/* port wake capability, reported by boot firmware */
		pci_read_config_word(pdev, 0x62, &port_wake);
		hcd->can_wakeup = (port_wake & 1) != 0;

		/* help hc dma work well with cachelines */
		pci_set_mwi (pdev);
	}
#endif

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	if (first) {
		ehci->async->qh_next.qh = NULL;
		ehci->async->hw_next = QH_NEXT (ehci->async->qh_dma);
		ehci->async->hw_info1 = cpu_to_le32 (QH_HEAD);
		ehci->async->hw_token = cpu_to_le32 (QTD_STS_HALT);
		ehci->async->hw_qtd_next = EHCI_LIST_END;
		ehci->async->qh_state = QH_STATE_LINKED;
		ehci->async->hw_alt_next = QTD_NEXT (ehci->async->dummy->qtd_dma);
	}
	writel ((u32)ehci->async->qh_dma, &ehci->regs->async_next);

	/*
	 * hcc_params controls whether ehci->regs->segment must (!!!)
	 * be used; it constrains QH/ITD/SITD and QTD locations.
	 * pci_pool consistent memory always uses segment zero.
	 * streaming mappings for I/O buffers, like pci_map_single(),
	 * can return segments above 4GB, if the device allows.
	 *
	 * NOTE:  the dma mask is visible through dma_supported(), so
	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
	 * host side drivers though.
	 */
	if (HCC_64BIT_ADDR (hcc_params)) {
		writel (0, &ehci->regs->segment);
#if 0
// this is deeply broken on almost all architectures
		if (!pci_set_dma_mask (to_pci_dev(hcd->self.controller), 0xffffffffffffffffULL))
			ehci_info (ehci, "enabled 64bit PCI DMA\n");
#endif
	}

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * make problems:  throughput reduction (!), data errors...
		 */
		if (park) {
			park = min (park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		ehci_info (ehci, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN (hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
		switch (EHCI_TUNE_FLS) {
		case 0: ehci->periodic_size = 1024; break;
		case 1: ehci->periodic_size = 512; break;
		case 2: ehci->periodic_size = 256; break;
		default:	BUG ();
		}
	}
	// Philips, Intel, and maybe others need CMD_RUN before the
	// root hub will detect new devices (why?); NEC doesn't
	temp |= CMD_RUN;
	writel (temp, &ehci->regs->command);
	dbg_cmd (ehci, "init", temp);

	/* set async sleep time = 10 us ... ? */

	/* wire up the root hub */
	bus = hcd_to_bus (hcd);
	udev = first ? usb_alloc_dev (NULL, bus, 0) : bus->root_hub;
	if (!udev) {
done2:
		ehci_mem_cleanup (ehci);
		return -ENOMEM;
	}
	udev->speed = USB_SPEED_HIGH;
	udev->state = first ? USB_STATE_ATTACHED : USB_STATE_CONFIGURED;

	/*
	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
	 * are explicitly handed to companion controller(s), so no TT is
	 * involved with the root hub.  (Except where one is integrated,
	 * and there's no companion controller unless maybe for USB OTG.)
	 */
	if (first) {
		ehci->reboot_notifier.notifier_call = ehci_reboot;
		register_reboot_notifier (&ehci->reboot_notifier);
	}

	hcd->state = HC_STATE_RUNNING;
	writel (FLAG_CF, &ehci->regs->configured_flag);
	readl (&ehci->regs->command);	/* unblock posted write */

	temp = HC_VERSION(readl (&ehci->caps->hc_capbase));
	ehci_info (ehci,
		"USB %x.%x %s, EHCI %x.%02x, driver %s\n",
		((sbrn & 0xf0)>>4), (sbrn & 0x0f),
		first ? "initialized" : "restarted",
		temp >> 8, temp & 0xff, DRIVER_VERSION);

	/*
	 * From here on, khubd concurrently accesses the root
	 * hub; drivers will be talking to enumerated devices.
	 * (On restart paths, khubd already knows about the root
	 * hub and could find work as soon as we wrote FLAG_CF.)
	 *
	 * Before this point the HC was idle/ready.  After, khubd
	 * and device drivers may start it running.
	 */
	if (first && usb_hcd_register_root_hub (udev, hcd) != 0) {
		if (hcd->state == HC_STATE_RUNNING)
			ehci_quiesce (ehci);
		ehci_reset (ehci);
		usb_put_dev (udev);
		retval = -ENODEV;	/* NOTE: done2 returns -ENOMEM, not retval */
		goto done2;
	}

	writel (INTR_MASK, &ehci->regs->intr_enable); /* Turn On Interrupts */

	if (first)
		create_debug_files (ehci);

	return 0;
}
654
/* always called by thread; normally rmmod */

/* Shut the controller down: power off root hub ports, stop the watchdog,
 * quiesce and reset the hardware, hand ports to companions, remove debug
 * files, flush remaining work, and free schedule memory.
 */
static void ehci_stop (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	u8			rh_ports, port;

	ehci_dbg (ehci, "stop\n");

	/* Turn off port power on all root hub ports. */
	rh_ports = HCS_N_PORTS (ehci->hcs_params);
	for (port = 1; port <= rh_ports; port++)
		(void) ehci_hub_control(hcd,
			ClearPortFeature, USB_PORT_FEAT_POWER,
			port, NULL, 0);

	/* no more interrupts ... */
	del_timer_sync (&ehci->watchdog);

	spin_lock_irq(&ehci->lock);
	if (HC_IS_RUNNING (hcd->state))
		ehci_quiesce (ehci);

	ehci_reset (ehci);
	writel (0, &ehci->regs->intr_enable);
	spin_unlock_irq(&ehci->lock);

	/* let companion controllers work when we aren't */
	writel (0, &ehci->regs->configured_flag);
	unregister_reboot_notifier (&ehci->reboot_notifier);

	remove_debug_files (ehci);

	/* root hub is shut down separately (first, when possible) */
	spin_lock_irq (&ehci->lock);
	if (ehci->async)
		ehci_work (ehci, NULL);
	spin_unlock_irq (&ehci->lock);
	ehci_mem_cleanup (ehci);

#ifdef	EHCI_STATS
	ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
		ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
		ehci->stats.lost_iaa);
	ehci_dbg (ehci, "complete %ld unlink %ld\n",
		ehci->stats.complete, ehci->stats.unlink);
#endif

	dbg_status (ehci, "ehci_stop completed", readl (&ehci->regs->status));
}
705
706static int ehci_get_frame (struct usb_hcd *hcd)
707{
708 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
709 return (readl (&ehci->regs->frame_index) >> 3) % ehci->periodic_size;
710}
711
712/*-------------------------------------------------------------------------*/
713
714#ifdef CONFIG_PM
715
716/* suspend/resume, section 4.3 */
717
718/* These routines rely on the bus (pci, platform, etc)
719 * to handle powerdown and wakeup, and currently also on
720 * transceivers that don't need any software attention to set up
721 * the right sort of wakeup.
722 */
723
/* Bus-level suspend entry point (section 4.3).  Suspends the root hub,
 * either through the generic USB suspend path or directly.  Always
 * returns 0.
 */
static int ehci_suspend (struct usb_hcd *hcd, u32 state)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);

	/* hardware needs settle time between state changes */
	if (time_before (jiffies, ehci->next_statechange))
		msleep (100);

#ifdef	CONFIG_USB_SUSPEND
	(void) usb_suspend_device (hcd->self.root_hub, state);
#else
	usb_lock_device (hcd->self.root_hub);
	(void) ehci_hub_suspend (hcd);
	usb_unlock_device (hcd->self.root_hub);
#endif

	// save (PCI) FLADJ in case of Vaux power loss
	// ... we'd only use it to handle clock skew

	return 0;
}
744
/* Bus-level resume entry point.  If any port is still suspended the
 * controller kept state, so just resume the root hub; otherwise assume
 * power was lost and fully re-init the controller, disconnecting any
 * devices khubd knew about.  Returns 0/negative errno.
 */
static int ehci_resume (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	unsigned		port;
	struct usb_device	*root = hcd->self.root_hub;
	int			retval = -EINVAL;
	int			powerup = 0;

	// maybe restore (PCI) FLADJ

	/* hardware needs settle time between state changes */
	if (time_before (jiffies, ehci->next_statechange))
		msleep (100);

	/* If any port is suspended, we know we can/must resume the HC. */
	for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; ) {
		u32	status;
		port--;
		status = readl (&ehci->regs->port_status [port]);
		if (status & PORT_SUSPEND) {
			down (&hcd->self.root_hub->serialize);
			retval = ehci_hub_resume (hcd);
			up (&hcd->self.root_hub->serialize);
			break;
		}
		/* note whether any port lost VBUS power */
		if ((status & PORT_POWER) == 0)
			powerup = 1;
		if (!root->children [port])
			continue;
		dbg_port (ehci, __FUNCTION__, port + 1, status);
		usb_set_device_state (root->children[port],
					USB_STATE_NOTATTACHED);
	}

	/* Else reset, to cope with power loss or flush-to-storage
	 * style "resume" having activated BIOS during reboot.
	 * (port == 0 here means the loop above found no suspended port.)
	 */
	if (port == 0) {
		(void) ehci_halt (ehci);
		(void) ehci_reset (ehci);
		(void) ehci_hc_reset (hcd);

		/* emptying the schedule aborts any urbs */
		spin_lock_irq (&ehci->lock);
		if (ehci->reclaim)
			ehci->reclaim_ready = 1;
		ehci_work (ehci, NULL);
		spin_unlock_irq (&ehci->lock);

		/* restart; khubd will disconnect devices */
		retval = ehci_start (hcd);

		/* here we "know" root ports should always stay powered;
		 * but some controllers may have lost all power.
		 */
		if (powerup) {
			ehci_dbg (ehci, "...powerup ports...\n");
			for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
				(void) ehci_hub_control(hcd,
					SetPortFeature, USB_PORT_FEAT_POWER,
						port--, NULL, 0);
			msleep(20);
		}
	}

	return retval;
}
811
812#endif
813
814/*-------------------------------------------------------------------------*/
815
/*
 * ehci_work is called from some interrupts, timers, and so on.
 * it calls driver completion functions, after dropping ehci->lock.
 * Caller must hold ehci->lock.
 */
static void ehci_work (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	timer_action_done (ehci, TIMER_IO_WATCHDOG);
	/* finish a deferred qh unlink once the IAA interrupt arrived */
	if (ehci->reclaim_ready)
		end_unlink_async (ehci, regs);

	/* another CPU may drop ehci->lock during a schedule scan while
	 * it reports urb completions.  this flag guards against bogus
	 * attempts at re-entrant schedule scanning.
	 */
	if (ehci->scanning)
		return;
	ehci->scanning = 1;
	scan_async (ehci, regs);
	/* next_uframe == -1 means the periodic schedule is inactive */
	if (ehci->next_uframe != -1)
		scan_periodic (ehci, regs);
	ehci->scanning = 0;

	/* the IO watchdog guards against hardware or driver bugs that
	 * misplace IRQs, and should let us run completely without IRQs.
	 * such lossage has been observed on both VT6202 and VT8235.
	 */
	if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
			(ehci->async->qh_next.ptr != NULL ||
			 ehci->periodic_sched != 0))
		timer_action (ehci, TIMER_IO_WATCHDOG);
}
847
848/*-------------------------------------------------------------------------*/
849
/* Top-half interrupt handler: acknowledge and dispatch every status bit
 * the driver enabled (INTR_MASK), then run ehci_work() if completions
 * or unlinks may be pending.  Returns IRQ_NONE for shared-IRQ misses.
 */
static irqreturn_t ehci_irq (struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	u32			status;
	int			bh;

	spin_lock (&ehci->lock);

	status = readl (&ehci->regs->status);

	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		ehci_dbg (ehci, "device removed\n");
		goto dead;
	}

	status &= INTR_MASK;
	if (!status) {			/* irq sharing? */
		spin_unlock(&ehci->lock);
		return IRQ_NONE;
	}

	/* clear (just) interrupts */
	writel (status, &ehci->regs->status);
	readl (&ehci->regs->command);	/* unblock posted write */
	bh = 0;

#ifdef	EHCI_VERBOSE_DEBUG
	/* unrequested/ignored: Frame List Rollover */
	dbg_status (ehci, "irq", status);
#endif

	/* INT, ERR, and IAA interrupt rates can be throttled */

	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
		if (likely ((status & STS_ERR) == 0))
			COUNT (ehci->stats.normal);
		else
			COUNT (ehci->stats.error);
		bh = 1;
	}

	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		COUNT (ehci->stats.reclaim);
		ehci->reclaim_ready = 1;
		bh = 1;
	}

	/* remote wakeup [4.3.1] */
	if ((status & STS_PCD) && hcd->remote_wakeup) {
		unsigned	i = HCS_N_PORTS (ehci->hcs_params);

		/* resume root hub? */
		status = readl (&ehci->regs->command);
		if (!(status & CMD_RUN))
			writel (status | CMD_RUN, &ehci->regs->command);

		/* scan ports for ones signaling resume */
		while (i--) {
			status = readl (&ehci->regs->port_status [i]);
			if (status & PORT_OWNER)
				continue;
			if (!(status & PORT_RESUME)
					|| ehci->reset_done [i] != 0)
				continue;

			/* start 20 msec resume signaling from this port,
			 * and make khubd collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.
			 */
			ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
			mod_timer (&hcd->rh_timer,
					ehci->reset_done [i] + 1);
			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
		}
	}

	/* PCI errors [4.15.2.4] */
	if (unlikely ((status & STS_FATAL) != 0)) {
		/* bogus "fatal" IRQs appear on some chips... why? */
		status = readl (&ehci->regs->status);
		dbg_cmd (ehci, "fatal", readl (&ehci->regs->command));
		dbg_status (ehci, "fatal", status);
		if (status & STS_HALT) {
			ehci_err (ehci, "fatal error\n");
dead:
			ehci_reset (ehci);
			writel (0, &ehci->regs->configured_flag);
			/* generic layer kills/unlinks all urbs, then
			 * uses ehci_stop to clean up the rest
			 */
			bh = 1;
		}
	}

	if (bh)
		ehci_work (ehci, regs);
	spin_unlock (&ehci->lock);
	return IRQ_HANDLED;
}
951
952/*-------------------------------------------------------------------------*/
953
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE:  control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
 *
 * Returns 0 or a negative errno (-ENOMEM if qtd allocation fails).
 */
static int ehci_urb_enqueue (
	struct usb_hcd	*hcd,
	struct usb_host_endpoint *ep,
	struct urb	*urb,
	int		mem_flags
) {
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	struct list_head	qtd_list;

	INIT_LIST_HEAD (&qtd_list);

	switch (usb_pipetype (urb->pipe)) {
	// case PIPE_CONTROL:
	// case PIPE_BULK:
	default:
		/* control and bulk go through the async schedule */
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async (ehci, ep, urb, &qtd_list, mem_flags);

	case PIPE_INTERRUPT:
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit (ehci, ep, urb, &qtd_list, mem_flags);

	case PIPE_ISOCHRONOUS:
		/* high speed iso uses itds; full/low speed (behind a
		 * transaction translator) uses sitds
		 */
		if (urb->dev->speed == USB_SPEED_HIGH)
			return itd_submit (ehci, urb, mem_flags);
		else
			return sitd_submit (ehci, urb, mem_flags);
	}
}
997
/* Unlink a qh from the async schedule, deferring behind any unlink
 * already using the IAA (interrupt-on-async-advance) mechanism.
 * Caller must hold ehci->lock.
 */
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* if we need to use IAA and it's busy, defer */
	if (qh->qh_state == QH_STATE_LINKED
			&& ehci->reclaim
			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) {
		struct ehci_qh		*last;

		/* append to the tail of the reclaim chain */
		for (last = ehci->reclaim;
				last->reclaim;
				last = last->reclaim)
			continue;
		qh->qh_state = QH_STATE_UNLINK_WAIT;
		last->reclaim = qh;

	/* bypass IAA if the hc can't care */
	} else if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state) && ehci->reclaim)
		end_unlink_async (ehci, NULL);

	/* something else might have unlinked the qh by now */
	if (qh->qh_state == QH_STATE_LINKED)
		start_unlink_async (ehci, qh);
}
1021
/* remove from hardware lists
 * completions normally happen asynchronously
 *
 * Returns 0, or a reschedule error for an interrupt qh that still has
 * queued requests.
 */

static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	struct ehci_qh		*qh;
	unsigned long		flags;

	spin_lock_irqsave (&ehci->lock, flags);
	switch (usb_pipetype (urb->pipe)) {
	// case PIPE_CONTROL:
	// case PIPE_BULK:
	default:
		/* control/bulk: kick off an async unlink; the urb is
		 * given back later from the completion path
		 */
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		unlink_async (ehci, qh);
		break;

	case PIPE_INTERRUPT:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
			intr_deschedule (ehci, qh);
			/* FALL THROUGH */
		case QH_STATE_IDLE:
			qh_completions (ehci, qh, NULL);
			break;
		default:
			ehci_dbg (ehci, "bogus qh %p state %d\n",
					qh, qh->qh_state);
			goto done;
		}

		/* reschedule QH iff another request is queued */
		if (!list_empty (&qh->qtd_list)
				&& HC_IS_RUNNING (hcd->state)) {
			int status;

			status = qh_schedule (ehci, qh);
			spin_unlock_irqrestore (&ehci->lock, flags);

			if (status != 0) {
				// shouldn't happen often, but ...
				// FIXME kill those tds' urbs
				err ("can't reschedule qh %p, err %d",
					qh, status);
			}
			return status;
		}
		break;

	case PIPE_ISOCHRONOUS:
		// itd or sitd ...

		// wait till next completion, do it then.
		// completion irqs can wait up to 1024 msec,
		break;
	}
done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	return 0;
}
1089
1090/*-------------------------------------------------------------------------*/
1091
1092// bulk qh holds the data toggle
1093
// bulk qh holds the data toggle

/* hc_driver.endpoint_disable: free the endpoint's qh once it is fully
 * unlinked.  Polls (sleeping one tick per rescan) until the hardware
 * has let go of the qh; iso streams (hw_info1 == 0) are just waited out.
 * Callers must already have unlinked any urbs for this endpoint.
 */
static void
ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	unsigned long		flags;
	struct ehci_qh		*qh, *tmp;

	/* ASSERT:  any requests/urbs are being unlinked */
	/* ASSERT:  nobody can be submitting urbs for this any more */

rescan:
	spin_lock_irqsave (&ehci->lock, flags);
	qh = ep->hcpriv;
	if (!qh)
		goto done;

	/* endpoints can be iso streams.  for now, we don't
	 * accelerate iso completions ... so spin a while.
	 */
	if (qh->hw_info1 == 0) {
		ehci_vdbg (ehci, "iso delay\n");
		goto idle_timeout;
	}

	/* a halted HC won't advance state on its own; force IDLE */
	if (!HC_IS_RUNNING (hcd->state))
		qh->qh_state = QH_STATE_IDLE;
	switch (qh->qh_state) {
	case QH_STATE_LINKED:
		/* verify the qh really is on the async ring */
		for (tmp = ehci->async->qh_next.qh;
				tmp && tmp != qh;
				tmp = tmp->qh_next.qh)
			continue;
		/* periodic qh self-unlinks on empty */
		if (!tmp)
			goto nogood;
		unlink_async (ehci, qh);
		/* FALL THROUGH */
	case QH_STATE_UNLINK:		/* wait for hw to finish? */
idle_timeout:
		/* drop the lock, sleep a tick, and look again */
		spin_unlock_irqrestore (&ehci->lock, flags);
		set_current_state (TASK_UNINTERRUPTIBLE);
		schedule_timeout (1);
		goto rescan;
	case QH_STATE_IDLE:		/* fully unlinked */
		if (list_empty (&qh->qtd_list)) {
			qh_put (qh);
			break;
		}
		/* else FALL THROUGH */
	default:
nogood:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  just leak this memory.
		 */
		ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
			qh, ep->desc.bEndpointAddress, qh->qh_state,
			list_empty (&qh->qtd_list) ? "" : "(has tds)");
		break;
	}
	ep->hcpriv = NULL;
done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	return;
}
1158
1159/*-------------------------------------------------------------------------*/
1160
/* ops table handed to usbcore; every root-hub and urb operation the
 * stack performs on an EHCI controller dispatches through here.
 */
static const struct hc_driver ehci_driver = {
	.description =		hcd_name,
	.product_desc =		"EHCI Host Controller",
	.hcd_priv_size =	sizeof(struct ehci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			ehci_irq,
	.flags =		HCD_MEMORY | HCD_USB2,

	/*
	 * basic lifecycle operations
	 */
	.reset =		ehci_hc_reset,
	.start =		ehci_start,
#ifdef	CONFIG_PM
	.suspend =		ehci_suspend,
	.resume =		ehci_resume,
#endif
	.stop =			ehci_stop,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		ehci_urb_enqueue,
	.urb_dequeue =		ehci_urb_dequeue,
	.endpoint_disable =	ehci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number =	ehci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	ehci_hub_status_data,
	.hub_control =		ehci_hub_control,
	.hub_suspend =		ehci_hub_suspend,
	.hub_resume =		ehci_hub_resume,
};
1203
1204/*-------------------------------------------------------------------------*/
1205
1206/* EHCI 1.0 doesn't require PCI */
1207
1208#ifdef CONFIG_PCI
1209
1210/* PCI driver selection metadata; PCI hotplugging uses this */
/* PCI driver selection metadata; PCI hotplugging uses this.
 * Matches by class code only (serial bus / USB / EHCI programming
 * interface 0x20), so any compliant EHCI controller binds here.
 */
static const struct pci_device_id pci_ids [] = { {
	/* handle any USB 2.0 EHCI controller */
	PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x20), ~0),
	.driver_data =	(unsigned long) &ehci_driver,
	},
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
1219
1220/* pci driver glue; this is a "new style" PCI driver module */
/* pci driver glue; this is a "new style" PCI driver module.
 * probe/remove/suspend/resume are the generic usbcore PCI helpers,
 * which call back into ehci_driver for the controller-specific work.
 */
static struct pci_driver ehci_pci_driver = {
	.name =		(char *) hcd_name,
	.id_table =	pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,

#ifdef	CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif
};
1233
1234#endif /* PCI */
1235
1236
/* module identification shown by modinfo; DRIVER_VERSION / DRIVER_DESC /
 * DRIVER_AUTHOR are defined earlier in ehci-hcd.c
 */
#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC

MODULE_DESCRIPTION (DRIVER_INFO);
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_LICENSE ("GPL");
1242
/* module entry point: bail if USB support is administratively disabled,
 * otherwise register the PCI glue (which probes any EHCI controllers).
 */
static int __init init (void)
{
	if (usb_disabled())
		return -ENODEV;

	/* sizes of the hardware-visible structures, useful when tuning */
	pr_debug ("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
		hcd_name,
		sizeof (struct ehci_qh), sizeof (struct ehci_qtd),
		sizeof (struct ehci_itd), sizeof (struct ehci_sitd));

	return pci_register_driver (&ehci_pci_driver);
}
module_init (init);
1256
/* module exit: unbind from all controllers and drop the PCI glue */
static void __exit cleanup (void)
{
	pci_unregister_driver (&ehci_pci_driver);
}
module_exit (cleanup);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
new file mode 100644
index 000000000000..2373537fabed
--- /dev/null
+++ b/drivers/usb/host/ehci-hub.c
@@ -0,0 +1,553 @@
1/*
2 * Copyright (c) 2001-2002 by David Brownell
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* this file is part of ehci-hcd.c */
20
21/*-------------------------------------------------------------------------*/
22
23/*
24 * EHCI Root Hub ... the nonsharable stuff
25 *
26 * Registers don't need cpu_to_le32, that happens transparently
27 */
28
29/*-------------------------------------------------------------------------*/
30
31#ifdef CONFIG_PM
32
/* hc_driver.hub_suspend: quiesce the controller, suspend every enabled
 * port we own (optionally arming wakeup events), then halt the HC.
 * Saves the command register so hub_resume can restore run state.
 * Always returns 0.
 */
static int ehci_hub_suspend (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	int			port;

	/* respect the minimum spacing between root-hub state changes */
	if (time_before (jiffies, ehci->next_statechange))
		msleep(5);

	port = HCS_N_PORTS (ehci->hcs_params);
	spin_lock_irq (&ehci->lock);

	/* stop schedules, clean any completed work */
	if (HC_IS_RUNNING(hcd->state)) {
		ehci_quiesce (ehci);
		hcd->state = HC_STATE_QUIESCING;
	}
	ehci->command = readl (&ehci->regs->command);
	if (ehci->reclaim)
		ehci->reclaim_ready = 1;
	ehci_work(ehci, NULL);

	/* suspend any active/unsuspended ports, maybe allow wakeup */
	while (port--) {
		u32 __iomem	*reg = &ehci->regs->port_status [port];
		u32		t1 = readl (reg);
		u32		t2 = t1;

		/* only suspend ports we own that are still enabled */
		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER))
			t2 |= PORT_SUSPEND;
		if (hcd->remote_wakeup)
			t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
		else
			t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);

		/* avoid register writes that would change nothing */
		if (t1 != t2) {
			ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
				port + 1, t1, t2);
			writel (t2, reg);
		}
	}

	/* turn off now-idle HC */
	ehci_halt (ehci);
	hcd->state = HC_STATE_SUSPENDED;

	ehci->next_statechange = jiffies + msecs_to_jiffies(10);
	spin_unlock_irq (&ehci->lock);
	return 0;
}
82
83
84/* caller has locked the root hub, and should reset/reinit on error */
/* caller has locked the root hub, and should reset/reinit on error.
 * hc_driver.hub_resume: reverse of hub_suspend.  If power was lost
 * (intr_enable reads back zero) the operational registers are re-seeded
 * first, and irqs stay masked until the end.  Resume signaling is driven
 * on every suspended port for ~20ms before being cleared.  Returns 0.
 */
static int ehci_hub_resume (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	u32			temp;
	int			i;
	int			intr_enable;

	if (time_before (jiffies, ehci->next_statechange))
		msleep(5);
	spin_lock_irq (&ehci->lock);

	/* re-init operational registers in case we lost power */
	if (readl (&ehci->regs->intr_enable) == 0) {
		/* at least some APM implementations will try to deliver
		 * IRQs right away, so delay them until we're ready.
		 */
		intr_enable = 1;
		writel (0, &ehci->regs->segment);
		writel (ehci->periodic_dma, &ehci->regs->frame_list);
		writel ((u32)ehci->async->qh_dma, &ehci->regs->async_next);
	} else
		intr_enable = 0;
	ehci_dbg(ehci, "resume root hub%s\n",
			intr_enable ? " after power loss" : "");

	/* restore CMD_RUN, framelist size, and irq threshold */
	writel (ehci->command, &ehci->regs->command);

	/* take ports out of suspend */
	i = HCS_N_PORTS (ehci->hcs_params);
	while (i--) {
		temp = readl (&ehci->regs->port_status [i]);
		/* wakeup events were only for the suspended state */
		temp &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);
		if (temp & PORT_SUSPEND) {
			ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
			temp |= PORT_RESUME;
		}
		writel (temp, &ehci->regs->port_status [i]);
	}
	i = HCS_N_PORTS (ehci->hcs_params);
	/* let resume signaling run its course before ending it */
	mdelay (20);
	while (i--) {
		temp = readl (&ehci->regs->port_status [i]);
		if ((temp & PORT_SUSPEND) == 0)
			continue;
		temp &= ~PORT_RESUME;
		writel (temp, &ehci->regs->port_status [i]);
		ehci_vdbg (ehci, "resumed port %d\n", i + 1);
	}
	(void) readl (&ehci->regs->command);	/* flush posted writes */

	/* maybe re-activate the schedule(s) */
	temp = 0;
	if (ehci->async->qh_next.qh)
		temp |= CMD_ASE;
	if (ehci->periodic_sched)
		temp |= CMD_PSE;
	if (temp) {
		ehci->command |= temp;
		writel (ehci->command, &ehci->regs->command);
	}

	ehci->next_statechange = jiffies + msecs_to_jiffies(5);
	hcd->state = HC_STATE_RUNNING;

	/* Now we can safely re-enable irqs */
	if (intr_enable)
		writel (INTR_MASK, &ehci->regs->intr_enable);

	spin_unlock_irq (&ehci->lock);
	return 0;
}
157
158#else
159
160#define ehci_hub_suspend NULL
161#define ehci_hub_resume NULL
162
163#endif /* CONFIG_PM */
164
165/*-------------------------------------------------------------------------*/
166
167static int check_reset_complete (
168 struct ehci_hcd *ehci,
169 int index,
170 int port_status
171) {
172 if (!(port_status & PORT_CONNECT)) {
173 ehci->reset_done [index] = 0;
174 return port_status;
175 }
176
177 /* if reset finished and it's still not enabled -- handoff */
178 if (!(port_status & PORT_PE)) {
179
180 /* with integrated TT, there's nobody to hand it to! */
181 if (ehci_is_TDI(ehci)) {
182 ehci_dbg (ehci,
183 "Failed to enable port %d on root hub TT\n",
184 index+1);
185 return port_status;
186 }
187
188 ehci_dbg (ehci, "port %d full speed --> companion\n",
189 index + 1);
190
191 // what happens if HCS_N_CC(params) == 0 ?
192 port_status |= PORT_OWNER;
193 writel (port_status, &ehci->regs->port_status [index]);
194
195 } else
196 ehci_dbg (ehci, "port %d high speed\n", index + 1);
197
198 return port_status;
199}
200
201/*-------------------------------------------------------------------------*/
202
203
204/* build "status change" packet (one or two bytes) from HC registers */
205
/* build "status change" packet (one or two bytes) from HC registers.
 * hc_driver.hub_status_data: sets one bit per port with a pending
 * change in buf (bit 0 reserved for hub-wide changes, unused here).
 * Returns the number of significant bytes, or 0 if nothing changed.
 */
static int
ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
{
	struct ehci_hcd	*ehci = hcd_to_ehci (hcd);
	u32		temp, status = 0;
	int		ports, i, retval = 1;
	unsigned long	flags;

	/* if !USB_SUSPEND, root hub timers won't get shut down ... */
	if (!HC_IS_RUNNING(hcd->state))
		return 0;

	/* init status to no-changes */
	buf [0] = 0;
	ports = HCS_N_PORTS (ehci->hcs_params);
	if (ports > 7) {
		/* more than 7 ports: the bitmap needs a second byte */
		buf [1] = 0;
		retval++;
	}

	/* no hub change reports (bit 0) for now (power, ...) */

	/* port N changes (bit N)? */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < ports; i++) {
		temp = readl (&ehci->regs->port_status [i]);
		if (temp & PORT_OWNER) {
			/* don't report this in GetPortStatus */
			if (temp & PORT_CSC) {
				/* PORT_CSC is write-one-to-clear */
				temp &= ~PORT_CSC;
				writel (temp, &ehci->regs->port_status [i]);
			}
			continue;
		}
		if (!(temp & PORT_CONNECT))
			ehci->reset_done [i] = 0;
		if ((temp & (PORT_CSC | PORT_PEC | PORT_OCC)) != 0
				// PORT_STAT_C_SUSPEND?
				|| ((temp & PORT_RESUME) != 0
					&& time_after (jiffies,
						ehci->reset_done [i]))) {
			if (i < 7)
			    buf [0] |= 1 << (i + 1);
			else
			    buf [1] |= 1 << (i - 7);
			status = STS_PCD;
		}
	}
	/* FIXME autosuspend idle root hubs */
	spin_unlock_irqrestore (&ehci->lock, flags);
	return status ? retval : 0;
}
258
259/*-------------------------------------------------------------------------*/
260
261static void
262ehci_hub_descriptor (
263 struct ehci_hcd *ehci,
264 struct usb_hub_descriptor *desc
265) {
266 int ports = HCS_N_PORTS (ehci->hcs_params);
267 u16 temp;
268
269 desc->bDescriptorType = 0x29;
270 desc->bPwrOn2PwrGood = 10; /* ehci 1.0, 2.3.9 says 20ms max */
271 desc->bHubContrCurrent = 0;
272
273 desc->bNbrPorts = ports;
274 temp = 1 + (ports / 8);
275 desc->bDescLength = 7 + 2 * temp;
276
277 /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
278 memset (&desc->bitmap [0], 0, temp);
279 memset (&desc->bitmap [temp], 0xff, temp);
280
281 temp = 0x0008; /* per-port overcurrent reporting */
282 if (HCS_PPC (ehci->hcs_params))
283 temp |= 0x0001; /* per-port power control */
284#if 0
285// re-enable when we support USB_PORT_FEAT_INDICATOR below.
286 if (HCS_INDICATOR (ehci->hcs_params))
287 temp |= 0x0080; /* per-port indicators (LEDs) */
288#endif
289 desc->wHubCharacteristics = (__force __u16)cpu_to_le16 (temp);
290}
291
292/*-------------------------------------------------------------------------*/
293
294#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
295
/* hc_driver.hub_control: handle the virtual root hub's control requests
 * (hub class Get/Set/Clear feature and descriptor requests) by reading
 * and writing the PORTSC registers directly.  Returns 0 on success or
 * -EPIPE ("stall") for unsupported or invalid requests.
 */
static int ehci_hub_control (
	struct usb_hcd	*hcd,
	u16		typeReq,
	u16		wValue,
	u16		wIndex,
	char		*buf,
	u16		wLength
) {
	struct ehci_hcd	*ehci = hcd_to_ehci (hcd);
	int		ports = HCS_N_PORTS (ehci->hcs_params);
	u32		temp, status;
	unsigned long	flags;
	int		retval = 0;

	/*
	 * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
	 * (track current state ourselves) ... blink for diagnostics,
	 * power, "this is the one", etc.  EHCI spec supports this.
	 */

	spin_lock_irqsave (&ehci->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;	/* ports are 1-based on the wire, 0-based here */
		temp = readl (&ehci->regs->port_status [wIndex]);
		/* companion controller owns this port; nothing to do */
		if (temp & PORT_OWNER)
			break;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			writel (temp & ~PORT_PE,
				&ehci->regs->port_status [wIndex]);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			/* change bits are write-one-to-clear */
			writel (temp | PORT_PEC,
				&ehci->regs->port_status [wIndex]);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (temp & PORT_RESET)
				goto error;
			if (temp & PORT_SUSPEND) {
				if ((temp & PORT_PE) == 0)
					goto error;
				/* resume signaling for 20 msec */
				writel ((temp & ~PORT_WAKE_BITS) | PORT_RESUME,
					&ehci->regs->port_status [wIndex]);
				/* GetPortStatus finishes it after this deadline */
				ehci->reset_done [wIndex] = jiffies
						+ msecs_to_jiffies (20);
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			/* we auto-clear this feature */
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC (ehci->hcs_params))
				writel (temp & ~PORT_POWER,
					&ehci->regs->port_status [wIndex]);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			writel (temp | PORT_CSC,
				&ehci->regs->port_status [wIndex]);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			writel (temp | PORT_OCC,
				&ehci->regs->port_status [wIndex]);
			break;
		case USB_PORT_FEAT_C_RESET:
			/* GetPortStatus clears reset */
			break;
		default:
			goto error;
		}
		readl (&ehci->regs->command);	/* unblock posted write */
		break;
	case GetHubDescriptor:
		ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *)
			buf);
		break;
	case GetHubStatus:
		/* no hub-wide feature/status flags */
		memset (buf, 0, 4);
		//cpu_to_le32s ((u32 *) buf);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		status = 0;
		temp = readl (&ehci->regs->port_status [wIndex]);

		// wPortChange bits
		if (temp & PORT_CSC)
			status |= 1 << USB_PORT_FEAT_C_CONNECTION;
		if (temp & PORT_PEC)
			status |= 1 << USB_PORT_FEAT_C_ENABLE;
		if (temp & PORT_OCC)
			status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;

		/* whoever resumes must GetPortStatus to complete it!! */
		if ((temp & PORT_RESUME)
				&& time_after (jiffies,
					ehci->reset_done [wIndex])) {
			status |= 1 << USB_PORT_FEAT_C_SUSPEND;
			ehci->reset_done [wIndex] = 0;

			/* stop resume signaling */
			temp = readl (&ehci->regs->port_status [wIndex]);
			writel (temp & ~PORT_RESUME,
				&ehci->regs->port_status [wIndex]);
			retval = handshake (
					&ehci->regs->port_status [wIndex],
					PORT_RESUME, 0, 2000 /* 2msec */);
			if (retval != 0) {
				ehci_err (ehci, "port %d resume error %d\n",
					wIndex + 1, retval);
				goto error;
			}
			temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
		}

		/* whoever resets must GetPortStatus to complete it!! */
		if ((temp & PORT_RESET)
				&& time_after (jiffies,
					ehci->reset_done [wIndex])) {
			status |= 1 << USB_PORT_FEAT_C_RESET;
			ehci->reset_done [wIndex] = 0;

			/* force reset to complete */
			writel (temp & ~PORT_RESET,
					&ehci->regs->port_status [wIndex]);
			retval = handshake (
					&ehci->regs->port_status [wIndex],
					PORT_RESET, 0, 500);
			if (retval != 0) {
				ehci_err (ehci, "port %d reset error %d\n",
					wIndex + 1, retval);
				goto error;
			}

			/* see what we found out */
			temp = check_reset_complete (ehci, wIndex,
				readl (&ehci->regs->port_status [wIndex]));
		}

		// don't show wPortStatus if it's owned by a companion hc
		if (!(temp & PORT_OWNER)) {
			if (temp & PORT_CONNECT) {
				status |= 1 << USB_PORT_FEAT_CONNECTION;
				// status may be from integrated TT
				status |= ehci_port_speed(ehci, temp);
			}
			if (temp & PORT_PE)
				status |= 1 << USB_PORT_FEAT_ENABLE;
			if (temp & (PORT_SUSPEND|PORT_RESUME))
				status |= 1 << USB_PORT_FEAT_SUSPEND;
			if (temp & PORT_OC)
				status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
			if (temp & PORT_RESET)
				status |= 1 << USB_PORT_FEAT_RESET;
			if (temp & PORT_POWER)
				status |= 1 << USB_PORT_FEAT_POWER;
		}

#ifndef	EHCI_VERBOSE_DEBUG
	if (status & ~0xffff)	/* only if wPortChange is interesting */
#endif
		dbg_port (ehci, "GetStatus", wIndex + 1, temp);
		// we "know" this alignment is good, caller used kmalloc()...
		*((__le32 *) buf) = cpu_to_le32 (status);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = readl (&ehci->regs->port_status [wIndex]);
		if (temp & PORT_OWNER)
			break;

		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			if ((temp & PORT_PE) == 0
					|| (temp & PORT_RESET) != 0)
				goto error;
			if (hcd->remote_wakeup)
				temp |= PORT_WAKE_BITS;
			writel (temp | PORT_SUSPEND,
				&ehci->regs->port_status [wIndex]);
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC (ehci->hcs_params))
				writel (temp | PORT_POWER,
					&ehci->regs->port_status [wIndex]);
			break;
		case USB_PORT_FEAT_RESET:
			if (temp & PORT_RESUME)
				goto error;
			/* line status bits may report this as low speed,
			 * which can be fine if this root hub has a
			 * transaction translator built in.
			 */
			if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
					&& !ehci_is_TDI(ehci)
					&& PORT_USB11 (temp)) {
				ehci_dbg (ehci,
					"port %d low speed --> companion\n",
					wIndex + 1);
				temp |= PORT_OWNER;
			} else {
				ehci_vdbg (ehci, "port %d reset\n", wIndex + 1);
				temp |= PORT_RESET;
				temp &= ~PORT_PE;

				/*
				 * caller must wait, then call GetPortStatus
				 * usb 2.0 spec says 50 ms resets on root
				 */
				ehci->reset_done [wIndex] = jiffies
						+ msecs_to_jiffies (50);
			}
			writel (temp, &ehci->regs->port_status [wIndex]);
			break;
		default:
			goto error;
		}
		readl (&ehci->regs->command);	/* unblock posted writes */
		break;

	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore (&ehci->lock, flags);
	return retval;
}
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
new file mode 100644
index 000000000000..9938697ff361
--- /dev/null
+++ b/drivers/usb/host/ehci-mem.c
@@ -0,0 +1,237 @@
1/*
2 * Copyright (c) 2001 by David Brownell
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* this file is part of ehci-hcd.c */
20
21/*-------------------------------------------------------------------------*/
22
23/*
24 * There's basically three types of memory:
25 * - data used only by the HCD ... kmalloc is fine
26 * - async and periodic schedules, shared by HC and HCD ... these
27 * need to use dma_pool or dma_alloc_coherent
28 * - driver buffers, read/written by HC ... single shot DMA mapped
29 *
30 * There's also PCI "register" data, which is memory mapped.
31 * No memory seen by this driver is pageable.
32 */
33
34/*-------------------------------------------------------------------------*/
35
36/* Allocate the key transfer structures from the previously allocated pool */
37
38static inline void ehci_qtd_init (struct ehci_qtd *qtd, dma_addr_t dma)
39{
40 memset (qtd, 0, sizeof *qtd);
41 qtd->qtd_dma = dma;
42 qtd->hw_token = cpu_to_le32 (QTD_STS_HALT);
43 qtd->hw_next = EHCI_LIST_END;
44 qtd->hw_alt_next = EHCI_LIST_END;
45 INIT_LIST_HEAD (&qtd->qtd_list);
46}
47
48static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags)
49{
50 struct ehci_qtd *qtd;
51 dma_addr_t dma;
52
53 qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma);
54 if (qtd != NULL) {
55 ehci_qtd_init (qtd, dma);
56 }
57 return qtd;
58}
59
/* return a qtd to the DMA pool it was allocated from */
static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
	dma_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma);
}
64
65
/* kref release callback: free a qh once its last reference is dropped.
 * BUGs if the qh still has queued qtds or is linked into a schedule,
 * since freeing it then would leave the hardware chasing freed memory.
 */
static void qh_destroy (struct kref *kref)
{
	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
	struct ehci_hcd *ehci = qh->ehci;

	/* clean qtds first, and know this is not linked */
	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg (ehci, "unused qh not empty!\n");
		BUG ();
	}
	if (qh->dummy)
		ehci_qtd_free (ehci, qh->dummy);
	/* drop the device reference taken when the qh was bound */
	usb_put_dev (qh->dev);
	dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
}
81
82static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, int flags)
83{
84 struct ehci_qh *qh;
85 dma_addr_t dma;
86
87 qh = (struct ehci_qh *)
88 dma_pool_alloc (ehci->qh_pool, flags, &dma);
89 if (!qh)
90 return qh;
91
92 memset (qh, 0, sizeof *qh);
93 kref_init(&qh->kref);
94 qh->ehci = ehci;
95 qh->qh_dma = dma;
96 // INIT_LIST_HEAD (&qh->qh_list);
97 INIT_LIST_HEAD (&qh->qtd_list);
98
99 /* dummy td enables safe urb queuing */
100 qh->dummy = ehci_qtd_alloc (ehci, flags);
101 if (qh->dummy == NULL) {
102 ehci_dbg (ehci, "no dummy td\n");
103 dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
104 qh = NULL;
105 }
106 return qh;
107}
108
/* to share a qh (cpu threads, or hc) */
/* take an additional reference on qh; pair with qh_put() */
static inline struct ehci_qh *qh_get (struct ehci_qh *qh)
{
	kref_get(&qh->kref);
	return qh;
}
115
/* drop a reference on qh; the last put frees it via qh_destroy() */
static inline void qh_put (struct ehci_qh *qh)
{
	kref_put(&qh->kref, qh_destroy);
}
120
121/*-------------------------------------------------------------------------*/
122
123/* The queue heads and transfer descriptors are managed from pools tied
124 * to each of the "per device" structures.
125 * This is the initialisation and cleanup code.
126 */
127
128static void ehci_mem_cleanup (struct ehci_hcd *ehci)
129{
130 if (ehci->async)
131 qh_put (ehci->async);
132 ehci->async = NULL;
133
134 /* DMA consistent memory and pools */
135 if (ehci->qtd_pool)
136 dma_pool_destroy (ehci->qtd_pool);
137 ehci->qtd_pool = NULL;
138
139 if (ehci->qh_pool) {
140 dma_pool_destroy (ehci->qh_pool);
141 ehci->qh_pool = NULL;
142 }
143
144 if (ehci->itd_pool)
145 dma_pool_destroy (ehci->itd_pool);
146 ehci->itd_pool = NULL;
147
148 if (ehci->sitd_pool)
149 dma_pool_destroy (ehci->sitd_pool);
150 ehci->sitd_pool = NULL;
151
152 if (ehci->periodic)
153 dma_free_coherent (ehci_to_hcd(ehci)->self.controller,
154 ehci->periodic_size * sizeof (u32),
155 ehci->periodic, ehci->periodic_dma);
156 ehci->periodic = NULL;
157
158 /* shadow periodic table */
159 if (ehci->pshadow)
160 kfree (ehci->pshadow);
161 ehci->pshadow = NULL;
162}
163
164/* remember to add cleanup code (above) if you add anything here */
165static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
166{
167 int i;
168
169 /* QTDs for control/bulk/intr transfers */
170 ehci->qtd_pool = dma_pool_create ("ehci_qtd",
171 ehci_to_hcd(ehci)->self.controller,
172 sizeof (struct ehci_qtd),
173 32 /* byte alignment (for hw parts) */,
174 4096 /* can't cross 4K */);
175 if (!ehci->qtd_pool) {
176 goto fail;
177 }
178
179 /* QHs for control/bulk/intr transfers */
180 ehci->qh_pool = dma_pool_create ("ehci_qh",
181 ehci_to_hcd(ehci)->self.controller,
182 sizeof (struct ehci_qh),
183 32 /* byte alignment (for hw parts) */,
184 4096 /* can't cross 4K */);
185 if (!ehci->qh_pool) {
186 goto fail;
187 }
188 ehci->async = ehci_qh_alloc (ehci, flags);
189 if (!ehci->async) {
190 goto fail;
191 }
192
193 /* ITD for high speed ISO transfers */
194 ehci->itd_pool = dma_pool_create ("ehci_itd",
195 ehci_to_hcd(ehci)->self.controller,
196 sizeof (struct ehci_itd),
197 32 /* byte alignment (for hw parts) */,
198 4096 /* can't cross 4K */);
199 if (!ehci->itd_pool) {
200 goto fail;
201 }
202
203 /* SITD for full/low speed split ISO transfers */
204 ehci->sitd_pool = dma_pool_create ("ehci_sitd",
205 ehci_to_hcd(ehci)->self.controller,
206 sizeof (struct ehci_sitd),
207 32 /* byte alignment (for hw parts) */,
208 4096 /* can't cross 4K */);
209 if (!ehci->sitd_pool) {
210 goto fail;
211 }
212
213 /* Hardware periodic table */
214 ehci->periodic = (__le32 *)
215 dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller,
216 ehci->periodic_size * sizeof(__le32),
217 &ehci->periodic_dma, 0);
218 if (ehci->periodic == NULL) {
219 goto fail;
220 }
221 for (i = 0; i < ehci->periodic_size; i++)
222 ehci->periodic [i] = EHCI_LIST_END;
223
224 /* software shadow of hardware table */
225 ehci->pshadow = kmalloc (ehci->periodic_size * sizeof (void *), flags);
226 if (ehci->pshadow == NULL) {
227 goto fail;
228 }
229 memset (ehci->pshadow, 0, ehci->periodic_size * sizeof (void *));
230
231 return 0;
232
233fail:
234 ehci_dbg (ehci, "couldn't init memory\n");
235 ehci_mem_cleanup (ehci);
236 return -ENOMEM;
237}
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
new file mode 100644
index 000000000000..7df9b9af54f6
--- /dev/null
+++ b/drivers/usb/host/ehci-q.c
@@ -0,0 +1,1090 @@
1/*
2 * Copyright (c) 2001-2002 by David Brownell
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* this file is part of ehci-hcd.c */
20
21/*-------------------------------------------------------------------------*/
22
23/*
24 * EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
25 *
26 * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
27 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
28 * buffers needed for the larger number). We use one QH per endpoint, queue
29 * multiple urbs (all three types) per endpoint. URBs may need several qtds.
30 *
31 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
32 * interrupts) needs careful scheduling. Performance improvements can be
33 * an ongoing challenge. That's in "ehci-sched.c".
34 *
35 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
36 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
37 * (b) special fields in qh entries or (c) split iso entries. TTs will
38 * buffer low/full speed data so the host collects it at high speed.
39 */
40
41/*-------------------------------------------------------------------------*/
42
43/* fill a qtd, returning how much of the buffer we were able to queue up */
44
static int
qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
		int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf [0] = cpu_to_le32 ((u32)addr);
	qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		/* start the remaining entries at the next 4K boundary */
		buf += 0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf [i] = cpu_to_le32 ((u32)addr);
			qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	/* token carries the byte count in bits 16..30; shadow it in
	 * qtd->length so completion can compute the residue later.
	 */
	qtd->hw_token = cpu_to_le32 ((count << 16) | token);
	qtd->length = count;

	return count;
}
83
84/*-------------------------------------------------------------------------*/
85
static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	/* aim the qh's transfer overlay at this qtd; no alternate path */
	qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma);
	qh->hw_alt_next = EHCI_LIST_END;

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {	/* "toggle from qtd" not set */
		unsigned	is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));	/* PID "in" bit */
		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			qh->hw_token &= ~__constant_cpu_to_le32 (QTD_TOGGLE);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb ();
	qh->hw_token &= __constant_cpu_to_le32 (QTD_TOGGLE | QTD_STS_PING);
}
115
116/* if it weren't for a common silicon quirk (writing the dummy into the qh
117 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
118 * recovery (including urb dequeue) would need software changes to a QH...
119 */
120static void
121qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
122{
123 struct ehci_qtd *qtd;
124
125 if (list_empty (&qh->qtd_list))
126 qtd = qh->dummy;
127 else {
128 qtd = list_entry (qh->qtd_list.next,
129 struct ehci_qtd, qtd_list);
130 /* first qtd may already be partially processed */
131 if (cpu_to_le32 (qtd->qtd_dma) == qh->hw_current)
132 qtd = NULL;
133 }
134
135 if (qtd)
136 qh_update (ehci, qh, qtd);
137}
138
139/*-------------------------------------------------------------------------*/
140
/* fold a completed qtd's token into its urb: accumulate actual_length,
 * and on a hardware halt map the fault bits onto usb error codes.
 * called with urb->lock held (see qh_completions).
 */
static void qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely (urb->status != -EINPROGRESS))
		return;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		urb->status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		/* NOTE: order matters; babble/MMF/DBE take precedence
		 * over the XACT and CERR based diagnoses below.
		 */
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			urb->status = -EOVERFLOW;
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			urb->status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			urb->status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad crc, wrong PID, etc; retried */
			if (QTD_CERR (token))
				urb->status = -EPIPE;
			else {
				ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint (urb->pipe),
					usb_pipein (urb->pipe) ? "in" : "out");
				urb->status = -EPROTO;
			}
		/* CERR nonzero + no errors + halt --> stall */
		} else if (QTD_CERR (token))
			urb->status = -EPIPE;
		else	/* unknown */
			urb->status = -EPROTO;

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, urb->status);

		/* if async CSPLIT failed, try cleaning out the TT buffer */
		if (urb->status != -EPIPE
				&& urb->dev->tt && !usb_pipeint (urb->pipe)
				&& ((token & QTD_STS_MMF) != 0
					|| QTD_CERR(token) == 0)
				&& (!ehci_is_TDI(ehci)
					|| urb->dev->tt->hub !=
					   ehci_to_hcd(ehci)->self.root_hub)) {
#ifdef DEBUG
			struct usb_device *tt = urb->dev->tt->hub;
			dev_dbg (&tt->dev,
				"clear tt buffer port %d, a%d ep%d t%08x\n",
				urb->dev->ttport, urb->dev->devnum,
				usb_pipeendpoint (urb->pipe), token);
#endif /* DEBUG */
			usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
		}
	}
}
215
/* give a completed urb back to its driver: finalize urb->status,
 * update stats, drop the qh reference taken at queue time, and
 * temporarily release ehci->lock around the completion callback.
 */
static void
ehci_urb_done (struct ehci_hcd *ehci, struct urb *urb, struct pt_regs *regs)
__releases(ehci->lock)
__acquires(ehci->lock)
{
	if (likely (urb->hcpriv != NULL)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw_info2 & __constant_cpu_to_le32 (0x00ff)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
		}
		qh_put (qh);
	}

	spin_lock (&urb->lock);
	urb->hcpriv = NULL;
	switch (urb->status) {
	case -EINPROGRESS:		/* success */
		urb->status = 0;
		/* FALLTHROUGH: counts as a normal completion */
	default:			/* fault */
		COUNT (ehci->stats.complete);
		break;
	case -EREMOTEIO:		/* fault or normal */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			urb->status = 0;
		COUNT (ehci->stats.complete);
		break;
	case -ECONNRESET:		/* canceled */
	case -ENOENT:
		COUNT (ehci->stats.unlink);
		break;
	}
	spin_unlock (&urb->lock);

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb (ehci_to_hcd(ehci), urb, regs);
	spin_lock (&ehci->lock);
}
268
269static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
270static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
271
272static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
273static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
274
275/*
276 * Process and free completed qtds for a qh, returning URBs to drivers.
277 * Chases up to qh->hw_current. Returns number of completions called,
278 * indicating how much "real" work we did.
279 */
#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, struct pt_regs *regs)
{
	/* "last" defers freeing each qtd until after we've read its
	 * successor; "end" is the dummy, so urbs queued while we're
	 * dropping the lock in ehci_urb_done() aren't scanned here.
	 */
	struct ehci_qtd		*last = NULL, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			stopped;
	unsigned		count = 0;
	int			do_status = 0;
	u8			state;

	if (unlikely (list_empty (&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE: unlinking expects to be done in queue order.
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				/* urb boundary: report the finished urb */
				ehci_urb_done (ehci, last->urb, regs);
				count++;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = le32_to_cpu (qtd->hw_token);

		/* always clean up qtds the hc de-activated */
		if ((token & QTD_STS_ACTIVE) == 0) {

			if ((token & QTD_STS_HALT) != 0) {
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next & EHCI_LIST_END)) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
			break;

		} else {
			/* still-active qtd, but the queue is stopped
			 * (unlinked, or the controller isn't running)
			 */
			stopped = 1;

			if (unlikely (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)))
				urb->status = -ESHUTDOWN;

			/* ignore active urbs unless some previous qtd
			 * for the urb faulted (including short read) or
			 * its urb was canceled.  we may patch qh or qtds.
			 */
			if (likely (urb->status == -EINPROGRESS))
				continue;

			/* issue status after short control reads */
			if (unlikely (do_status != 0)
					&& QTD_PID (token) == 0 /* OUT */) {
				do_status = 0;
				continue;
			}

			/* token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_le32 (qtd->qtd_dma)
						== qh->hw_current)
				token = le32_to_cpu (qh->hw_token);

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((HALT_BIT & qh->hw_token) == 0) {
halt:
				qh->hw_token |= HALT_BIT;
				wmb ();
			}
		}

		/* remove it from the queue */
		spin_lock (&urb->lock);
		qtd_copy_status (ehci, urb, qtd->length, token);
		do_status = (urb->status == -EREMOTEIO)
				&& usb_pipecontrol (urb->pipe);
		spin_unlock (&urb->lock);

		/* patch the predecessor's hw link past this qtd before
		 * unlinking it, when the queue is already stopped
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}
		list_del (&qtd->qtd_list);
		last = qtd;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done (ehci, last->urb, regs);
		count++;
		ehci_qtd_free (ehci, last);
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
		case QH_STATE_LINKED:
			/* should be rare for periodic transfers,
			 * except maybe high bandwidth ...
			 */
			if (qh->period) {
				intr_deschedule (ehci, qh);
				(void) qh_schedule (ehci, qh);
			} else
				unlink_async (ehci, qh);
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}
443
444/*-------------------------------------------------------------------------*/
445
446// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
447#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
448// ... and packet size, for any kind of endpoint descriptor
449#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
450
451/*
452 * reverse of qh_urb_transaction: free a list of TDs.
453 * used for cleanup after errors, before HC sees an URB's TDs.
454 */
455static void qtd_list_free (
456 struct ehci_hcd *ehci,
457 struct urb *urb,
458 struct list_head *qtd_list
459) {
460 struct list_head *entry, *temp;
461
462 list_for_each_safe (entry, temp, qtd_list) {
463 struct ehci_qtd *qtd;
464
465 qtd = list_entry (entry, struct ehci_qtd, qtd_list);
466 list_del (&qtd->qtd_list);
467 ehci_qtd_free (ehci, qtd);
468 }
469}
470
471/*
472 * create a list of filled qtds for this URB; won't link into qh.
473 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	int			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, maxpacket;
	int			is_input;
	u32			token;

	/*
	 * URBs map to sequences of QTDs: one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);	/* error retry count */
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill (qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest),
			token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;	/* data stage starts at DATA1 */
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	if (likely (len > 0))
		buf = urb->transfer_dma;
	else
		buf = 0;

	/* for zero length DATA stages, STATUS is always IN */
	if (!buf || is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill (qtd, buf, len, token, maxpacket);
		len -= this_qtd_len;
		buf += this_qtd_len;
		/* short reads advance via the qh overlay's alternate path */
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely (len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/* unless the bulk/interrupt caller wants a chance to clean
	 * up after short reads, hc should advance qh past this urb
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END;

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely (buf != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk (urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill (qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}
609
610/*-------------------------------------------------------------------------*/
611
612// Would be best to create all qh's from config descriptors,
613// when each interface/altsetting is established. Unlink
614// any previous qh and cancel its urbs first; endpoints are
615// implicitly reset then (data toggle too).
616// That'd mean updating how usbcore talks to HCDs. (2.7?)
617
618
619/*
620 * Each QH holds a qtd list; a QH is used for everything except iso.
621 *
622 * For interrupt urbs, the scheduler must set the microframe scheduling
623 * mask(s) each time the QH gets scheduled. For highspeed, that's
624 * just one microframe in the s-mask. For split interrupt transactions
625 * there are additional complications: c-mask, maybe FSTNs.
626 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	int			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;	/* qh dword 1 and 2 contents */
	int			is_input, type;
	int			maxp = 0;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
				hb_mult (maxp) * max_packet (maxp));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			/* urb->interval is in uframes; qh->period in frames */
			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				dbg ("intr period %d uframes, NYET!",
						urb->interval);
				goto done;
			}
		} else {
			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			qh->period = urb->interval;
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = usb_get_dev (urb->dev);

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);
		info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (!ehci_is_TDI(ehci)
				|| urb->dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub)
			info2 |= urb->dev->tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		/* error path: undo the allocation and device reference */
		qh_put (qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_info1 = cpu_to_le32 (info1);
	qh->hw_info2 = cpu_to_le32 (info2);
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}
763
764/*-------------------------------------------------------------------------*/
765
766/* move qh (and its qtds) onto async queue; maybe enable queue. */
767
static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__le32		dma = QH_NEXT (qh->qh_dma);
	struct ehci_qh	*head;

	/* (re)start the async schedule? */
	head = ehci->async;
	timer_action_done (ehci, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32	cmd = readl (&ehci->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void) handshake (&ehci->regs->status, STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			writel (cmd, &ehci->regs->command);
			ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh (ehci, qh);

	/* splice right after start; set our links before the hw
	 * link points at us, so the HC never sees a half-linked qh
	 */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}
804
805/*-------------------------------------------------------------------------*/
806
807#define QH_ADDR_MASK __constant_cpu_to_le32(0x7f)
808
809/*
810 * For control/bulk/interrupt, return QH with these TDs appended.
811 * Allocates and initializes the QH if necessary.
812 * Returns null if it can't allocate a QH it needs to.
813 * If the QH has TDs (urbs) already, that's great.
814 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;

	/* *ptr caches the endpoint's qh (ep->hcpriv); create on first use */
	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw_info1 &= ~QH_ADDR_MASK;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__le32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT;	/* keep it inactive during the swap */
			wmb ();
			dummy = qh->dummy;

			/* copy the first qtd's contents into the old dummy,
			 * keeping the dummy's own dma address
			 */
			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			__list_splice (qtd_list, qh->qtd_list.prev);

			/* the first qtd becomes the qh's new dummy */
			ehci_qtd_init (qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT (dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;	/* un-halt: restore the real token */

			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}
892
893/*-------------------------------------------------------------------------*/
894
/* queue an urb's qtds on the endpoint's async qh, linking the qh
 * into the async schedule if it was idle. returns 0, or -ENOMEM
 * (after freeing the qtd list) when no qh could be allocated.
 */
static int
submit_async (
	struct ehci_hcd		*ehci,
	struct usb_host_endpoint *ep,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			mem_flags
) {
	struct ehci_qtd		*qtd;
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;

	qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, ep->hcpriv);
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh != NULL)) {
		if (likely (qh->qh_state == QH_STATE_IDLE))
			qh_link_async (ehci, qh_get (qh));
	}
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL)) {
		qtd_list_free (ehci, urb, qtd_list);
		return -ENOMEM;
	}
	return 0;
}
937
938/*-------------------------------------------------------------------------*/
939
940/* the async qh for the qtds being reclaimed are now unlinked from the HC */
941
static void end_unlink_async (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	struct ehci_qh		*qh = ehci->reclaim;
	struct ehci_qh		*next;

	timer_action_done (ehci, TIMER_IAA_WATCHDOG);

	// qh->hw_next = cpu_to_le32 (qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put (qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	ehci->reclaim_ready = 0;
	qh->reclaim = NULL;

	/* now idle, so completions can refresh or relink the qh */
	qh_completions (ehci, qh, regs);

	if (!list_empty (&qh->qtd_list)
			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		/* work was queued during the unlink; put it back */
		qh_link_async (ehci, qh);
	else {
		qh_put (qh);		// refcount from async list

		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
				&& ehci->async->qh_next.qh == NULL)
			timer_action (ehci, TIMER_ASYNC_OFF);
	}

	if (next) {
		ehci->reclaim = NULL;
		start_unlink_async (ehci, next);
	}
}
981
982/* makes sure the async qh will become idle */
983/* caller must own ehci->lock */
984
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		cmd = readl (&ehci->regs->command);
	struct ehci_qh	*prev;

#ifdef DEBUG
	assert_spin_locked(&ehci->lock);
	if (ehci->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
			)
		BUG ();
#endif

	/* stop async schedule right now? */
	if (unlikely (qh == ehci->async)) {
		/* can't get here without STS_ASS set */
		if (ehci_to_hcd(ehci)->state != HC_STATE_HALT) {
			writel (cmd & ~CMD_ASE, &ehci->regs->command);
			wmb ();
			// handshake later, if we need to
		}
		timer_action_done (ehci, TIMER_ASYNC_OFF);
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	/* find this qh's predecessor in the (circular) async ring */
	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	/* splice it out of both software and hardware lists */
	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;
	wmb ();

	if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) {
		/* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci, NULL);
		return;
	}

	/* ask the HC to doorbell (IAA) once it's done with the qh */
	ehci->reclaim_ready = 0;
	cmd |= CMD_IAAD;
	writel (cmd, &ehci->regs->command);
	(void) readl (&ehci->regs->command);	/* flush posted write */
	timer_action (ehci, TIMER_IAA_WATCHDOG);
}
1036
1037/*-------------------------------------------------------------------------*/
1038
/* scan every qh on the async ring for completed work; called from
 * interrupt/watchdog context with ehci->lock held.
 */
static void
scan_async (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	struct ehci_qh		*qh;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;

	/* bump the scan stamp, skipping zero (qh->stamp uses it below) */
	if (!++(ehci->stamp))
		ehci->stamp++;
	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
	qh = ehci->async->qh_next.qh;
	if (likely (qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty (&qh->qtd_list)
					&& qh->stamp != ehci->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get (qh);
				qh->stamp = ehci->stamp;
				temp = qh_completions (ehci, qh, regs);
				qh_put (qh);
				if (temp != 0) {
					goto rescan;
				}
			}

			/* unlink idle entries, reducing HC PCI usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty (&qh->qtd_list)) {
				if (qh->stamp == ehci->stamp)
					action = TIMER_ASYNC_SHRINK;
				else if (!ehci->reclaim
					    && qh->qh_state == QH_STATE_LINKED)
					start_unlink_async (ehci, qh);
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
new file mode 100644
index 000000000000..f6c86354e304
--- /dev/null
+++ b/drivers/usb/host/ehci-sched.c
@@ -0,0 +1,1999 @@
1/*
2 * Copyright (c) 2001-2004 by David Brownell
3 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software Foundation,
17 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20/* this file is part of ehci-hcd.c */
21
22/*-------------------------------------------------------------------------*/
23
24/*
25 * EHCI scheduled transaction support: interrupt, iso, split iso
26 * These are called "periodic" transactions in the EHCI spec.
27 *
28 * Note that for interrupt transfers, the QH/QTD manipulation is shared
29 * with the "asynchronous" transaction support (control/bulk transfers).
30 * The only real difference is in how interrupt transfers are scheduled.
31 *
32 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
33 * It keeps track of every ITD (or SITD) that's linked, and holds enough
34 * pre-calculated schedule data to make appending to the queue be quick.
35 */
36
37static int ehci_get_frame (struct usb_hcd *hcd);
38
39/*-------------------------------------------------------------------------*/
40
41/*
42 * periodic_next_shadow - return "next" pointer on shadow list
43 * @periodic: host pointer to qh/itd/sitd
44 * @tag: hardware tag for type of this record
45 */
46static union ehci_shadow *
47periodic_next_shadow (union ehci_shadow *periodic, __le32 tag)
48{
49 switch (tag) {
50 case Q_TYPE_QH:
51 return &periodic->qh->qh_next;
52 case Q_TYPE_FSTN:
53 return &periodic->fstn->fstn_next;
54 case Q_TYPE_ITD:
55 return &periodic->itd->itd_next;
56 // case Q_TYPE_SITD:
57 default:
58 return &periodic->sitd->sitd_next;
59 }
60}
61
62/* caller must hold ehci->lock */
/*
 * periodic_unlink - remove one entry from a single periodic frame slot
 * @ehci: host controller state
 * @frame: frame-list index to edit
 * @ptr: the qh/itd/sitd to remove (matched against shadow pointers)
 *
 * Patches both the software shadow list and the hardware frame list so
 * the HC stops reaching @ptr from this frame.  Silently succeeds when
 * @ptr is not found (a shared interrupt entry may already be gone).
 * Caller must hold ehci->lock.
 */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow [frame];
	__le32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
		hw_p = here.hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
	*hw_p = *here.hw_next;
}
85
86/* how many of the uframe's 125 usecs are allocated? */
/*
 * periodic_usecs - bandwidth already committed in one microframe
 * @ehci: host controller state
 * @frame: frame-list index
 * @uframe: microframe (0..7) within that frame
 *
 * Walks the shadow list for @frame and totals the usecs each linked
 * qh/itd/sitd claims during @uframe.  Schedulers use this to enforce
 * the 80%% periodic budget (100 usec per 125 usec microframe).
 */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	__le32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;

	while (q->ptr) {
		switch (Q_NEXT_TYPE (*hw_p)) {
		case Q_TYPE_QH:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			break;
		// case Q_TYPE_FSTN:
		default:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END) {
				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			/* high speed iso: per-uframe cost was precomputed */
			usecs += q->itd->usecs [uframe];
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* is it in the S-mask? (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep &
						__constant_cpu_to_le32 (1<<31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO (188);
			}

			/* ... C-mask? (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_le32 (1 << (8 + uframe))) {
				/* worst case for IN complete-split */
				usecs += q->sitd->stream->c_usecs;
			}

			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		}
	}
#ifdef	DEBUG
	/* the scheduler should never let a microframe exceed its budget */
	if (usecs > 100)
		ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}
151
152/*-------------------------------------------------------------------------*/
153
154static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
155{
156 if (!dev1->tt || !dev2->tt)
157 return 0;
158 if (dev1->tt != dev2->tt)
159 return 0;
160 if (dev1->tt->multi)
161 return dev1->ttport == dev2->ttport;
162 else
163 return 1;
164}
165
166/* return true iff the device's transaction translator is available
167 * for a periodic transfer starting at the specified frame, using
168 * all the uframes in the mask.
169 */
/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 *
 * @period is in frames; every @period-th frame slot from @frame on is
 * checked for entries that would drive the same TT during a microframe
 * in @uf_mask.  Returns 0 on collision or on a malformed schedule.
 */
static int tt_no_collision (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage: split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		__le32			type;

		here = ehci->pshadow [frame];
		type = Q_NEXT_TYPE (ehci->periodic [frame]);
		while (here.ptr) {
			switch (type) {
			case Q_TYPE_ITD:
				/* high speed iso: uses no TT, skip */
				type = Q_NEXT_TYPE (here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				if (same_tt (dev, here.qh->dev)) {
					u32		mask;

					mask = le32_to_cpu (here.qh->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.qh->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt (dev, here.sitd->urb->dev)) {
					u16		mask;

					mask = le32_to_cpu (here.sitd
								->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			// case Q_TYPE_FSTN:
			default:
				ehci_dbg (ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}
239
240/*-------------------------------------------------------------------------*/
241
/*
 * enable_periodic - turn on the HC's periodic schedule processing
 *
 * Waits until the Periodic Schedule Status bit shows the schedule is
 * currently off (PSE changes only take effect at frame boundaries),
 * then sets CMD_PSE.  On handshake timeout the controller is marked
 * halted and the error is returned.
 */
static int enable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* has clearing PSE taken effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) | CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = readl (&ehci->regs->frame_index)
				% (ehci->periodic_size << 3);
	return 0;
}
266
/*
 * disable_periodic - turn off the HC's periodic schedule processing
 *
 * Waits until PSS shows the schedule is currently running (so a prior
 * enable has fully taken effect), then clears CMD_PSE.  On handshake
 * timeout the controller is marked halted and the error is returned.
 */
static int disable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* has setting PSE taken effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) & ~CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... */

	/* nothing periodic left for ehci_work to scan */
	ehci->next_uframe = -1;
	return 0;
}
288
289/*-------------------------------------------------------------------------*/
290
291/* periodic schedule slots have iso tds (normal or split) first, then a
292 * sparse tree for active interrupt transfers.
293 *
294 * this just links in a qh; caller guarantees uframe masks are set right.
295 * no FSTN support (yet; ehci 0.96+)
296 */
/*
 * qh_link_periodic - link an interrupt qh into the periodic schedule
 * @ehci: host controller state
 * @qh: qh whose start slot and uframe masks are already chosen
 *
 * Inserts @qh into every @period-th frame from qh->start on, after the
 * iso entries at each list head, keeping each branch sorted slow-to-fast
 * so interior tree nodes can be shared.  Takes a qh refcount, updates
 * usbfs bandwidth accounting, and enables the periodic schedule if this
 * is the first periodic entry.  Returns 0 or enable_periodic()'s error.
 */
static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period = qh->period;

	dev_dbg (&qh->dev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, le32_to_cpup (&qh->hw_info2) & 0xffff,
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period) {
		union ehci_shadow	*prev = &ehci->pshadow [i];
		u32			*hw_p = &ehci->periodic [i];
		union ehci_shadow	here = *prev;
		u32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE (*hw_p);
			if (type == Q_TYPE_QH)
				break;
			prev = periodic_next_shadow (prev, type);
			/* NOTE(review): reads hw_next through the qh member
			 * even for itd/sitd/fstn entries -- presumably relies
			 * on hw_next being the first field of every schedule
			 * struct; confirm against ehci.h layouts.
			 */
			hw_p = &here.qh->hw_next;
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw_next = *hw_p;
			/* make the qh fully visible before the HC can see it */
			wmb ();
			prev->qh = qh;
			*hw_p = QH_NEXT (qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh_get (qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		return enable_periodic (ehci);

	return 0;
}
361
/*
 * qh_unlink_periodic - remove an interrupt qh from the periodic schedule
 * @ehci: host controller state
 * @qh: a linked interrupt qh
 *
 * Unlinks @qh from every frame slot it occupies, reverses the usbfs
 * bandwidth accounting, drops the schedule's refcount, and disables the
 * periodic schedule when nothing periodic remains.  The HC may still be
 * following qh->qh_next; intr_deschedule() handles the settle delay.
 */
static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period;

	// FIXME:
	// IF this isn't high speed
	//   and this qh is active in the current uframe
	//   (and overlay token SplitXstate is false?)
	// THEN
	//   qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */);

	/* high bandwidth, or otherwise part of every microframe */
	if ((period = qh->period) == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period)
		periodic_unlink (ehci, i, qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	dev_dbg (&qh->dev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->period, le32_to_cpup (&qh->hw_info2) & 0xffff,
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;
	qh_put (qh);

	/* maybe turn off periodic schedule */
	ehci->periodic_sched--;
	if (!ehci->periodic_sched)
		(void) disable_periodic (ehci);
}
401
/*
 * intr_deschedule - unlink an interrupt qh and wait for the HC to let go
 * @ehci: host controller state
 * @qh: qh to take off the periodic schedule
 *
 * After qh_unlink_periodic() the hardware may still hold cached state
 * for the qh, so delay before marking it idle and terminating hw_next.
 */
static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	wait;

	qh_unlink_periodic (ehci, qh);

	/* simple/paranoid:  always delay, expecting the HC needs to read
	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
	 * expect khubd to clean up after any CSPLITs we won't issue.
	 * active high speed queues may need bigger delays...
	 */
	if (list_empty (&qh->qtd_list)
			|| (__constant_cpu_to_le32 (0x0ff << 8)
					& qh->hw_info2) != 0)
		wait = 2;
	else
		wait = 55;	/* worst case: 3 * 1024 */

	udelay (wait);
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_next = EHCI_LIST_END;
	/* publish the idle state before anyone relinks the qh */
	wmb ();
}
425
426/*-------------------------------------------------------------------------*/
427
/*
 * check_period - is there bandwidth for @usecs at this slot and period?
 * @ehci: host controller state
 * @frame: first frame-list slot to test
 * @uframe: microframe within each tested frame (must be < 8)
 * @period: distance in frames between tested slots; 0 means every uframe
 * @usecs: bandwidth needed in each tested microframe
 *
 * Returns 1 when every affected microframe still has room under the
 * 80%% periodic budget, else 0.
 */
static int check_period (
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	period,
	unsigned	usecs
) {
	int		claimed;

	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely (period == 0)) {
		do {
			/* NOTE(review): only uframes 0..6 are tested here,
			 * although the comment above says "every" microframe;
			 * confirm whether skipping uframe 7 is intentional
			 * (CSPLIT-wrap territory) or an off-by-one.
			 */
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs (ehci, frame, uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < ehci->periodic_size);

	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs (ehci, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < ehci->periodic_size);
	}

	// success!
	return 1;
}
473
/*
 * check_intr_schedule - can @qh start at (@frame, @uframe)?
 * @ehci: host controller state
 * @frame: candidate start frame
 * @uframe: candidate start microframe
 * @qh: interrupt qh with period/usecs/c_usecs/gap_uf precomputed
 * @c_maskp: out-param, CSPLIT mask to merge into hw_info2 (0 if none)
 *
 * Returns 0 when SPLIT bandwidth, CSPLIT bandwidth, and TT availability
 * all check out; -ENOSPC otherwise.
 */
static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	const struct ehci_qh	*qh,
	__le32			*c_maskp
)
{
	int		retval = -ENOSPC;
	u8		mask;

	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
		goto done;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		/* high speed: no complete-splits needed */
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = cpu_to_le32 (mask << 8);

	mask |= 1 << uframe;
	if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
		if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
					qh->period, qh->c_usecs))
			goto done;
		if (!check_period (ehci, frame, uframe + qh->gap_uf,
					qh->period, qh->c_usecs))
			goto done;
		retval = 0;
	}
done:
	return retval;
}
519
520/* "first fit" scheduling policy used the first time through,
521 * or when the previous schedule slot can't be re-used.
522 */
/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 *
 * Picks (frame, uframe) for @qh -- reusing qh->start when it still has
 * bandwidth, else scanning high-period frames first -- then writes the
 * S/C masks into hw_info2 and links the qh via qh_link_periodic().
 * Returns 0 on success or a scheduling/enable error.
 */
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	__le32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh_refresh(ehci, qh);
	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		/* recover the previous uframe from the saved S-mask */
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* "normal" case, uframing flexible except with splits */
		if (qh->period) {
			frame = qh->period - 1;
			do {
				for (uframe = 0; uframe < 8; uframe++) {
					status = check_intr_schedule (ehci,
							frame, uframe, qh,
							&c_mask);
					if (status == 0)
						break;
				}
			} while (status && frame--);

		/* qh->period == 0 means every uframe */
		} else {
			frame = 0;
			status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
		}
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= __constant_cpu_to_le32 (~0xffff);
		qh->hw_info2 |= qh->period
			? cpu_to_le32 (1 << uframe)
			: __constant_cpu_to_le32 (0xff);
		qh->hw_info2 |= c_mask;
	} else
		ehci_dbg (ehci, "reused qh %p schedule\n", qh);

	/* stuff into the periodic schedule */
	status = qh_link_periodic (ehci, qh);
done:
	return status;
}
585
/*
 * intr_submit - queue an interrupt urb
 * @ehci: host controller state
 * @ep: target endpoint; ep->hcpriv caches the qh
 * @urb: the interrupt urb
 * @qtd_list: qtds already built for @urb
 * @mem_flags: allocation flags (unused beyond qh creation here)
 *
 * First probes with an empty td list so any qh allocation/scheduling
 * error surfaces before the urb's own qtds are committed, then appends
 * the real qtds.  On error the qtds are freed.  Returns 0 or -errno.
 */
static int intr_submit (
	struct ehci_hcd		*ehci,
	struct usb_host_endpoint *ep,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	int			status = 0;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = ep->desc.bEndpointAddress;

	spin_lock_irqsave (&ehci->lock, flags);

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
	BUG_ON (qh == NULL);

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}
630
631/*-------------------------------------------------------------------------*/
632
633/* ehci_iso_stream ops work with both ITD and SITD */
634
635static struct ehci_iso_stream *
636iso_stream_alloc (int mem_flags)
637{
638 struct ehci_iso_stream *stream;
639
640 stream = kmalloc(sizeof *stream, mem_flags);
641 if (likely (stream != NULL)) {
642 memset (stream, 0, sizeof(*stream));
643 INIT_LIST_HEAD(&stream->td_list);
644 INIT_LIST_HEAD(&stream->free_list);
645 stream->next_uframe = -1;
646 stream->refcount = 1;
647 }
648 return stream;
649}
650
/*
 * iso_stream_init - fill in endpoint/bandwidth data for an iso stream
 * @ehci: host controller state
 * @stream: freshly allocated stream to initialize
 * @dev: the device owning the endpoint
 * @pipe: urb pipe value encoding endpoint number and direction
 * @interval: urb interval (uframes for high speed, frames for full speed)
 *
 * Precomputes the ITD buffer-page template (high speed) or the SITD
 * address/smask template (full speed through a TT), plus the per-uframe
 * usec costs used by the bandwidth checks and usbfs accounting.
 */
static void
iso_stream_init (
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct usb_device	*dev,
	int			pipe,
	unsigned		interval
)
{
	/* start-split masks for OUT, indexed by payload / 188 bytes */
	static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	long			bandwidth;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint (pipe);
	is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
	maxp = usb_maxpacket(dev, pipe, !is_input);
	if (is_input) {
		buf1 = (1 << 11);
	} else {
		buf1 = 0;
	}

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = hb_mult(maxp);

		stream->highspeed = 1;

		maxp = max_packet(maxp);
		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_le32 (buf1);
		stream->buf2 = cpu_to_le32 (multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->usecs = HS_USECS_ISO (maxp);
		bandwidth = stream->usecs * 8;
		bandwidth /= 1 << (interval - 1);

	} else {
		u32		addr;

		/* build the SITD device-address word: port, hub, ep, dev */
		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->usecs = HS_USECS_ISO (maxp);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 31;
			/* IN: data arrives on complete-splits; the
			 * start-split itself is tiny
			 */
			stream->c_usecs = stream->usecs;
			stream->usecs = HS_USECS_ISO (1);
			stream->raw_mask = 1;

			/* pessimistic c-mask */
			tmp = usb_calc_bus_time (USB_SPEED_FULL, 1, 0, maxp)
					/ (125 * 1000);
			stream->raw_mask |= 3 << (tmp + 9);
		} else
			stream->raw_mask = smask_out [maxp / 188];
		bandwidth = stream->usecs + stream->c_usecs;
		bandwidth /= 1 << (interval + 2);

		/* stream->splits gets created from raw_mask later */
		stream->address = cpu_to_le32 (addr);
	}
	stream->bandwidth = bandwidth;

	stream->udev = dev;

	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;
}
740
/*
 * iso_stream_put - drop a reference to an iso stream
 * @ehci: host controller state
 * @stream: stream to release
 *
 * When only the dev->ep reference remains, the cached (s)itds on the
 * free list are returned to their dma pools, the endpoint's hcpriv link
 * is cleared, and the stream itself is freed.  Unlike a qh, a stream
 * carries no persistent state worth keeping around.
 */
static void
iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
{
	stream->refcount--;

	/* free whenever just a dev->ep reference remains.
	 * not like a QH -- no persistent state (toggle, halt)
	 */
	if (stream->refcount == 1) {
		int		is_in;

		// BUG_ON (!list_empty(&stream->td_list));

		while (!list_empty (&stream->free_list)) {
			struct list_head	*entry;

			entry = stream->free_list.next;
			list_del (entry);

			/* knows about ITD vs SITD */
			if (stream->highspeed) {
				struct ehci_itd		*itd;

				itd = list_entry (entry, struct ehci_itd,
						itd_list);
				dma_pool_free (ehci->itd_pool, itd,
						itd->itd_dma);
			} else {
				struct ehci_sitd	*sitd;

				sitd = list_entry (entry, struct ehci_sitd,
						sitd_list);
				dma_pool_free (ehci->sitd_pool, sitd,
						sitd->sitd_dma);
			}
		}

		is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
		stream->bEndpointAddress &= 0x0f;
		stream->ep->hcpriv = NULL;

		if (stream->rescheduled) {
			ehci_info (ehci, "ep%d%s-iso rescheduled "
				"%lu times in %lu seconds\n",
				stream->bEndpointAddress, is_in ? "in" : "out",
				stream->rescheduled,
				((jiffies - stream->start)/HZ)
				);
		}

		kfree(stream);
	}
}
794
795static inline struct ehci_iso_stream *
796iso_stream_get (struct ehci_iso_stream *stream)
797{
798 if (likely (stream != NULL))
799 stream->refcount++;
800 return stream;
801}
802
/*
 * iso_stream_find - look up (or create) the iso stream for an urb's endpoint
 * @ehci: host controller state
 * @urb: iso urb identifying device, endpoint, direction, and interval
 *
 * The stream lives in ep->hcpriv.  Allocates and initializes one on
 * first use; rejects the endpoint if hcpriv already holds a qh (the
 * endpoint is being used for non-iso transfers).  Returns the stream
 * with an extra reference (caller must iso_stream_put), or NULL.
 */
static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct ehci_iso_stream	*stream;
	struct usb_host_endpoint *ep;
	unsigned long		flags;

	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave (&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely (stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely (stream != NULL)) {
			/* dev->ep owns the initial refcount */
			ep->hcpriv = stream;
			stream->ep = ep;
			iso_stream_init(ehci, stream, urb->dev, urb->pipe,
					urb->interval);
		}

	/* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
	} else if (unlikely (stream->hw_info1 != 0)) {
		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	/* caller guarantees an eventual matching iso_stream_put */
	stream = iso_stream_get (stream);

	spin_unlock_irqrestore (&ehci->lock, flags);
	return stream;
}
844
845/*-------------------------------------------------------------------------*/
846
847/* ehci_iso_sched ops can be ITD-only or SITD-only */
848
849static struct ehci_iso_sched *
850iso_sched_alloc (unsigned packets, int mem_flags)
851{
852 struct ehci_iso_sched *iso_sched;
853 int size = sizeof *iso_sched;
854
855 size += packets * sizeof (struct ehci_iso_packet);
856 iso_sched = kmalloc (size, mem_flags);
857 if (likely (iso_sched != NULL)) {
858 memset(iso_sched, 0, size);
859 INIT_LIST_HEAD (&iso_sched->td_list);
860 }
861 return iso_sched;
862}
863
/*
 * itd_sched_init - precompute per-packet ITD transaction words
 * @iso_sched: schedule descriptor sized for urb->number_of_packets
 * @stream: stream supplying the transfer interval
 * @urb: iso urb with transfer_dma and iso_frame_desc[] filled in
 *
 * Computes iso_sched->span (total uframes covered) and, for each packet,
 * the hw transaction dword, the buffer page base, and whether the packet
 * crosses a 4K page boundary (so itd_patch can use a second page slot).
 */
static inline void
itd_sched_init (
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*uframe = &iso_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;		/* offset within the page */
		/* interrupt only on the last packet, unless suppressed */
		if (unlikely (((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a td */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}
904
905static void
906iso_sched_free (
907 struct ehci_iso_stream *stream,
908 struct ehci_iso_sched *iso_sched
909)
910{
911 if (!iso_sched)
912 return;
913 // caller must hold ehci->lock!
914 list_splice (&iso_sched->td_list, &stream->free_list);
915 kfree (iso_sched);
916}
917
/*
 * itd_urb_transaction - build the iso schedule and ITDs for an urb
 * @stream: high-speed iso stream for this endpoint
 * @ehci: host controller state
 * @urb: the iso urb
 * @mem_flags: allocation flags for the schedule and any new ITDs
 *
 * Allocates an ehci_iso_sched, fills in the per-packet transaction
 * data, then reserves enough ITDs -- preferring recycled ones from the
 * stream's free list, dropping ehci->lock around dma_pool_alloc when a
 * fresh one is needed.  The schedule is parked in urb->hcpriv until
 * iso_stream_schedule() consumes it.  Returns 0 or -ENOMEM.
 */
static int
itd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	int			mem_flags
)
{
	struct ehci_itd		*itd;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_iso_sched	*sched;
	unsigned long		flags;

	sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (unlikely (sched == NULL))
		return -ENOMEM;

	itd_sched_init (sched, stream, urb);

	/* interval < 8 uframes: packets share itds; else one itd each */
	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/* free_list.next might be cache-hot ... but maybe
		 * the HC caches it too. avoid that issue for now.
		 */

		/* prefer previously-allocated itds */
		if (likely (!list_empty(&stream->free_list))) {
			itd = list_entry (stream->free_list.prev,
					struct ehci_itd, itd_list);
			list_del (&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else
			itd = NULL;

		if (!itd) {
			/* dma_pool_alloc may sleep: drop the lock */
			spin_unlock_irqrestore (&ehci->lock, flags);
			itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
		}

		if (unlikely (NULL == itd)) {
			iso_sched_free (stream, sched);
			spin_unlock_irqrestore (&ehci->lock, flags);
			return -ENOMEM;
		}
		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;
		list_add (&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore (&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}
984
985/*-------------------------------------------------------------------------*/
986
987static inline int
988itd_slot_ok (
989 struct ehci_hcd *ehci,
990 u32 mod,
991 u32 uframe,
992 u8 usecs,
993 u32 period
994)
995{
996 uframe %= period;
997 do {
998 /* can't commit more than 80% periodic == 100 usec */
999 if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
1000 > (100 - usecs))
1001 return 0;
1002
1003 /* we know urb->interval is 2^N uframes */
1004 uframe += period;
1005 } while (uframe < mod);
1006 return 1;
1007}
1008
/*
 * sitd_slot_ok - bandwidth and TT check for a full speed iso transfer
 * @ehci: host controller state
 * @mod: schedule size in uframes
 * @stream: stream holding raw_mask and usec costs
 * @uframe: candidate starting uframe
 * @sched: per-urb schedule (unused here beyond the signature)
 * @period_uframes: interval in uframes (power of two, multiple of 8
 *     for full speed since interval was shifted by 3)
 *
 * Verifies the TT is idle and both SPLIT and CSPLIT microframes have
 * budget at every occurrence.  On success also records stream->splits.
 * Returns 1 if the slot works, else 0.
 */
static inline int
sitd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	struct ehci_iso_stream	*stream,
	u32			uframe,
	struct ehci_iso_sched	*sched,
	u32			period_uframes
)
{
	u32			mask, tmp;
	u32			frame, uf;

	mask = stream->raw_mask << (uframe & 7);

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* this multi-pass logic is simple, but performance may
	 * suffer when the schedule data isn't cached.
	 */

	/* check bandwidth */
	uframe %= period_uframes;
	do {
		u32		max_used;

		frame = uframe >> 3;
		uf = uframe & 7;

		/* tt must be idle for start(s), any gap, and csplit.
		 * assume scheduling slop leaves 10+% for control/bulk.
		 */
		if (!tt_no_collision (ehci, period_uframes << 3,
				stream->udev, frame, mask))
			return 0;

		/* check starts (OUT uses more than one) */
		max_used = 100 - stream->usecs;
		for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (periodic_usecs (ehci, frame, uf) > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->c_usecs) {
			max_used = 100 - stream->c_usecs;
			do {
				tmp = 1 << uf;
				tmp <<= 8;
				if ((stream->raw_mask & tmp) == 0)
					continue;
				if (periodic_usecs (ehci, frame, uf)
						> max_used)
					return 0;
			} while (++uf < 8);
		}

		/* we know urb->interval is 2^N uframes */
		uframe += period_uframes;
	} while (uframe < mod);

	/* NOTE(review): uframe here is the post-loop value; presumably
	 * (uframe & 7) is unchanged because period_uframes is a multiple
	 * of 8 for full speed, making this equal to "mask" above --
	 * confirm, since reusing mask would be clearer.
	 */
	stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7));
	return 1;
}
1075
1076/*
1077 * This scheduler plans almost as far into the future as it has actual
1078 * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
1079 * "as small as possible" to be cache-friendlier.) That limits the size
1080 * transfers you can stream reliably; avoid more than 64 msec per urb.
1081 * Also avoid queue depths of less than ehci's worst irq latency (affected
1082 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
1083 * and other factors); or more than about 230 msec total (for portability,
1084 * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
1085 */
1086
1087#define SCHEDULE_SLOP 10 /* frames */
1088
/*
 * iso_stream_schedule - pick the starting uframe for an iso urb
 * @ehci: host controller state
 * @urb: urb whose hcpriv holds the ehci_iso_sched from *_urb_transaction
 * @stream: the endpoint's iso stream
 *
 * If the stream already has tds queued, the urb simply continues at
 * stream->next_uframe (failing with -EL2NSYNC if the host fell behind).
 * Otherwise it scans forward from "now + slop" for a slot with enough
 * bandwidth, assuming URB_ISO_ASAP semantics.  On success writes
 * urb->start_frame (uframes for high speed, frames for full speed) and
 * returns 0; on failure frees the schedule and returns -errno.
 */
static int
iso_stream_schedule (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, start, max, period;
	int			status;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_iso_sched	*sched = urb->hcpriv;

	if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
		ehci_dbg (ehci, "iso request %p too long\n", urb);
		status = -EFBIG;
		goto fail;
	}

	if ((stream->depth + sched->span) > mod) {
		ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n",
			urb, stream->depth, sched->span, mod);
		status = -EFBIG;
		goto fail;
	}

	now = readl (&ehci->regs->frame_index) % mod;

	/* when's the last uframe this urb could start? */
	max = now + mod;

	/* typical case: reuse current schedule. stream is still active,
	 * and no gaps from host falling behind (irq delays etc)
	 */
	if (likely (!list_empty (&stream->td_list))) {
		start = stream->next_uframe;
		if (start < now)
			start += mod;
		if (likely ((start + sched->span) < max))
			goto ready;
		/* else fell behind; someday, try to reschedule */
		status = -EL2NSYNC;
		goto fail;
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than ehci->i_thresh allows; scheduling itself
	 * isn't free, the slop should handle reasonably slow cpus. it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	start = SCHEDULE_SLOP * 8 + (now & ~0x07);
	start %= mod;
	stream->next_uframe = start;

	/* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */

	period = urb->interval;
	if (!stream->highspeed)
		period <<= 3;	/* full speed interval is in frames */

	/* find a uframe slot with enough bandwidth */
	for (; start < (stream->next_uframe + period); start++) {
		int		enough_space;

		/* check schedule: enough space? */
		if (stream->highspeed)
			enough_space = itd_slot_ok (ehci, mod, start,
					stream->usecs, period);
		else {
			/* CSPLITs can't wrap past the frame boundary */
			if ((start % 8) >= 6)
				continue;
			enough_space = sitd_slot_ok (ehci, mod, stream,
					start, sched, period);
		}

		/* schedule it here if there's enough bandwidth */
		if (enough_space) {
			stream->next_uframe = start % mod;
			goto ready;
		}
	}

	/* no room in the schedule */
	ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
		list_empty (&stream->td_list) ? "" : "re",
		urb, now, max);
	status = -ENOSPC;

fail:
	iso_sched_free (stream, sched);
	urb->hcpriv = NULL;
	return status;

ready:
	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = stream->next_uframe;
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return 0;
}
1189
1190/*-------------------------------------------------------------------------*/
1191
1192static inline void
1193itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
1194{
1195 int i;
1196
1197 itd->hw_next = EHCI_LIST_END;
1198 itd->hw_bufp [0] = stream->buf0;
1199 itd->hw_bufp [1] = stream->buf1;
1200 itd->hw_bufp [2] = stream->buf2;
1201
1202 for (i = 0; i < 8; i++)
1203 itd->index[i] = -1;
1204
1205 /* All other fields are filled when scheduling */
1206}
1207
/* Record one iso packet (schedule entry @index) into uframe slot
 * @uframe of @itd: transaction word, page select, and the low/high
 * halves of the buffer pointer.  @first marks the first packet placed
 * in this iTD; only later packets may advance itd->pg when their
 * buffer crosses a 4KB page.
 */
static inline void
itd_patch (
	struct ehci_itd *itd,
	struct ehci_iso_sched *iso_sched,
	unsigned index,
	u16 uframe,
	int first
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet [index];
	unsigned pg = itd->pg;

	// BUG_ON (pg == 6 && uf->cross);

	uframe &= 0x07;
	itd->index [uframe] = index;

	/* the transaction word carries the page index in bits 14:12 */
	itd->hw_transaction [uframe] = uf->transaction;
	itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12);
	itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0);
	itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely (!first && uf->cross)) {
		/* buffer crossed a 4KB page: switch to the next page slot */
		u64 bufp = uf->bufp + 4096;
		itd->pg = ++pg;
		itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0);
		itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32));
	}
}
1238
/* Prepend @itd to periodic schedule slot @frame, in both the software
 * shadow list and the hardware list.
 */
static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].itd = itd;
	itd->frame = frame;
	/* iTD contents must be visible before the HC sees the link */
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}
1250
/* fit urb's itds into the selected schedule slot; activate as needed */
/* Caller holds ehci->lock (see itd_submit).  Consumes the iso_sched
 * parked in urb->hcpriv, filling each iTD uframe by uframe and linking
 * an iTD into the schedule whenever the current frame is done.
 * Returns 0, or enable_periodic()'s result when the periodic schedule
 * was idle before this urb.
 */
static int
itd_link_urb (
	struct ehci_hcd *ehci,
	struct urb *urb,
	unsigned mod,
	struct ehci_iso_stream *stream
)
{
	int packet, first = 1;
	unsigned next_uframe, uframe, frame;
	struct ehci_iso_sched *iso_sched = urb->hcpriv;
	struct ehci_itd *itd;

	next_uframe = stream->next_uframe % mod;

	/* first urb on this stream: account its bandwidth */
	if (unlikely (list_empty(&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			urb->interval,
			next_uframe >> 3, next_uframe & 0x7);
		stream->start = jiffies;
	}
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
		if (itd == NULL) {
			/* ASSERT:  we have all necessary itds */
			// BUG_ON (list_empty (&iso_sched->td_list));

			/* ASSERT:  no itds for this endpoint in this uframe */

			/* take the next preallocated iTD and own it */
			itd = list_entry (iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail (&itd->itd_list, &stream->td_list);
			itd->stream = iso_stream_get (stream);
			itd->urb = usb_get_urb (urb);
			first = 1;
			itd_init (stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd->usecs [uframe] = stream->usecs;
		itd_patch (itd, iso_sched, packet, uframe, first);
		first = 0;

		next_uframe += stream->interval;
		stream->depth += stream->interval;
		next_uframe %= mod;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link (ehci, frame % ehci->periodic_size, itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free (stream, iso_sched);
	urb->hcpriv = NULL;

	timer_action (ehci, TIMER_IO_WATCHDOG);
	if (unlikely (!ehci->periodic_sched++))
		return enable_periodic (ehci);
	return 0;
}
1327
/* error status bits in an iTD transaction word */
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

/* Process one completed iTD: record per-packet status and length into
 * the urb's iso_frame_desc[], recycle the iTD onto the stream's free
 * list, and, when this was the urb's last packet, give the urb back.
 * Returns nonzero iff the urb was completed, so the caller must assume
 * the completion handler may have modified the periodic schedule.
 */
static unsigned
itd_complete (
	struct ehci_hcd *ehci,
	struct ehci_itd *itd,
	struct pt_regs *regs
) {
	struct urb *urb = itd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	unsigned uframe;
	int urb_index = -1;
	struct ehci_iso_stream *stream = itd->stream;
	struct usb_device *dev;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely (itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc [urb_index];

		t = le32_to_cpup (&itd->hw_transaction [uframe]);
		itd->hw_transaction [uframe] = 0;
		stream->depth -= stream->interval;

		/* report transfer status */
		if (unlikely (t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein (urb->pipe)
					? -ENOSR  /* hc couldn't read */
					: -ECOMM; /* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE))
				desc->actual_length = EHCI_ITD_LENGTH (t);
		} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH (t);
		}
	}

	/* drop this iTD's references; keep its memory on the free list */
	usb_put_urb (urb);
	itd->urb = NULL;
	itd->stream = NULL;
	list_move (&itd->itd_list, &stream->free_list);
	iso_stream_put (ehci, stream);

	/* handle completion now? */
	if (likely ((urb_index + 1) != urb->number_of_packets))
		return 0;

	/* ASSERT: it's really the last itd for this urb
	list_for_each_entry (itd, &stream->td_list, itd_list)
		BUG_ON (itd->urb == urb);
	 */

	/* give urb back to the driver ... can be out-of-order */
	dev = usb_get_dev (urb->dev);
	ehci_urb_done (ehci, urb, regs);
	urb = NULL;

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (unlikely (!ehci->periodic_sched))
		(void) disable_periodic (ehci);
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;

	/* last urb on the stream: release its bandwidth accounting */
	if (unlikely (list_empty (&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}
	iso_stream_put (ehci, stream);
	usb_put_dev (dev);

	return 1;
}
1415
1416/*-------------------------------------------------------------------------*/
1417
/* Submit an iso urb using iTDs: find (or create) the stream, allocate
 * the iTDs without holding any lock, then schedule and link the urb
 * under ehci->lock.  On any failure the stream reference taken by
 * iso_stream_find() is dropped.
 */
static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (unlikely (stream == NULL)) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	/* the stream's interval is fixed once it's in use */
	if (unlikely (urb->interval != stream->interval)) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (stream, ehci, urb, mem_flags);
	if (unlikely (status < 0)) {
		ehci_dbg (ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	status = iso_stream_schedule (ehci, urb, stream);
	if (likely (status == 0))
		itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	spin_unlock_irqrestore (&ehci->lock, flags);

done:
	if (unlikely (status < 0))
		iso_stream_put (ehci, stream);
	return status;
}
1466
1467#ifdef CONFIG_USB_EHCI_SPLIT_ISO
1468
1469/*-------------------------------------------------------------------------*/
1470
1471/*
1472 * "Split ISO TDs" ... used for USB 1.1 devices going through the
1473 * TTs in USB 2.0 hubs. These need microframe scheduling.
1474 */
1475
/* Precompute per-packet transaction words and buffer pointers for a
 * split-iso urb, storing them in @iso_sched for later use by
 * sitd_patch().
 */
static inline void
sitd_sched_init (
	struct ehci_iso_sched *iso_sched,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet *packet = &iso_sched->packet [i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		/* sITD length field is 10 bits wide */
		length = urb->iso_frame_desc [i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = SITD_STS_ACTIVE;
		/* interrupt only on the urb's last packet (unless disabled) */
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		/* count of 188-byte chunks, one per start-split */
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}
1523
/* Allocate (preferring the stream's free list) one sITD per packet and
 * park the per-packet schedule info in urb->hcpriv.  Returns 0 or
 * -ENOMEM.  Drops and retakes ehci->lock around dma_pool_alloc().
 */
static int
sitd_urb_transaction (
	struct ehci_iso_stream *stream,
	struct ehci_hcd *ehci,
	struct urb *urb,
	int mem_flags
)
{
	struct ehci_sitd *sitd;
	dma_addr_t sitd_dma;
	int i;
	struct ehci_iso_sched *iso_sched;
	unsigned long flags;

	iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init (iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE:  for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/* free_list.next might be cache-hot ... but maybe
		 * the HC caches it too. avoid that issue for now.
		 */

		/* prefer previously-allocated sitds */
		if (!list_empty(&stream->free_list)) {
			sitd = list_entry (stream->free_list.prev,
					 struct ehci_sitd, sitd_list);
			list_del (&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else
			sitd = NULL;

		/* nothing recyclable: allocate outside the lock */
		if (!sitd) {
			spin_unlock_irqrestore (&ehci->lock, flags);
			sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
		}

		/* allocation failed: release whatever was gathered so far */
		if (!sitd) {
			iso_sched_free (stream, iso_sched);
			spin_unlock_irqrestore (&ehci->lock, flags);
			return -ENOMEM;
		}
		memset (sitd, 0, sizeof *sitd);
		sitd->sitd_dma = sitd_dma;
		list_add (&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore (&ehci->lock, flags);
	return 0;
}
1590
1591/*-------------------------------------------------------------------------*/
1592
1593static inline void
1594sitd_patch (
1595 struct ehci_iso_stream *stream,
1596 struct ehci_sitd *sitd,
1597 struct ehci_iso_sched *iso_sched,
1598 unsigned index
1599)
1600{
1601 struct ehci_iso_packet *uf = &iso_sched->packet [index];
1602 u64 bufp = uf->bufp;
1603
1604 sitd->hw_next = EHCI_LIST_END;
1605 sitd->hw_fullspeed_ep = stream->address;
1606 sitd->hw_uframe = stream->splits;
1607 sitd->hw_results = uf->transaction;
1608 sitd->hw_backpointer = EHCI_LIST_END;
1609
1610 bufp = uf->bufp;
1611 sitd->hw_buf [0] = cpu_to_le32 (bufp);
1612 sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32);
1613
1614 sitd->hw_buf [1] = cpu_to_le32 (uf->buf1);
1615 if (uf->cross)
1616 bufp += 4096;
1617 sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32);
1618 sitd->index = index;
1619}
1620
/* Prepend @sitd to periodic schedule slot @frame, in both the software
 * shadow list and the hardware list.
 */
static inline void
sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow [frame];
	sitd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].sitd = sitd;
	sitd->frame = frame;
	/* sITD contents must be visible before the HC sees the link */
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (sitd->sitd_dma) | Q_TYPE_SITD;
}
1632
/* fit urb's sitds into the selected schedule slot; activate as needed */
/* Caller holds ehci->lock (see sitd_submit).  Consumes the iso_sched
 * parked in urb->hcpriv, linking one sITD per packet, spaced
 * stream->interval frames apart.  Returns 0, or enable_periodic()'s
 * result when the periodic schedule was idle before this urb.
 */
static int
sitd_link_urb (
	struct ehci_hcd *ehci,
	struct urb *urb,
	unsigned mod,
	struct ehci_iso_stream *stream
)
{
	int packet;
	unsigned next_uframe;
	struct ehci_iso_sched *sched = urb->hcpriv;
	struct ehci_sitd *sitd;

	next_uframe = stream->next_uframe;

	/* first urb on this stream: account its bandwidth */
	if (list_empty(&stream->td_list)) {
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			(next_uframe >> 3) % ehci->periodic_size,
			stream->interval, le32_to_cpu (stream->splits));
		stream->start = jiffies;
	}
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = 0, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT:  we have all necessary sitds */
		BUG_ON (list_empty (&sched->td_list));

		/* ASSERT:  no itds for this endpoint in this frame */

		/* take the next preallocated sITD and own it */
		sitd = list_entry (sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail (&sitd->sitd_list, &stream->td_list);
		sitd->stream = iso_stream_get (stream);
		sitd->urb = usb_get_urb (urb);

		sitd_patch (stream, sitd, sched, packet);
		sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
				sitd);

		/* one frame (8 uframes) per interval */
		next_uframe += stream->interval << 3;
		stream->depth += stream->interval << 3;
	}
	stream->next_uframe = next_uframe % mod;

	/* don't need that schedule data any more */
	iso_sched_free (stream, sched);
	urb->hcpriv = NULL;

	timer_action (ehci, TIMER_IO_WATCHDOG);
	if (!ehci->periodic_sched++)
		return enable_periodic (ehci);
	return 0;
}
1697
1698/*-------------------------------------------------------------------------*/
1699
/* error status bits in a sITD results word */
#define	SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
			| SITD_STS_XACT | SITD_STS_MMF)

/* Process one completed sITD: record the packet's status/length into
 * the urb's iso_frame_desc[], recycle the sITD onto the stream's free
 * list, and, when this was the urb's last packet, give the urb back.
 * Returns nonzero iff the urb was completed, so the caller must assume
 * the completion handler may have modified the periodic schedule.
 */
static unsigned
sitd_complete (
	struct ehci_hcd *ehci,
	struct ehci_sitd *sitd,
	struct pt_regs *regs
) {
	struct urb *urb = sitd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	int urb_index = -1;
	struct ehci_iso_stream *stream = sitd->stream;
	struct usb_device *dev;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc [urb_index];
	t = le32_to_cpup (&sitd->hw_results);

	/* report transfer status */
	if (t & SITD_ERRS) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein (urb->pipe)
				? -ENOSR  /* hc couldn't read */
				: -ECOMM; /* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else /* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else {
		desc->status = 0;
		/* hw reports residue; convert to bytes transferred */
		desc->actual_length = desc->length - SITD_LENGTH (t);
	}

	/* drop this sITD's references; keep its memory on the free list */
	usb_put_urb (urb);
	sitd->urb = NULL;
	sitd->stream = NULL;
	list_move (&sitd->sitd_list, &stream->free_list);
	stream->depth -= stream->interval << 3;
	iso_stream_put (ehci, stream);

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		return 0;

	/* ASSERT: it's really the last sitd for this urb
	list_for_each_entry (sitd, &stream->td_list, sitd_list)
		BUG_ON (sitd->urb == urb);
	 */

	/* give urb back to the driver */
	dev = usb_get_dev (urb->dev);
	ehci_urb_done (ehci, urb, regs);
	urb = NULL;

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (!ehci->periodic_sched)
		(void) disable_periodic (ehci);
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;

	/* last urb on the stream: release its bandwidth accounting */
	if (list_empty (&stream->td_list)) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}
	iso_stream_put (ehci, stream);
	usb_put_dev (dev);

	return 1;
}
1776
1777
/* Submit an iso urb using sITDs (split transactions): find the stream,
 * allocate the sITDs, then schedule and link the urb under ehci->lock.
 * On any failure the stream reference taken by iso_stream_find() is
 * dropped.
 */
static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (stream == NULL) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	/* the stream's interval is fixed once it's in use */
	if (urb->interval != stream->interval) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg (ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	status = iso_stream_schedule (ehci, urb, stream);
	if (status == 0)
		sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	spin_unlock_irqrestore (&ehci->lock, flags);

done:
	if (status < 0)
		iso_stream_put (ehci, stream);
	return status;
}
1824
1825#else
1826
/* Stub used when CONFIG_USB_EHCI_SPLIT_ISO is disabled: split-iso
 * submissions are rejected outright.
 */
static inline int
sitd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	ehci_dbg (ehci, "split iso support is disabled\n");
	return -ENOSYS;
}
1833
/* Stub used when CONFIG_USB_EHCI_SPLIT_ISO is disabled: no sITD should
 * ever appear in the schedule, so just log and report "not modified".
 */
static inline unsigned
sitd_complete (
	struct ehci_hcd *ehci,
	struct ehci_sitd *sitd,
	struct pt_regs *regs
) {
	ehci_err (ehci, "sitd_complete %p?\n", sitd);
	return 0;
}
1843
1844#endif /* USB_EHCI_SPLIT_ISO */
1845
1846/*-------------------------------------------------------------------------*/
1847
/* Scan the periodic schedule for completed transfers, from the last
 * scan point (ehci->next_uframe) up to the HC's current frame index
 * (or over everything that's left when the HC isn't running).
 * Completion handlers may modify the schedule, so the current frame is
 * rescanned whenever a qh/itd/sitd completion reported a change.
 */
static void
scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	unsigned frame, clock, now_uframe, mod;
	unsigned modified;

	mod = ehci->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	now_uframe = ehci->next_uframe;
	if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		clock = readl (&ehci->regs->frame_index);
	else
		clock = now_uframe + mod - 1;
	clock %= mod;

	for (;;) {
		union ehci_shadow q, *q_p;
		__le32 type, *hw_p;
		unsigned uframes;

		/* don't scan past the live uframe */
		frame = now_uframe >> 3;
		if (frame == (clock >> 3))
			uframes = now_uframe & 0x07;
		else {
			/* safe to scan the whole frame at once */
			now_uframe |= 0x07;
			uframes = 8;
		}

restart:
		/* scan each element in frame's queue for completions */
		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE (*hw_p);
		modified = 0;

		while (q.ptr != NULL) {
			unsigned uf;
			union ehci_shadow temp;
			int live;

			live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
			switch (type) {
			case Q_TYPE_QH:
				/* handle any completions */
				temp.qh = qh_get (q.qh);
				type = Q_NEXT_TYPE (q.qh->hw_next);
				q = q.qh->qh_next;
				modified = qh_completions (ehci, temp.qh, regs);
				if (unlikely (list_empty (&temp.qh->qtd_list)))
					intr_deschedule (ehci, temp.qh);
				qh_put (temp.qh);
				break;
			case Q_TYPE_FSTN:
				/* for "save place" FSTNs, look at QH entries
				 * in the previous frame for completions.
				 */
				if (q.fstn->hw_prev != EHCI_LIST_END) {
					dbg ("ignoring completions from FSTNs");
				}
				type = Q_NEXT_TYPE (q.fstn->hw_next);
				q = q.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				/* skip itds for later in the frame */
				rmb ();
				/* still-active transactions in scanned uframes
				 * mean the iTD isn't done yet; step past it
				 */
				for (uf = live ? uframes : 8; uf < 8; uf++) {
					if (0 == (q.itd->hw_transaction [uf]
							& ITD_ACTIVE))
						continue;
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE (q.itd->hw_next);
					q = *q_p;
					break;
				}
				if (uf != 8)
					break;

				/* this one's ready ... HC won't cache the
				 * pointer for much longer, if at all.
				 */
				/* unlink from both lists, then complete */
				*q_p = q.itd->itd_next;
				*hw_p = q.itd->hw_next;
				type = Q_NEXT_TYPE (q.itd->hw_next);
				wmb();
				modified = itd_complete (ehci, q.itd, regs);
				q = *q_p;
				break;
			case Q_TYPE_SITD:
				/* still active on a live HC: step past it */
				if ((q.sitd->hw_results & SITD_ACTIVE)
						&& live) {
					q_p = &q.sitd->sitd_next;
					hw_p = &q.sitd->hw_next;
					type = Q_NEXT_TYPE (q.sitd->hw_next);
					q = *q_p;
					break;
				}
				/* unlink from both lists, then complete */
				*q_p = q.sitd->sitd_next;
				*hw_p = q.sitd->hw_next;
				type = Q_NEXT_TYPE (q.sitd->hw_next);
				wmb();
				modified = sitd_complete (ehci, q.sitd, regs);
				q = *q_p;
				break;
			default:
				dbg ("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				// BUG ();
				q.ptr = NULL;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely (modified))
				goto restart;
		}

		/* stop when we catch up to the HC */

		// FIXME:  this assumes we won't get lapped when
		// latencies climb; that should be rare, but...
		// detect it, and just go all the way around.
		// FLR might help detect this case, so long as latencies
		// don't exceed periodic_size msec (default 1.024 sec).

		// FIXME:  likewise assumes HC doesn't halt mid-scan

		if (now_uframe == clock) {
			unsigned now;

			if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
				break;
			ehci->next_uframe = now_uframe;
			now = readl (&ehci->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			clock = now;
		} else {
			now_uframe++;
			now_uframe %= mod;
		}
	}
}
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
new file mode 100644
index 000000000000..67988dba9eb7
--- /dev/null
+++ b/drivers/usb/host/ehci.h
@@ -0,0 +1,637 @@
1/*
2 * Copyright (c) 2001-2002 by David Brownell
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#ifndef __LINUX_EHCI_HCD_H
20#define __LINUX_EHCI_HCD_H
21
22/* definitions used for the EHCI driver */
23
/* statistics can be kept for for tuning/monitoring */
/* NOTE(review): these counters are presumably bumped via the COUNT()
 * macro (defined in struct ehci_hcd below), which is a no-op unless
 * EHCI_STATS is defined — confirm against the .c files.
 */
struct ehci_stats {
	/* irq usage */
	unsigned long		normal;
	unsigned long		error;
	unsigned long		reclaim;
	unsigned long		lost_iaa;

	/* termination of urbs from core */
	unsigned long		complete;
	unsigned long		unlink;
};
36
37/* ehci_hcd->lock guards shared data against other CPUs:
38 * ehci_hcd: async, reclaim, periodic (and shadow), ...
39 * usb_host_endpoint: hcpriv
40 * ehci_qh: qh_next, qtd_list
41 * ehci_qtd: qtd_list
42 *
43 * Also, hold this lock when talking to HC registers or
44 * when updating hw_* fields in shared qh/qtd/... structures.
45 */
46
47#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */
48
struct ehci_hcd {			/* one per controller */
	spinlock_t		lock;

	/* async schedule support */
	struct ehci_qh		*async;
	struct ehci_qh		*reclaim;
	unsigned		reclaim_ready : 1;
	unsigned		scanning : 1;

	/* periodic schedule support */
#define	DEFAULT_I_TDPS		1024		/* some HCs can do less */
	unsigned		periodic_size;
	__le32			*periodic;	/* hw periodic table */
	dma_addr_t		periodic_dma;
	unsigned		i_thresh;	/* uframes HC might cache */

	union ehci_shadow	*pshadow;	/* mirror hw periodic table */
	int			next_uframe;	/* scan periodic, start here */
	unsigned		periodic_sched;	/* periodic activity count */

	/* per root hub port */
	unsigned long		reset_done [EHCI_MAX_ROOT_PORTS];

	/* per-HC memory pools (could be per-bus, but ...) */
	struct dma_pool		*qh_pool;	/* qh per active urb */
	struct dma_pool		*qtd_pool;	/* one or more per qh */
	struct dma_pool		*itd_pool;	/* itd per iso urb */
	struct dma_pool		*sitd_pool;	/* sitd per split iso urb */

	struct timer_list	watchdog;	/* armed by timer_action() */
	struct notifier_block	reboot_notifier;
	unsigned long		actions;	/* bit set of enum ehci_timer_action */
	unsigned		stamp;
	unsigned long		next_statechange;
	u32			command;

	unsigned		is_tdi_rh_tt:1;	/* TDI roothub with TT */

	/* glue to PCI and HCD framework */
	struct ehci_caps __iomem *caps;
	struct ehci_regs __iomem *regs;
	__u32			hcs_params;	/* cached register copy */

	/* irq statistics */
#ifdef EHCI_STATS
	struct ehci_stats	stats;
#	define COUNT(x) do { (x)++; } while (0)
#else
#	define COUNT(x) do {} while (0)
#endif
};
100
101/* convert between an HCD pointer and the corresponding EHCI_HCD */
102static inline struct ehci_hcd *hcd_to_ehci (struct usb_hcd *hcd)
103{
104 return (struct ehci_hcd *) (hcd->hcd_priv);
105}
106static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci)
107{
108 return container_of ((void *) ehci, struct usb_hcd, hcd_priv);
109}
110
111
/* bit numbers used in ehci_hcd.actions; set by timer_action(),
 * cleared by timer_action_done()
 */
enum ehci_timer_action {
	TIMER_IO_WATCHDOG,
	TIMER_IAA_WATCHDOG,
	TIMER_ASYNC_SHRINK,
	TIMER_ASYNC_OFF,
};
118
/* Clear the pending flag for @action in ehci->actions. */
static inline void
timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
{
	clear_bit (action, &ehci->actions);
}
124
/* Mark @action pending and arm the watchdog timer with its deadline,
 * unless an earlier deadline is already pending (the IAA watchdog is
 * the exception and always rearms the timer).
 */
static inline void
timer_action (struct ehci_hcd *ehci, enum ehci_timer_action action)
{
	/* only the first setter of the bit arms the timer */
	if (!test_and_set_bit (action, &ehci->actions)) {
		unsigned long t;

		switch (action) {
		case TIMER_IAA_WATCHDOG:
			t = EHCI_IAA_JIFFIES;
			break;
		case TIMER_IO_WATCHDOG:
			t = EHCI_IO_JIFFIES;
			break;
		case TIMER_ASYNC_OFF:
			t = EHCI_ASYNC_JIFFIES;
			break;
		// case TIMER_ASYNC_SHRINK:
		default:
			t = EHCI_SHRINK_JIFFIES;
			break;
		}
		t += jiffies;
		// all timings except IAA watchdog can be overridden.
		// async queue SHRINK often precedes IAA.  while it's ready
		// to go OFF neither can matter, and afterwards the IO
		// watchdog stops unless there's still periodic traffic.
		if (action != TIMER_IAA_WATCHDOG
				&& t > ehci->watchdog.expires
				&& timer_pending (&ehci->watchdog))
			return;
		mod_timer (&ehci->watchdog, t);
	}
}
158
159/*-------------------------------------------------------------------------*/
160
161/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
162
/* Section 2.2 Host Controller Capability Registers */
struct ehci_caps {
	/* these fields are specified as 8 and 16 bit registers,
	 * but some hosts can't perform 8 or 16 bit PCI accesses.
	 */
	u32		hc_capbase;	/* CAPLENGTH (7:0), HCIVERSION (31:16) */
#define HC_LENGTH(p)		(((p)>>00)&0x00ff)	/* bits 7:0 */
#define HC_VERSION(p)		(((p)>>16)&0xffff)	/* bits 31:16 */
	u32		hcs_params;     /* HCSPARAMS - offset 0x4 */
#define HCS_DEBUG_PORT(p)	(((p)>>20)&0xf)	/* bits 23:20, debug port? */
#define HCS_INDICATOR(p)	((p)&(1 << 16))	/* true: has port indicators */
#define HCS_N_CC(p)		(((p)>>12)&0xf)	/* bits 15:12, #companion HCs */
#define HCS_N_PCC(p)		(((p)>>8)&0xf)	/* bits 11:8, ports per CC */
#define HCS_PORTROUTED(p)	((p)&(1 << 7))	/* true: port routing */
#define HCS_PPC(p)		((p)&(1 << 4))	/* true: port power control */
#define HCS_N_PORTS(p)		(((p)>>0)&0xf)	/* bits 3:0, ports on HC */

	u32		hcc_params;      /* HCCPARAMS - offset 0x8 */
#define HCC_EXT_CAPS(p)		(((p)>>8)&0xff)	/* for pci extended caps */
#define HCC_ISOC_CACHE(p)       ((p)&(1 << 7))  /* true: can cache isoc frame */
#define HCC_ISOC_THRES(p)       (((p)>>4)&0x7)  /* bits 6:4, uframes cached */
#define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
#define HCC_64BIT_ADDR(p)       ((p)&(1))       /* true: can use 64-bit addr */
	u8		portroute [8];	 /* nibbles for routing - offset 0xC */
} __attribute__ ((packed));
189
190
/* Section 2.3 Host Controller Operational Registers
 *
 * Memory-mapped layout of the EHCI operational register file; it begins
 * "caplength" bytes after the capability registers.  Packed so the
 * compiler cannot insert padding between the hardware-defined fields.
 */
struct ehci_regs {

	/* USBCMD: offset 0x00 */
	u32		command;
/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
#define CMD_PARK	(1<<11)		/* enable "park" on async qh */
#define CMD_PARK_CNT(c)	(((c)>>8)&3)	/* how many transfers to park for */
#define CMD_LRESET	(1<<7)		/* partial reset (no ports, etc) */
#define CMD_IAAD	(1<<6)		/* "doorbell" interrupt async advance */
#define CMD_ASE		(1<<5)		/* async schedule enable */
#define CMD_PSE		(1<<4)		/* periodic schedule enable */
/* 3:2 is periodic frame list size */
#define CMD_RESET	(1<<1)		/* reset HC not bus */
#define CMD_RUN		(1<<0)		/* start/stop HC */

	/* USBSTS: offset 0x04 */
	u32		status;
#define STS_ASS		(1<<15)		/* Async Schedule Status */
#define STS_PSS		(1<<14)		/* Periodic Schedule Status */
#define STS_RECL	(1<<13)		/* Reclamation */
#define STS_HALT	(1<<12)		/* Not running (any reason) */
/* some bits reserved */
	/* these STS_* flags are also intr_enable bits (USBINTR) */
#define STS_IAA		(1<<5)		/* Interrupted on async advance */
#define STS_FATAL	(1<<4)		/* such as some PCI access errors */
#define STS_FLR		(1<<3)		/* frame list rolled over */
#define STS_PCD		(1<<2)		/* port change detect */
#define STS_ERR		(1<<1)		/* "error" completion (overflow, ...) */
#define STS_INT		(1<<0)		/* "normal" completion (short, ...) */

	/* USBINTR: offset 0x08 -- uses the same bit positions as USBSTS */
	u32		intr_enable;

	/* FRINDEX: offset 0x0C */
	u32		frame_index;	/* current microframe number */
	/* CTRLDSSEGMENT: offset 0x10 */
	u32		segment;	/* address bits 63:32 if needed */
	/* PERIODICLISTBASE: offset 0x14 */
	u32		frame_list;	/* points to periodic list */
	/* ASYNCLISTADDR: offset 0x18 */
	u32		async_next;	/* address of next async queue head */

	u32		reserved [9];

	/* CONFIGFLAG: offset 0x40 */
	u32		configured_flag;
#define FLAG_CF		(1<<0)		/* true: we'll support "high speed" */

	/* PORTSC: offset 0x44 */
	/* zero-length array: one register per port; actual count comes from
	 * the N_PORTS field of HCSPARAMS (see HCS_N_PORTS above). */
	u32		port_status [0];	/* up to N_PORTS */
/* 31:23 reserved */
#define PORT_WKOC_E	(1<<22)		/* wake on overcurrent (enable) */
#define PORT_WKDISC_E	(1<<21)		/* wake on disconnect (enable) */
#define PORT_WKCONN_E	(1<<20)		/* wake on connect (enable) */
/* 19:16 for port testing */
#define PORT_LED_OFF	(0<<14)
#define PORT_LED_AMBER	(1<<14)
#define PORT_LED_GREEN	(2<<14)
#define PORT_LED_MASK	(3<<14)
#define PORT_OWNER	(1<<13)		/* true: companion hc owns this port */
#define PORT_POWER	(1<<12)		/* true: has power (see PPC) */
#define PORT_USB11(x)	(((x)&(3<<10))==(1<<10))	/* USB 1.1 device */
/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
/* 9 reserved */
#define PORT_RESET	(1<<8)		/* reset port */
#define PORT_SUSPEND	(1<<7)		/* suspend port */
#define PORT_RESUME	(1<<6)		/* resume it */
#define PORT_OCC	(1<<5)		/* over current change */
#define PORT_OC		(1<<4)		/* over current active */
#define PORT_PEC	(1<<3)		/* port enable change */
#define PORT_PE		(1<<2)		/* port enable */
#define PORT_CSC	(1<<1)		/* connect status change */
#define PORT_CONNECT	(1<<0)		/* device connected */
} __attribute__ ((packed));
266
/* Appendix C, Debug port ... intended for use with special "debug devices"
 * that can help if there's no serial console.  (nonstandard enumeration.)
 *
 * Layout of the optional debug-port register set; the helper macros
 * decode/encode fields of the three control registers.
 */
struct ehci_dbg_port {
	u32	control;
#define DBGP_OWNER	(1<<30)
#define DBGP_ENABLED	(1<<28)
#define DBGP_DONE	(1<<16)
#define DBGP_INUSE	(1<<10)
#define DBGP_ERRCODE(x)	(((x)>>7)&0x0f)
#	define DBGP_ERR_BAD	1
#	define DBGP_ERR_SIGNAL	2
#define DBGP_ERROR	(1<<6)
#define DBGP_GO		(1<<5)
#define DBGP_OUT	(1<<4)
#define DBGP_LEN(x)	(((x)>>0)&0x0f)
	u32	pids;
#define DBGP_PID_GET(x)		(((x)>>16)&0xff)
/* no trailing semicolon: these two must be usable inside expressions */
#define DBGP_PID_SET(data,tok)	(((data)<<8)|(tok))
	u32	data03;
	u32	data47;
	u32	address;
#define DBGP_EPADDR(dev,ep)	(((dev)<<8)|(ep))
} __attribute__ ((packed));
291
/*-------------------------------------------------------------------------*/

/* hw_next link for a qtd (low bits zero: valid, typeless pointer).
 * The argument is parenthesized so address *expressions* are cast as a
 * whole, not just their first operand. */
#define QTD_NEXT(dma)	cpu_to_le32((u32)(dma))
295
/*
 * EHCI Specification 0.95 Section 3.5
 * QTD: describe data transfer components (buffer, direction, ...)
 * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
 *
 * These are associated only with "QH" (Queue Head) structures,
 * used with control, bulk, and interrupt transfers.
 */
struct ehci_qtd {
	/* first part defined by EHCI spec; hardware reads these fields,
	 * so they are little-endian (__le32) and must stay in this order */
	__le32			hw_next;	/* see EHCI 3.5.1 */
	__le32			hw_alt_next;	/* see EHCI 3.5.2 */
	__le32			hw_token;	/* see EHCI 3.5.3 */
#define	QTD_TOGGLE	(1 << 31)	/* data toggle */
#define	QTD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
#define	QTD_IOC		(1 << 15)	/* interrupt on complete */
#define	QTD_CERR(tok)	(((tok)>>10) & 0x3)
#define	QTD_PID(tok)	(((tok)>>8) & 0x3)
#define	QTD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
#define	QTD_STS_HALT	(1 << 6)	/* halted on error */
#define	QTD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
#define	QTD_STS_BABBLE	(1 << 4)	/* device was babbling (qtd halted) */
#define	QTD_STS_XACT	(1 << 3)	/* device gave illegal response */
#define	QTD_STS_MMF	(1 << 2)	/* incomplete split transaction */
#define	QTD_STS_STS	(1 << 1)	/* split transaction state */
#define	QTD_STS_PING	(1 << 0)	/* issue PING? */
	__le32			hw_buf [5];	/* see EHCI 3.5.4: five page pointers */
	__le32			hw_buf_hi [5];	/* Appendix B: high dwords for 64-bit DMA */

	/* the rest is HCD-private; hardware never looks at it */
	dma_addr_t		qtd_dma;	/* qtd address */
	struct list_head	qtd_list;	/* sw qtd list */
	struct urb		*urb;		/* qtd's urb */
	size_t			length;		/* length of buffer */
} __attribute__ ((aligned (32)));	/* 32-byte alignment required by the HC */
331
/* mask NakCnt+T in qh->hw_alt_next: keeps the link address (bits 31:5),
 * clears the NAK counter and Terminate bits in the low five bits */
#define QTD_MASK __constant_cpu_to_le32 (~0x1f)

/* a read is "short" when some bytes remain and the token PID is IN (1) */
#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
336
337/*-------------------------------------------------------------------------*/
338
/* type tag from {qh,itd,sitd,fstn}->hw_next */
#define Q_NEXT_TYPE(dma)	((dma) & __constant_cpu_to_le32 (3 << 1))

/* values for that type tag */
#define Q_TYPE_ITD	__constant_cpu_to_le32 (0 << 1)
#define Q_TYPE_QH	__constant_cpu_to_le32 (1 << 1)
#define Q_TYPE_SITD	__constant_cpu_to_le32 (2 << 1)
#define Q_TYPE_FSTN	__constant_cpu_to_le32 (3 << 1)

/* next async queue entry, or pointer to interrupt/periodic QH.
 * The low 5 bits are cleared (Terminate bit, type tag, and alignment
 * padding -- QHs are 32-byte aligned) before the QH tag is or'd in.
 * "dma" is parenthesized so address expressions are cast as a whole. */
#define QH_NEXT(dma)	(cpu_to_le32(((u32)(dma))&~0x01f)|Q_TYPE_QH)

/* for periodic/async schedules and qtd lists, mark end of list */
#define	EHCI_LIST_END	__constant_cpu_to_le32(1) /* "null pointer" to hw */
353
/*
 * Entries in periodic shadow table are pointers to one of four kinds
 * of data structure.  That's dictated by the hardware; a type tag is
 * encoded in the low bits of the hardware's periodic schedule.  Use
 * Q_NEXT_TYPE to get the tag.
 *
 * For entries in the async schedule, the type tag always says "qh".
 */
union ehci_shadow {
	struct ehci_qh 		*qh;		/* Q_TYPE_QH */
	struct ehci_itd		*itd;		/* Q_TYPE_ITD */
	struct ehci_sitd	*sitd;		/* Q_TYPE_SITD */
	struct ehci_fstn	*fstn;		/* Q_TYPE_FSTN */
	u32			*hw_next;	/* (all types) */
	void			*ptr;		/* generic access, e.g. for NULL tests */
};
370
371/*-------------------------------------------------------------------------*/
372
/*
 * EHCI Specification 0.95 Section 3.6
 * QH: describes control/bulk/interrupt endpoints
 * See Fig 3-7 "Queue Head Structure Layout".
 *
 * These appear in both the async and (for interrupt) periodic schedules.
 */

struct ehci_qh {
	/* first part defined by EHCI spec; read/written by the controller */
	__le32			hw_next;	 /* see EHCI 3.6.1 */
	__le32			hw_info1;	 /* see EHCI 3.6.2 */
#define	QH_HEAD	0x00008000	/* H bit: marks the head of the async ring */
	__le32			hw_info2;	 /* see EHCI 3.6.2 */
	__le32			hw_current;	 /* qtd list - see EHCI 3.6.4 */

	/* qtd overlay (hardware parts of a struct ehci_qtd) */
	__le32			hw_qtd_next;
	__le32			hw_alt_next;
	__le32			hw_token;
	__le32			hw_buf [5];
	__le32			hw_buf_hi [5];

	/* the rest is HCD-private; invisible to the controller */
	dma_addr_t		qh_dma;		/* address of qh */
	union ehci_shadow	qh_next;	/* ptr to qh; or periodic */
	struct list_head	qtd_list;	/* sw qtd list */
	struct ehci_qtd		*dummy;
	struct ehci_qh		*reclaim;	/* next to reclaim */

	struct ehci_hcd		*ehci;
	struct kref		kref;		/* refcount; freed via kref release */
	unsigned		stamp;

	u8			qh_state;	/* one of the QH_STATE_* values below */
#define	QH_STATE_LINKED		1		/* HC sees this */
#define	QH_STATE_UNLINK		2		/* HC may still see this */
#define	QH_STATE_IDLE		3		/* HC doesn't see this */
#define	QH_STATE_UNLINK_WAIT	4		/* LINKED and on reclaim q */
#define	QH_STATE_COMPLETING	5		/* don't touch token.HALT */

	/* periodic schedule info */
	u8			usecs;		/* intr bandwidth */
	u8			gap_uf;		/* uframes split/csplit gap */
	u8			c_usecs;	/* ... split completion bw */
	unsigned short		period;		/* polling interval */
	unsigned short		start;		/* where polling starts */
#define NO_FRAME ((unsigned short)~0)			/* pick new start */
	struct usb_device	*dev;		/* access to TT */
} __attribute__ ((aligned (32)));	/* 32-byte alignment required by the HC */
423
424/*-------------------------------------------------------------------------*/
425
/* description of one iso transaction (up to 3 KB data if highspeed) */
struct ehci_iso_packet {
	/* These will be copied to iTD when scheduling */
	u64			bufp;		/* itd->hw_bufp{,_hi}[pg] |= */
	__le32			transaction;	/* itd->hw_transaction[i] |= */
	u8			cross;		/* true: buf crosses a page boundary */
	/* for full speed OUT splits */
	u32			buf1;
};
435
/* temporary schedule data for packets from iso urbs (both speeds)
 * each packet is one logical usb transaction to the device (not TT),
 * beginning at stream->next_uframe
 */
struct ehci_iso_sched {
	struct list_head	td_list;	/* itds/sitds for this urb */
	unsigned		span;		/* length in (micro)frames */
	/* zero-length array: one entry per iso packet, allocated inline */
	struct ehci_iso_packet	packet [0];
};
445
/*
 * ehci_iso_stream - groups all (s)itds for this endpoint.
 * acts like a qh would, if EHCI had them for ISO.
 */
struct ehci_iso_stream {
	/* first two fields match QH, but info1 == 0 */
	__le32			hw_next;
	__le32			hw_info1;

	u32			refcount;	/* manual refcount (no kref here) */
	u8			bEndpointAddress;
	u8			highspeed;	/* true: itds; false: sitds via TT */
	u16			depth;		/* depth in uframes */
	struct list_head	td_list;	/* queued itds/sitds */
	struct list_head	free_list;	/* list of unused itds/sitds */
	struct usb_device	*udev;
	struct usb_host_endpoint *ep;

	/* output of (re)scheduling */
	unsigned long		start;		/* jiffies */
	unsigned long		rescheduled;
	int			next_uframe;
	__le32			splits;

	/* the rest is derived from the endpoint descriptor,
	 * trusting urb->interval == f(epdesc->bInterval) and
	 * including the extra info for hw_bufp[0..2]
	 */
	u8			interval;
	u8			usecs, c_usecs;
	u16			maxp;
	u16			raw_mask;
	unsigned		bandwidth;

	/* This is used to initialize iTD's hw_bufp fields */
	__le32			buf0;
	__le32			buf1;
	__le32			buf2;

	/* this is used to initialize sITD's tt info */
	__le32			address;
};
488
489/*-------------------------------------------------------------------------*/
490
/*
 * EHCI Specification 0.95 Section 3.3
 * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
 *
 * Schedule records for high speed iso xfers
 */
struct ehci_itd {
	/* first part defined by EHCI spec; one transaction slot per uframe */
	__le32			hw_next;           /* see EHCI 3.3.1 */
	__le32			hw_transaction [8]; /* see EHCI 3.3.2 */
#define EHCI_ISOC_ACTIVE        (1<<31)        /* activate transfer this slot */
#define EHCI_ISOC_BUF_ERR       (1<<30)        /* Data buffer error */
#define EHCI_ISOC_BABBLE        (1<<29)        /* babble detected */
#define EHCI_ISOC_XACTERR       (1<<28)        /* XactErr - transaction error */
#define	EHCI_ITD_LENGTH(tok)	(((tok)>>16) & 0x0fff)
#define	EHCI_ITD_IOC		(1 << 15)	/* interrupt on complete */

#define ITD_ACTIVE	__constant_cpu_to_le32(EHCI_ISOC_ACTIVE)

	__le32			hw_bufp [7];	/* see EHCI 3.3.3 */
	__le32			hw_bufp_hi [7];	/* Appendix B */

	/* the rest is HCD-private */
	dma_addr_t		itd_dma;	/* for this itd */
	union ehci_shadow	itd_next;	/* ptr to periodic q entry */

	struct urb		*urb;
	struct ehci_iso_stream	*stream;	/* endpoint's queue */
	struct list_head	itd_list;	/* list of stream's itds */

	/* any/all hw_transactions here may be used by that urb */
	unsigned		frame;		/* where scheduled */
	unsigned		pg;
	unsigned		index[8];	/* in urb->iso_frame_desc */
	u8			usecs[8];
} __attribute__ ((aligned (32)));	/* 32-byte alignment required by the HC */
527
528/*-------------------------------------------------------------------------*/
529
/*
 * EHCI Specification 0.95 Section 3.4
 * siTD, aka split-transaction isochronous Transfer Descriptor
 *      ... describe full speed iso xfers through TT in hubs
 * see Figure 3-5 "Split-transaction Isochronous Transaction Descriptor (siTD)
 */
struct ehci_sitd {
	/* first part defined by EHCI spec */
	__le32			hw_next;
/* uses bit field macros above - see EHCI 0.95 Table 3-8 */
	__le32			hw_fullspeed_ep;	/* EHCI table 3-9 */
	__le32			hw_uframe;		/* EHCI table 3-10 */
	__le32			hw_results;		/* EHCI table 3-11 */
#define	SITD_IOC	(1 << 31)	/* interrupt on completion */
#define	SITD_PAGE	(1 << 30)	/* buffer 0/1 */
#define	SITD_LENGTH(x)	(0x3ff & ((x)>>16))
#define	SITD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
#define	SITD_STS_ERR	(1 << 6)	/* error from TT */
#define	SITD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
#define	SITD_STS_BABBLE	(1 << 4)	/* device was babbling */
#define	SITD_STS_XACT	(1 << 3)	/* illegal IN response */
#define	SITD_STS_MMF	(1 << 2)	/* incomplete split transaction */
#define	SITD_STS_STS	(1 << 1)	/* split transaction state */

#define SITD_ACTIVE	__constant_cpu_to_le32(SITD_STS_ACTIVE)

	__le32			hw_buf [2];		/* EHCI table 3-12 */
	__le32			hw_backpointer;		/* EHCI table 3-13 */
	__le32			hw_buf_hi [2];		/* Appendix B */

	/* the rest is HCD-private */
	dma_addr_t		sitd_dma;
	union ehci_shadow	sitd_next;	/* ptr to periodic q entry */

	struct urb		*urb;
	struct ehci_iso_stream	*stream;	/* endpoint's queue */
	struct list_head	sitd_list;	/* list of stream's sitds */
	unsigned		frame;		/* where scheduled */
	unsigned		index;		/* in urb->iso_frame_desc */
} __attribute__ ((aligned (32)));	/* 32-byte alignment required by the HC */
570
571/*-------------------------------------------------------------------------*/
572
/*
 * EHCI Specification 0.96 Section 3.7
 * Periodic Frame Span Traversal Node (FSTN)
 *
 * Manages split interrupt transactions (using TT) that span frame boundaries
 * into uframes 0/1; see 4.12.2.2.  In those uframes, a "save place" FSTN
 * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
 * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
 */
struct ehci_fstn {
	/* hardware-visible part */
	__le32			hw_next;	/* any periodic q entry */
	__le32			hw_prev;	/* qh or EHCI_LIST_END */

	/* the rest is HCD-private */
	dma_addr_t		fstn_dma;
	union ehci_shadow	fstn_next;	/* ptr to periodic q entry */
} __attribute__ ((aligned (32)));	/* 32-byte alignment required by the HC */
590
591/*-------------------------------------------------------------------------*/
592
593#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT
594
595/*
596 * Some EHCI controllers have a Transaction Translator built into the
597 * root hub. This is a non-standard feature. Each controller will need
598 * to add code to the following inline functions, and call them as
599 * needed (mostly in root hub code).
600 */
601
602#define ehci_is_TDI(e) ((e)->is_tdi_rh_tt)
603
604/* Returns the speed of a device attached to a port on the root hub. */
605static inline unsigned int
606ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
607{
608 if (ehci_is_TDI(ehci)) {
609 switch ((portsc>>26)&3) {
610 case 0:
611 return 0;
612 case 1:
613 return (1<<USB_PORT_FEAT_LOWSPEED);
614 case 2:
615 default:
616 return (1<<USB_PORT_FEAT_HIGHSPEED);
617 }
618 }
619 return (1<<USB_PORT_FEAT_HIGHSPEED);
620}
621
622#else
623
624#define ehci_is_TDI(e) (0)
625
626#define ehci_port_speed(ehci, portsc) (1<<USB_PORT_FEAT_HIGHSPEED)
627#endif
628
629/*-------------------------------------------------------------------------*/
630
631#ifndef DEBUG
632#define STUB_DEBUG_FILES
633#endif /* DEBUG */
634
635/*-------------------------------------------------------------------------*/
636
637#endif /* __LINUX_EHCI_HCD_H */
diff --git a/drivers/usb/host/hc_crisv10.c b/drivers/usb/host/hc_crisv10.c
new file mode 100644
index 000000000000..4b12be822bd4
--- /dev/null
+++ b/drivers/usb/host/hc_crisv10.c
@@ -0,0 +1,4556 @@
/*
 * hc_crisv10.c: ETRAX 100LX USB Host Controller Driver (HCD)
 *
 * Copyright (c) 2002, 2003 Axis Communications AB.
 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/delay.h>
10#include <linux/ioport.h>
11#include <linux/sched.h>
12#include <linux/slab.h>
13#include <linux/errno.h>
14#include <linux/unistd.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/version.h>
18#include <linux/list.h>
19#include <linux/spinlock.h>
20
21#include <asm/uaccess.h>
22#include <asm/io.h>
23#include <asm/irq.h>
24#include <asm/dma.h>
25#include <asm/system.h>
26#include <asm/arch/svinto.h>
27
28#include <linux/usb.h>
29/* Ugly include because we don't live with the other host drivers. */
30#include <../drivers/usb/core/hcd.h>
31#include <../drivers/usb/core/usb.h>
32
33#include "hc_crisv10.h"
34
35#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
36#define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
37#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
38
39static const char *usb_hcd_version = "$Revision: 1.2 $";
40
41#undef KERN_DEBUG
42#define KERN_DEBUG ""
43
44
45#undef USB_DEBUG_RH
46#undef USB_DEBUG_EPID
47#undef USB_DEBUG_SB
48#undef USB_DEBUG_DESC
49#undef USB_DEBUG_URB
50#undef USB_DEBUG_TRACE
51#undef USB_DEBUG_BULK
52#undef USB_DEBUG_CTRL
53#undef USB_DEBUG_INTR
54#undef USB_DEBUG_ISOC
55
56#ifdef USB_DEBUG_RH
57#define dbg_rh(format, arg...) printk(KERN_DEBUG __FILE__ ": (RH) " format "\n" , ## arg)
58#else
59#define dbg_rh(format, arg...) do {} while (0)
60#endif
61
62#ifdef USB_DEBUG_EPID
63#define dbg_epid(format, arg...) printk(KERN_DEBUG __FILE__ ": (EPID) " format "\n" , ## arg)
64#else
65#define dbg_epid(format, arg...) do {} while (0)
66#endif
67
68#ifdef USB_DEBUG_SB
69#define dbg_sb(format, arg...) printk(KERN_DEBUG __FILE__ ": (SB) " format "\n" , ## arg)
70#else
71#define dbg_sb(format, arg...) do {} while (0)
72#endif
73
74#ifdef USB_DEBUG_CTRL
75#define dbg_ctrl(format, arg...) printk(KERN_DEBUG __FILE__ ": (CTRL) " format "\n" , ## arg)
76#else
77#define dbg_ctrl(format, arg...) do {} while (0)
78#endif
79
80#ifdef USB_DEBUG_BULK
81#define dbg_bulk(format, arg...) printk(KERN_DEBUG __FILE__ ": (BULK) " format "\n" , ## arg)
82#else
83#define dbg_bulk(format, arg...) do {} while (0)
84#endif
85
86#ifdef USB_DEBUG_INTR
87#define dbg_intr(format, arg...) printk(KERN_DEBUG __FILE__ ": (INTR) " format "\n" , ## arg)
88#else
89#define dbg_intr(format, arg...) do {} while (0)
90#endif
91
92#ifdef USB_DEBUG_ISOC
93#define dbg_isoc(format, arg...) printk(KERN_DEBUG __FILE__ ": (ISOC) " format "\n" , ## arg)
94#else
95#define dbg_isoc(format, arg...) do {} while (0)
96#endif
97
98#ifdef USB_DEBUG_TRACE
99#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
100#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
101#else
102#define DBFENTER do {} while (0)
103#define DBFEXIT do {} while (0)
104#endif
105
/* true if the pipe refers to a low-speed device (bit 26 of the pipe value) */
#define usb_pipeslow(pipe) (((pipe) >> 26) & 1)
107
108/*-------------------------------------------------------------------
109 Virtual Root Hub
110 -------------------------------------------------------------------*/
111
/* Device descriptor for the virtual root hub: USB 1.0, hub class (0x09),
 * 8-byte ep0, no vendor/product ids; string ids 2 (product) and 1 (serial). */
static __u8 root_hub_dev_des[] =
{
	0x12,  /*  __u8  bLength; */
	0x01,  /*  __u8  bDescriptorType; Device */
	0x00,  /*  __le16 bcdUSB; v1.0 */
	0x01,
	0x09,  /*  __u8  bDeviceClass; HUB_CLASSCODE */
	0x00,  /*  __u8  bDeviceSubClass; */
	0x00,  /*  __u8  bDeviceProtocol; */
	0x08,  /*  __u8  bMaxPacketSize0; 8 Bytes */
	0x00,  /*  __le16 idVendor; */
	0x00,
	0x00,  /*  __le16 idProduct; */
	0x00,
	0x00,  /*  __le16 bcdDevice; */
	0x00,
	0x00,  /*  __u8  iManufacturer; */
	0x02,  /*  __u8  iProduct; */
	0x01,  /*  __u8  iSerialNumber; */
	0x01   /*  __u8  bNumConfigurations; */
};
133
/* Configuration descriptor for the virtual root hub: one interface with a
 * single interrupt-IN endpoint (status-change endpoint, 255 ms interval). */
static __u8 root_hub_config_des[] =
{
	0x09,  /*  __u8  bLength; */
	0x02,  /*  __u8  bDescriptorType; Configuration */
	0x19,  /*  __le16 wTotalLength; 0x19 = 25 = 9 + 9 + 7 bytes below */
	0x00,
	0x01,  /*  __u8  bNumInterfaces; */
	0x01,  /*  __u8  bConfigurationValue; */
	0x00,  /*  __u8  iConfiguration; */
	0x40,  /*  __u8  bmAttributes; Bit 7: Bus-powered */
	0x00,  /*  __u8  MaxPower; */

	/* interface */
	0x09,  /*  __u8  if_bLength; */
	0x04,  /*  __u8  if_bDescriptorType; Interface */
	0x00,  /*  __u8  if_bInterfaceNumber; */
	0x00,  /*  __u8  if_bAlternateSetting; */
	0x01,  /*  __u8  if_bNumEndpoints; */
	0x09,  /*  __u8  if_bInterfaceClass; HUB_CLASSCODE */
	0x00,  /*  __u8  if_bInterfaceSubClass; */
	0x00,  /*  __u8  if_bInterfaceProtocol; */
	0x00,  /*  __u8  if_iInterface; */

	/* endpoint */
	0x07,  /*  __u8  ep_bLength; */
	0x05,  /*  __u8  ep_bDescriptorType; Endpoint */
	0x81,  /*  __u8  ep_bEndpointAddress; IN Endpoint 1 */
	0x03,  /*  __u8  ep_bmAttributes; Interrupt */
	0x08,  /*  __le16 ep_wMaxPacketSize; 8 Bytes */
	0x00,
	0xff   /*  __u8  ep_bInterval; 255 ms */
};
167
/* Hub-class descriptor for the virtual root hub: two downstream ports,
 * no per-port power switching or overcurrent reporting. */
static __u8 root_hub_hub_des[] =
{
	0x09,  /*  __u8  bLength; */
	0x29,  /*  __u8  bDescriptorType; Hub-descriptor */
	0x02,  /*  __u8  bNbrPorts; */
	0x00,  /* __u16  wHubCharacteristics; */
	0x00,
	0x01,  /*  __u8  bPwrOn2pwrGood; 2ms */
	0x00,  /*  __u8  bHubContrCurrent; 0 mA */
	0x00,  /*  __u8  DeviceRemovable; *** 7 Ports max *** */
	0xff   /*  __u8  PortPwrCtrlMask; *** 7 ports max *** */
};
180
/* Timers shared by all bulk endpoints: one kicks queued-but-idle bulk
 * traffic, the other detects bulk end-of-transfer when no eot interrupt
 * arrives.  Handler functions are attached later at setup time. */
static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);

/* We want the start timer to expire before the eot timer, because the former might start
   traffic, thus making it unnecessary for the latter to time out. */
#define BULK_START_TIMER_INTERVAL (HZ/10) /* 100 ms */
#define BULK_EOT_TIMER_INTERVAL (HZ/10+2) /* 120 ms (HZ/10 + 2 jiffies, assuming HZ == 100) */

/* NOTE: OK() deliberately ends with an unprotected "break", so it is only
   valid directly inside a switch (or loop) body -- do NOT wrap it in
   do { } while (0), which would swallow the break. */
#define OK(x) len = (x); dbg_rh("OK(%d): line: %d", x, __LINE__); break
#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}

/* Pick atomic allocation flags when running in interrupt context. */
#define SLAB_FLAG     (in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL)
#define KMALLOC_FLAG  (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)

/* Most helpful debugging aid */
#define assert(expr) ((void) ((expr) ? 0 : (err("assert failed at line %d",__LINE__))))
198
199/* Alternative assert define which stops after a failed assert. */
200/*
201#define assert(expr) \
202{ \
203 if (!(expr)) { \
204 err("assert failed at line %d",__LINE__); \
205 while (1); \
206 } \
207}
208*/
209
210
211/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it dynamically?
212 To adjust it dynamically we would have to get an interrupt when we reach the end
213 of the rx descriptor list, or when we get close to the end, and then allocate more
214 descriptors. */
215
216#define NBR_OF_RX_DESC 512
217#define RX_DESC_BUF_SIZE 1024
218#define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
219
220/* The number of epids is, among other things, used for pre-allocating
221 ctrl, bulk and isoc EP descriptors (one for each epid).
222 Assumed to be > 1 when initiating the DMA lists. */
223#define NBR_OF_EPIDS 32
224
225/* Support interrupt traffic intervals up to 128 ms. */
226#define MAX_INTR_INTERVAL 128
227
228/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP table
229 must be "invalid". By this we mean that we shouldn't care about epid attentions
230 for this epid, or at least handle them differently from epid attentions for "valid"
231 epids. This define determines which one to use (don't change it). */
232#define INVALID_EPID 31
233/* A special epid for the bulk dummys. */
234#define DUMMY_EPID 30
235
236/* This is just a software cache for the valid entries in R_USB_EPT_DATA. */
237static __u32 epid_usage_bitmask;
238
239/* A bitfield to keep information on in/out traffic is needed to uniquely identify
240 an endpoint on a device, since the most significant bit which indicates traffic
241 direction is lacking in the ep_id field (ETRAX epids can handle both in and
242 out traffic on endpoints that are otherwise identical). The USB framework, however,
243 relies on them to be handled separately. For example, bulk IN and OUT urbs cannot
244 be queued in the same list, since they would block each other. */
245static __u32 epid_out_traffic;
246
247/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
248 Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be cache aligned. */
249static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
250static volatile USB_IN_Desc_t RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
251
252/* Pointers into RxDescList. */
253static volatile USB_IN_Desc_t *myNextRxDesc;
254static volatile USB_IN_Desc_t *myLastRxDesc;
255static volatile USB_IN_Desc_t *myPrevRxDesc;
256
257/* EP descriptors must be 32-bit aligned. */
258static volatile USB_EP_Desc_t TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
259static volatile USB_EP_Desc_t TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
260/* After each enabled bulk EP (IN or OUT) we put two disabled EP descriptors with the eol flag set,
261 causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
262 gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
263 EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
264 in each frame. */
265static volatile USB_EP_Desc_t TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
266
267static volatile USB_EP_Desc_t TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
268static volatile USB_SB_Desc_t TxIsocSB_zout __attribute__ ((aligned (4)));
269
270static volatile USB_EP_Desc_t TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
271static volatile USB_SB_Desc_t TxIntrSB_zout __attribute__ ((aligned (4)));
272
273/* A zout transfer makes a memory access at the address of its buf pointer, which means that setting
274 this buf pointer to 0 will cause an access to the flash. In addition to this, setting sw_len to 0
275 results in a 16/32 bytes (depending on DMA burst size) transfer. Instead, we set it to 1, and point
276 it to this buffer. */
277static int zout_buffer[4] __attribute__ ((aligned (4)));
278
279/* Cache for allocating new EP and SB descriptors. */
280static kmem_cache_t *usb_desc_cache;
281
282/* Cache for the registers allocated in the top half. */
283static kmem_cache_t *top_half_reg_cache;
284
285/* Cache for the data allocated in the isoc descr top half. */
286static kmem_cache_t *isoc_compl_cache;
287
288static struct usb_bus *etrax_usb_bus;
289
290/* This is a circular (double-linked) list of the active urbs for each epid.
291 The head is never removed, and new urbs are linked onto the list as
292 urb_entry_t elements. Don't reference urb_list directly; use the wrapper
293 functions instead. Note that working with these lists might require spinlock
294 protection. */
295static struct list_head urb_list[NBR_OF_EPIDS];
296
297/* Read about the need and usage of this lock in submit_ctrl_urb. */
298static spinlock_t urb_list_lock;
299
300/* Used when unlinking asynchronously. */
301static struct list_head urb_unlink_list;
302
/* for returning string descriptors in UTF-16LE:
 * copy each 7-bit ASCII char into a little-endian 16-bit code unit.
 * Writes at most utfmax bytes (in whole pairs) and returns the number
 * of bytes written. */
static int ascii2utf (char *ascii, __u8 *utf, int utfmax)
{
	int written = 0;

	while (*ascii && utfmax > 1) {
		*utf++ = *ascii++ & 0x7f;	/* low byte: 7-bit ASCII */
		*utf++ = 0;			/* high byte: always zero */
		utfmax -= 2;
		written += 2;
	}
	return written;
}
314
/* Build a root-hub string descriptor into data[].
 * id 0: table of supported language ids; id 1: serial number in hex;
 * id 2: product name; any other id is unsupported and returns 0 so the
 * caller can "stall".  Returns the descriptor length in bytes.
 * Assumes len > 2 * (sizeof buf + 1) and strlen(type) <= 8. */
static int usb_root_hub_string (int id, int serial, char *type, __u8 *data, int len)
{
	char buf [30];

	if (id == 0) {
		/* language ids */
		*data++ = 4; *data++ = 3;	/* 4 bytes data */
		*data++ = 0; *data++ = 0;	/* some language id */
		return 4;
	}

	if (id == 1) {
		/* serial number */
		sprintf (buf, "%x", serial);
	} else if (id == 2) {
		/* product description */
		sprintf (buf, "USB %s Root Hub", type);
	} else {
		/* id 3 == vendor description; unsupported IDs --> "stall" */
		return 0;
	}

	/* string descriptor: length byte, type byte, UTF-16LE payload */
	data [0] = 2 + ascii2utf (buf, data + 2, len - 2);
	data [1] = 3;
	return data [0];
}
346
347/* Wrappers around the list functions (include/linux/list.h). */
348
349static inline int urb_list_empty(int epid)
350{
351 return list_empty(&urb_list[epid]);
352}
353
354/* Returns first urb for this epid, or NULL if list is empty. */
355static inline struct urb *urb_list_first(int epid)
356{
357 struct urb *first_urb = 0;
358
359 if (!urb_list_empty(epid)) {
360 /* Get the first urb (i.e. head->next). */
361 urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
362 first_urb = urb_entry->urb;
363 }
364 return first_urb;
365}
366
367/* Adds an urb_entry last in the list for this epid. */
368static inline void urb_list_add(struct urb *urb, int epid)
369{
370 urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), KMALLOC_FLAG);
371 assert(urb_entry);
372
373 urb_entry->urb = urb;
374 list_add_tail(&urb_entry->list, &urb_list[epid]);
375}
376
377/* Search through the list for an element that contains this urb. (The list
378 is expected to be short and the one we are about to delete will often be
379 the first in the list.) */
380static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid)
381{
382 struct list_head *entry;
383 struct list_head *tmp;
384 urb_entry_t *urb_entry;
385
386 list_for_each_safe(entry, tmp, &urb_list[epid]) {
387 urb_entry = list_entry(entry, urb_entry_t, list);
388 assert(urb_entry);
389 assert(urb_entry->urb);
390
391 if (urb_entry->urb == urb) {
392 return urb_entry;
393 }
394 }
395 return 0;
396}
397
398/* Delete an urb from the list. */
399static inline void urb_list_del(struct urb *urb, int epid)
400{
401 urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
402 assert(urb_entry);
403
404 /* Delete entry and free. */
405 list_del(&urb_entry->list);
406 kfree(urb_entry);
407}
408
409/* Move an urb to the end of the list. */
410static inline void urb_list_move_last(struct urb *urb, int epid)
411{
412 urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
413 assert(urb_entry);
414
415 list_del(&urb_entry->list);
416 list_add_tail(&urb_entry->list, &urb_list[epid]);
417}
418
419/* Get the next urb in the list. */
420static inline struct urb *urb_list_next(struct urb *urb, int epid)
421{
422 urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
423
424 assert(urb_entry);
425
426 if (urb_entry->list.next != &urb_list[epid]) {
427 struct list_head *elem = urb_entry->list.next;
428 urb_entry = list_entry(elem, urb_entry_t, list);
429 return urb_entry->urb;
430 } else {
431 return NULL;
432 }
433}
434
435
436
437/* For debug purposes only. */
438static inline void urb_list_dump(int epid)
439{
440 struct list_head *entry;
441 struct list_head *tmp;
442 urb_entry_t *urb_entry;
443 int i = 0;
444
445 info("Dumping urb list for epid %d", epid);
446
447 list_for_each_safe(entry, tmp, &urb_list[epid]) {
448 urb_entry = list_entry(entry, urb_entry_t, list);
449 info(" entry %d, urb = 0x%lx", i, (unsigned long)urb_entry->urb);
450 }
451}
452
453static void init_rx_buffers(void);
454static int etrax_rh_unlink_urb(struct urb *urb);
455static void etrax_rh_send_irq(struct urb *urb);
456static void etrax_rh_init_int_timer(struct urb *urb);
457static void etrax_rh_int_timer_do(unsigned long ptr);
458
459static int etrax_usb_setup_epid(struct urb *urb);
460static int etrax_usb_lookup_epid(struct urb *urb);
461static int etrax_usb_allocate_epid(void);
462static void etrax_usb_free_epid(int epid);
463
464static int etrax_remove_from_sb_list(struct urb *urb);
465
466static void* etrax_usb_buffer_alloc(struct usb_bus* bus, size_t size, int mem_flags, dma_addr_t *dma);
467static void etrax_usb_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma);
468
469static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid);
470static void etrax_usb_add_to_ctrl_sb_list(struct urb *urb, int epid);
471static void etrax_usb_add_to_intr_sb_list(struct urb *urb, int epid);
472static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid);
473
474static int etrax_usb_submit_bulk_urb(struct urb *urb);
475static int etrax_usb_submit_ctrl_urb(struct urb *urb);
476static int etrax_usb_submit_intr_urb(struct urb *urb);
477static int etrax_usb_submit_isoc_urb(struct urb *urb);
478
479static int etrax_usb_submit_urb(struct urb *urb, int mem_flags);
480static int etrax_usb_unlink_urb(struct urb *urb, int status);
481static int etrax_usb_get_frame_number(struct usb_device *usb_dev);
482
483static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc, struct pt_regs *regs);
484static irqreturn_t etrax_usb_rx_interrupt(int irq, void *vhc, struct pt_regs *regs);
485static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc, struct pt_regs *regs);
486static void etrax_usb_hc_interrupt_bottom_half(void *data);
487
488static void etrax_usb_isoc_descr_interrupt_bottom_half(void *data);
489
490
491/* The following is a list of interrupt handlers for the host controller interrupts we use.
492 They are called from etrax_usb_hc_interrupt_bottom_half. */
493static void etrax_usb_hc_isoc_eof_interrupt(void);
494static void etrax_usb_hc_bulk_eot_interrupt(int timer_induced);
495static void etrax_usb_hc_epid_attn_interrupt(usb_interrupt_registers_t *reg);
496static void etrax_usb_hc_port_status_interrupt(usb_interrupt_registers_t *reg);
497static void etrax_usb_hc_ctl_status_interrupt(usb_interrupt_registers_t *reg);
498
499static int etrax_rh_submit_urb (struct urb *urb);
500
501/* Forward declaration needed because they are used in the rx interrupt routine. */
502static void etrax_usb_complete_urb(struct urb *urb, int status);
503static void etrax_usb_complete_bulk_urb(struct urb *urb, int status);
504static void etrax_usb_complete_ctrl_urb(struct urb *urb, int status);
505static void etrax_usb_complete_intr_urb(struct urb *urb, int status);
506static void etrax_usb_complete_isoc_urb(struct urb *urb, int status);
507
508static int etrax_usb_hc_init(void);
509static void etrax_usb_hc_cleanup(void);
510
/* Operations vector registered with the USB core: entry points for frame
   number queries, urb submit/unlink and DMA-consistent buffer management.
   All of them are forward-declared above and defined later in this file. */
static struct usb_operations etrax_usb_device_operations =
{
	.get_frame_number = etrax_usb_get_frame_number,
	.submit_urb = etrax_usb_submit_urb,
	.unlink_urb = etrax_usb_unlink_urb,
	.buffer_alloc = etrax_usb_buffer_alloc,
	.buffer_free = etrax_usb_buffer_free
};
519
/* Note that these functions are always available in their "__" variants, for use in
   error situations. The "__" missing variants are controlled by the USB_DEBUG_DESC/
   USB_DEBUG_URB macros. */

/* Dump every field of an urb to the console (debug aid, printk only,
   no side effects on the urb). */
static void __dump_urb(struct urb* purb)
{
	printk("\nurb :0x%08lx\n", (unsigned long)purb);
	printk("dev :0x%08lx\n", (unsigned long)purb->dev);
	printk("pipe :0x%08x\n", purb->pipe);
	printk("status :%d\n", purb->status);
	printk("transfer_flags :0x%08x\n", purb->transfer_flags);
	printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
	printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
	printk("actual_length :%d\n", purb->actual_length);
	printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
	printk("start_frame :%d\n", purb->start_frame);
	printk("number_of_packets :%d\n", purb->number_of_packets);
	printk("interval :%d\n", purb->interval);
	printk("error_count :%d\n", purb->error_count);
	printk("context :0x%08lx\n", (unsigned long)purb->context);
	printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
}
541
/* Dump one DMA IN descriptor (debug aid). The descriptor may be live DMA
   memory, hence the volatile pointer; fields are only read. */
static void __dump_in_desc(volatile USB_IN_Desc_t *in)
{
	printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
	printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
	printk(" command : 0x%04x\n", in->command);
	printk(" next : 0x%08lx\n", in->next);
	printk(" buf : 0x%08lx\n", in->buf);
	printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
	printk(" status : 0x%04x\n\n", in->status);
}
552
553static void __dump_sb_desc(volatile USB_SB_Desc_t *sb)
554{
555 char tt = (sb->command & 0x30) >> 4;
556 char *tt_string;
557
558 switch (tt) {
559 case 0:
560 tt_string = "zout";
561 break;
562 case 1:
563 tt_string = "in";
564 break;
565 case 2:
566 tt_string = "out";
567 break;
568 case 3:
569 tt_string = "setup";
570 break;
571 default:
572 tt_string = "unknown (weird)";
573 }
574
575 printk("\n USB_SB_Desc at 0x%08lx\n", (unsigned long)sb);
576 printk(" command : 0x%04x\n", sb->command);
577 printk(" rem : %d\n", (sb->command & 0x3f00) >> 8);
578 printk(" full : %d\n", (sb->command & 0x40) >> 6);
579 printk(" tt : %d (%s)\n", tt, tt_string);
580 printk(" intr : %d\n", (sb->command & 0x8) >> 3);
581 printk(" eot : %d\n", (sb->command & 0x2) >> 1);
582 printk(" eol : %d\n", sb->command & 0x1);
583 printk(" sw_len : 0x%04x (%d)\n", sb->sw_len, sb->sw_len);
584 printk(" next : 0x%08lx\n", sb->next);
585 printk(" buf : 0x%08lx\n\n", sb->buf);
586}
587
588
/* Dump one EP descriptor with the command word decoded field by field
   (debug aid; descriptor is only read). */
static void __dump_ep_desc(volatile USB_EP_Desc_t *ep)
{
	printk("\nUSB_EP_Desc at 0x%08lx\n", (unsigned long)ep);
	printk(" command : 0x%04x\n", ep->command);
	printk(" ep_id : %d\n", (ep->command & 0x1f00) >> 8);
	printk(" enable : %d\n", (ep->command & 0x10) >> 4);
	printk(" intr : %d\n", (ep->command & 0x8) >> 3);
	printk(" eof : %d\n", (ep->command & 0x2) >> 1);
	printk(" eol : %d\n", ep->command & 0x1);
	printk(" hw_len : 0x%04x (%d)\n", ep->hw_len, ep->hw_len);
	printk(" next : 0x%08lx\n", ep->next);
	printk(" sub : 0x%08lx\n\n", ep->sub);
}
602
/* Dump the whole circular EP descriptor ring for one traffic type, together
   with each EP's SB (sub) descriptor chain (debug aid).
   NOTE(review): assumes the EP ring is well-formed (next pointers eventually
   lead back to the first EP) — an inconsistent ring would loop forever. */
static inline void __dump_ep_list(int pipe_type)
{
	volatile USB_EP_Desc_t *ep;
	volatile USB_EP_Desc_t *first_ep;
	volatile USB_SB_Desc_t *sb;

	/* Pick the ring that matches the pipe type. */
	switch (pipe_type)
	{
	case PIPE_BULK:
		first_ep = &TxBulkEPList[0];
		break;
	case PIPE_CONTROL:
		first_ep = &TxCtrlEPList[0];
		break;
	case PIPE_INTERRUPT:
		first_ep = &TxIntrEPList[0];
		break;
	case PIPE_ISOCHRONOUS:
		first_ep = &TxIsocEPList[0];
		break;
	default:
		warn("Cannot dump unknown traffic type");
		return;
	}
	ep = first_ep;

	printk("\n\nDumping EP list...\n\n");

	do {
		__dump_ep_desc(ep);
		/* The descriptors hold physical addresses; translate before
		   dereferencing. Cannot phys_to_virt on 0 as it turns into
		   80000000, which is != 0. */
		sb = ep->sub ? phys_to_virt(ep->sub) : 0;
		while (sb) {
			__dump_sb_desc(sb);
			sb = sb->next ? phys_to_virt(sb->next) : 0;
		}
		ep = (volatile USB_EP_Desc_t *)(phys_to_virt(ep->next));

	} while (ep != first_ep);
}
643
/* Read R_USB_EPT_DATA for one endpoint id and print it decoded field by
   field (debug aid). Valid epids are 0..31. */
static inline void __dump_ept_data(int epid)
{
	unsigned long flags;
	__u32 r_usb_ept_data;

	if (epid < 0 || epid > 31) {
		printk("Cannot dump ept data for invalid epid %d\n", epid);
		return;
	}

	/* R_USB_EPT_INDEX/R_USB_EPT_DATA form an indexed register pair, so
	   the select + read must not be interleaved with other users:
	   interrupts are disabled around the access. */
	save_flags(flags);
	cli();
	*R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
	nop(); /* settle time between index write and data read (same pattern
	          as the other EPT accessors in this file) */
	r_usb_ept_data = *R_USB_EPT_DATA;
	restore_flags(flags);

	printk("\nR_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
	if (r_usb_ept_data == 0) {
		/* No need for more detailed printing. */
		return;
	}
	printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
	printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
	printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
	printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
	printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
	printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
	printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
	printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
	printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
	printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
	printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
	printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
}
679
/* Dump R_USB_EPT_DATA for all 32 endpoint ids (debug aid). */
static inline void __dump_ept_data_list(void)
{
	int epid;

	printk("Dumping the whole R_USB_EPT_DATA list\n");

	for (epid = 0; epid < 32; epid++)
		__dump_ept_data(epid);
}
#ifdef USB_DEBUG_DESC
/* Bug fix: the expansion must forward the arguments with __VA_ARGS__ (C99);
   a literal "..." in the replacement list does not preprocess, so these
   macros broke the build whenever USB_DEBUG_DESC was defined. */
#define dump_in_desc(...) __dump_in_desc(__VA_ARGS__)
#define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__)
#define dump_ep_desc(...) __dump_ep_desc(__VA_ARGS__)
#else
#define dump_in_desc(...) do {} while (0)
#define dump_sb_desc(...) do {} while (0)
#define dump_ep_desc(...) do {} while (0)
#endif

#ifdef USB_DEBUG_URB
#define dump_urb(x) __dump_urb(x)
#else
#define dump_urb(x) do {} while (0)
#endif
705
/* Build the circular DMA IN descriptor ring that receives all USB traffic,
   point DMA channel 9 at it and start the channel. Each descriptor owns a
   RX_DESC_BUF_SIZE slice of the RxBuf area. */
static void init_rx_buffers(void)
{
	int i;

	DBFENTER;

	/* All but the last descriptor: chained to the following one. */
	for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
		RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
		RxDescList[i].command = 0;
		RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
		RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
		RxDescList[i].hw_len = 0;
		RxDescList[i].status = 0;

		/* DMA IN cache bug. (struct etrax_dma_descr has the same layout as USB_IN_Desc
		   for the relevant fields.) */
		prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);

	}

	/* Last descriptor: eol set and next pointing back at the first entry,
	   closing the ring. (i == NBR_OF_RX_DESC - 1 after the loop.) */
	RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
	RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
	RxDescList[i].next = virt_to_phys(&RxDescList[0]);
	RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
	RxDescList[i].hw_len = 0;
	RxDescList[i].status = 0;

	/* Software view of the ring: next descriptor to consume, and the
	   last/previous descriptor (both start at the ring's tail). */
	myNextRxDesc = &RxDescList[0];
	myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
	myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];

	/* Hand the ring to DMA channel 9 and start it. */
	*R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
	*R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);

	DBFEXIT;
}
742
/* Build the circular bulk EP descriptor ring (one EP per epid) together
   with the per-epid pair of dummy EPs, and point DMA channel 8 sub 0 at
   it. The channel is deliberately not started here. */
static void init_tx_bulk_ep(void)
{
	int i;

	DBFENTER;

	for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
		CHECK_ALIGN(&TxBulkEPList[i]);
		TxBulkEPList[i].hw_len = 0;
		TxBulkEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
		TxBulkEPList[i].sub = 0;
		TxBulkEPList[i].next = virt_to_phys(&TxBulkEPList[i + 1]);

		/* Initiate two EPs, disabled and with the eol flag set. No need for any
		   preserved epid. */

		/* The first one has the intr flag set so we get an interrupt when the DMA
		   channel is about to become disabled. */
		CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
		TxBulkDummyEPList[i][0].hw_len = 0;
		TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
						   IO_STATE(USB_EP_command, eol, yes) |
						   IO_STATE(USB_EP_command, intr, yes));
		TxBulkDummyEPList[i][0].sub = 0;
		TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);

		/* The second one. */
		CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
		TxBulkDummyEPList[i][1].hw_len = 0;
		TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
						   IO_STATE(USB_EP_command, eol, yes));
		TxBulkDummyEPList[i][1].sub = 0;
		/* The last dummy's next pointer is the same as the current EP's next pointer. */
		TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
	}

	/* Configure the last one. */
	CHECK_ALIGN(&TxBulkEPList[i]);
	TxBulkEPList[i].hw_len = 0;
	TxBulkEPList[i].command = (IO_STATE(USB_EP_command, eol, yes) |
				   IO_FIELD(USB_EP_command, epid, i));
	TxBulkEPList[i].sub = 0;
	TxBulkEPList[i].next = virt_to_phys(&TxBulkEPList[0]);

	/* No need configuring dummy EPs for the last one as it will never be used for
	   bulk traffic (i == INVALD_EPID at this point). */

	/* Set up to start on the last EP so we will enable it when inserting traffic
	   for the first time (imitating the situation where the DMA has stopped
	   because there was no more traffic). */
	*R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
	/* No point in starting the bulk channel yet.
	   *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
	DBFEXIT;
}
798
/* Build the circular control EP descriptor ring (one EP per epid, all
   disabled, last one eol), point DMA channel 8 sub 1 at it and start the
   channel. */
static void init_tx_ctrl_ep(void)
{
	int i;

	DBFENTER;

	for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
		CHECK_ALIGN(&TxCtrlEPList[i]);
		TxCtrlEPList[i].hw_len = 0;
		TxCtrlEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
		TxCtrlEPList[i].sub = 0;
		TxCtrlEPList[i].next = virt_to_phys(&TxCtrlEPList[i + 1]);
	}

	/* Last EP closes the ring (eol set, next wraps to the first). */
	CHECK_ALIGN(&TxCtrlEPList[i]);
	TxCtrlEPList[i].hw_len = 0;
	TxCtrlEPList[i].command = (IO_STATE(USB_EP_command, eol, yes) |
				   IO_FIELD(USB_EP_command, epid, i));

	TxCtrlEPList[i].sub = 0;
	TxCtrlEPList[i].next = virt_to_phys(&TxCtrlEPList[0]);

	*R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[0]);
	*R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);

	DBFEXIT;
}
826
827
/* Build the interrupt traffic EP ring: MAX_INTR_INTERVAL enabled slots that
   all point at the shared zero-length-out SB, then point DMA channel 8
   sub 2 at the ring and start it. */
static void init_tx_intr_ep(void)
{
	int i;

	DBFENTER;

	/* Read comment at zout_buffer declaration for an explanation to this. */
	TxIntrSB_zout.sw_len = 1;
	TxIntrSB_zout.next = 0;
	TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
	TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
				 IO_STATE(USB_SB_command, tt, zout) |
				 IO_STATE(USB_SB_command, full, yes) |
				 IO_STATE(USB_SB_command, eot, yes) |
				 IO_STATE(USB_SB_command, eol, yes));

	/* Every slot is enabled but carries INVALID_EPID until real interrupt
	   traffic is linked in; eof gives a per-frame boundary. */
	for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
		CHECK_ALIGN(&TxIntrEPList[i]);
		TxIntrEPList[i].hw_len = 0;
		TxIntrEPList[i].command =
			(IO_STATE(USB_EP_command, eof, yes) |
			 IO_STATE(USB_EP_command, enable, yes) |
			 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
		TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
		TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
	}

	/* Last slot additionally has eol set and wraps back to slot 0. */
	CHECK_ALIGN(&TxIntrEPList[i]);
	TxIntrEPList[i].hw_len = 0;
	TxIntrEPList[i].command =
		(IO_STATE(USB_EP_command, eof, yes) |
		 IO_STATE(USB_EP_command, eol, yes) |
		 IO_STATE(USB_EP_command, enable, yes) |
		 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
	TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
	TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);

	*R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
	*R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
	DBFEXIT;
}
869
/* Build the isochronous EP ring (one disabled EP per epid plus an enabled
   dummy tail that generates eof interrupts), then point DMA channel 8
   sub 3 at it and start the channel. */
static void init_tx_isoc_ep(void)
{
	int i;

	DBFENTER;

	/* Read comment at zout_buffer declaration for an explanation to this. */
	TxIsocSB_zout.sw_len = 1;
	TxIsocSB_zout.next = 0;
	TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
	TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
				 IO_STATE(USB_SB_command, tt, zout) |
				 IO_STATE(USB_SB_command, full, yes) |
				 IO_STATE(USB_SB_command, eot, yes) |
				 IO_STATE(USB_SB_command, eol, yes));

	/* The last isochronous EP descriptor is a dummy. */

	for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
		CHECK_ALIGN(&TxIsocEPList[i]);
		TxIsocEPList[i].hw_len = 0;
		TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
		TxIsocEPList[i].sub = 0;
		TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
	}

	CHECK_ALIGN(&TxIsocEPList[i]);
	TxIsocEPList[i].hw_len = 0;

	/* Must enable the last EP descr to get eof interrupt. */
	TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
				   IO_STATE(USB_EP_command, eof, yes) |
				   IO_STATE(USB_EP_command, eol, yes) |
				   IO_FIELD(USB_EP_command, epid, INVALID_EPID));
	TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
	TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);

	*R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
	*R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);

	DBFEXIT;
}
912
/* Remove every EP descriptor belonging to an interrupt urb's epid from the
   live interrupt EP ring and free the descriptors and their (single) SBs.
   Clears urb->hcpriv when done. */
static void etrax_usb_unlink_intr_urb(struct urb *urb)
{
	volatile USB_EP_Desc_t *first_ep;  /* First EP in the list. */
	volatile USB_EP_Desc_t *curr_ep;   /* Current EP, the iterator. */
	volatile USB_EP_Desc_t *next_ep;   /* The EP after current. */
	volatile USB_EP_Desc_t *unlink_ep; /* The one we should remove from the list. */

	int epid;

	/* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the List". */

	DBFENTER;

	epid = ((etrax_urb_priv_t *)urb->hcpriv)->epid;

	first_ep = &TxIntrEPList[0];
	curr_ep = first_ep;


	/* Note that this loop removes all EP descriptors with this epid. This assumes
	   that all EP descriptors belong to the one and only urb for this epid. */

	do {
		next_ep = (USB_EP_Desc_t *)phys_to_virt(curr_ep->next);

		if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {

			dbg_intr("Found EP to unlink for epid %d", epid);

			/* This is the one we should unlink. */
			unlink_ep = next_ep;

			/* Actually unlink the EP from the DMA list. */
			curr_ep->next = unlink_ep->next;

			/* Wait until the DMA is no longer at this descriptor.
			   Busy-wait: the freeing below must not happen while
			   the hardware could still be reading the descriptor. */
			while (*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep));

			/* Now we are free to remove it and its SB descriptor.
			   Note that it is assumed here that there is only one sb in the
			   sb list for this ep. */
			kmem_cache_free(usb_desc_cache, phys_to_virt(unlink_ep->sub));
			kmem_cache_free(usb_desc_cache, (USB_EP_Desc_t *)unlink_ep);
		}

		/* Advance via the (possibly just rewritten) next pointer. */
		curr_ep = phys_to_virt(curr_ep->next);

	} while (curr_ep != first_ep);
	urb->hcpriv = NULL;
}
963
/* Recovery helper: walk the live interrupt EP ring (starting from where the
   DMA currently is) and re-enable every disabled EP descriptor that carries
   the given epid. */
void etrax_usb_do_intr_recover(int epid)
{
	USB_EP_Desc_t *first_ep, *tmp_ep;

	DBFENTER;

	/* Start at the EP the DMA channel is currently pointing at. */
	first_ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB2_EP);
	tmp_ep = first_ep;

	/* What this does is simply to walk the list of interrupt
	   ep descriptors and enable those that are disabled. */

	do {
		if (IO_EXTRACT(USB_EP_command, epid, tmp_ep->command) == epid &&
		    !(tmp_ep->command & IO_MASK(USB_EP_command, enable))) {
			tmp_ep->command |= IO_STATE(USB_EP_command, enable, yes);
		}

		tmp_ep = (USB_EP_Desc_t *)phys_to_virt(tmp_ep->next);

	} while (tmp_ep != first_ep);


	DBFEXIT;
}
989
990static int etrax_rh_unlink_urb (struct urb *urb)
991{
992 etrax_hc_t *hc;
993
994 DBFENTER;
995
996 hc = urb->dev->bus->hcpriv;
997
998 if (hc->rh.urb == urb) {
999 hc->rh.send = 0;
1000 del_timer(&hc->rh.rh_int_timer);
1001 }
1002
1003 DBFEXIT;
1004 return 0;
1005}
1006
/* Complete the root hub's interrupt urb with a port status-change bitmap:
   bit 1 = port 1 changed, bit 2 = port 2 changed (bit 0, the hub itself,
   is never set here). Calls the urb's completion handler when sending is
   enabled. */
static void etrax_rh_send_irq(struct urb *urb)
{
	__u16 data = 0;
	etrax_hc_t *hc = urb->dev->bus->hcpriv;
	DBFENTER;

/*
	dbg_rh("R_USB_FM_NUMBER : 0x%08X", *R_USB_FM_NUMBER);
	dbg_rh("R_USB_FM_REMAINING: 0x%08X", *R_USB_FM_REMAINING);
*/

	data |= (hc->rh.wPortChange_1) ? (1 << 1) : 0;
	data |= (hc->rh.wPortChange_2) ? (1 << 2) : 0;

	*((__u16 *)urb->transfer_buffer) = cpu_to_le16(data);
	/* FIXME: Why is actual_length set to 1 when data is 2 bytes?
	   Since only 1 byte is used, why not declare data as __u8? */
	urb->actual_length = 1;
	urb->status = 0;

	if (hc->rh.send && urb->complete) {
		dbg_rh("wPortChange_1: 0x%04X", hc->rh.wPortChange_1);
		dbg_rh("wPortChange_2: 0x%04X", hc->rh.wPortChange_2);

		/* NOTE(review): status/actual_length are written even when
		   send is off; only the completion call is gated. */
		urb->complete(urb, NULL);
	}

	DBFEXIT;
}
1036
/* Arm the root hub polling timer for one interval of the given interrupt
   urb; the timer callback (etrax_rh_int_timer_do) gets the urb as data. */
static void etrax_rh_init_int_timer(struct urb *urb)
{
	etrax_hc_t *hc;

	DBFENTER;

	hc = urb->dev->bus->hcpriv;
	hc->rh.interval = urb->interval;
	init_timer(&hc->rh.rh_int_timer);
	hc->rh.rh_int_timer.function = etrax_rh_int_timer_do;
	hc->rh.rh_int_timer.data = (unsigned long)urb;
	/* FIXME: Is the jiffies resolution enough? All intervals < 10 ms will be mapped
	   to 0, and the rest to the nearest lower 10 ms. */
	hc->rh.rh_int_timer.expires = jiffies + ((HZ * hc->rh.interval) / 1000);
	add_timer(&hc->rh.rh_int_timer);

	DBFEXIT;
}
1055
1056static void etrax_rh_int_timer_do(unsigned long ptr)
1057{
1058 struct urb *urb;
1059 etrax_hc_t *hc;
1060
1061 DBFENTER;
1062
1063 urb = (struct urb*)ptr;
1064 hc = urb->dev->bus->hcpriv;
1065
1066 if (hc->rh.send) {
1067 etrax_rh_send_irq(urb);
1068 }
1069
1070 DBFEXIT;
1071}
1072
/* Find or create an epid (hardware endpoint table slot) for this urb.
   Returns the epid, or -1 if none could be found or allocated. On success
   the corresponding R_USB_EPT_DATA(_ISO) entry is programmed and the
   epid usage / out-traffic bitmasks are updated. */
static int etrax_usb_setup_epid(struct urb *urb)
{
	int epid;
	char devnum, endpoint, out_traffic, slow;
	int maxlen;
	unsigned long flags;

	DBFENTER;

	/* Reuse an existing slot with matching dev/ep/maxlen if there is one. */
	epid = etrax_usb_lookup_epid(urb);
	if ((epid != -1)){
		/* An epid that fits this urb has been found. */
		DBFEXIT;
		return epid;
	}

	/* We must find and initiate a new epid for this urb. */
	epid = etrax_usb_allocate_epid();

	if (epid == -1) {
		/* Failed to allocate a new epid. */
		DBFEXIT;
		return epid;
	}

	/* We now have a new epid to use. Initiate it. */
	set_bit(epid, (void *)&epid_usage_bitmask);

	devnum = usb_pipedevice(urb->pipe);
	endpoint = usb_pipeendpoint(urb->pipe);
	slow = usb_pipeslow(urb->pipe);
	maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
		/* We want both IN and OUT control traffic to be put on the same EP/SB list. */
		out_traffic = 1;
	} else {
		out_traffic = usb_pipeout(urb->pipe);
	}

	/* Program the indexed endpoint table registers with interrupts off
	   (R_USB_EPT_INDEX selects which slot R_USB_EPT_DATA refers to). */
	save_flags(flags);
	cli();

	*R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
	nop();

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		*R_USB_EPT_DATA_ISO = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
			/* FIXME: Change any to the actual port? */
			IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
			IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
			IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
			IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
	} else {
		*R_USB_EPT_DATA = IO_STATE(R_USB_EPT_DATA, valid, yes) |
			IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
			/* FIXME: Change any to the actual port? */
			IO_STATE(R_USB_EPT_DATA, port, any) |
			IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
			IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
			IO_FIELD(R_USB_EPT_DATA, dev, devnum);
	}

	restore_flags(flags);

	/* Remember the traffic direction for later lookups. */
	if (out_traffic) {
		set_bit(epid, (void *)&epid_out_traffic);
	} else {
		clear_bit(epid, (void *)&epid_out_traffic);
	}

	dbg_epid("Setting up epid %d with devnum %d, endpoint %d and max_len %d (%s)",
		 epid, devnum, endpoint, maxlen, out_traffic ? "OUT" : "IN");

	DBFEXIT;
	return epid;
}
1149
/* Release an epid: invalidate its R_USB_EPT_DATA entry (waiting for the
   hardware hold bit to clear first) and clear its usage bit. Warns and
   returns if the epid was not marked in use. */
static void etrax_usb_free_epid(int epid)
{
	unsigned long flags;

	DBFENTER;

	if (!test_bit(epid, (void *)&epid_usage_bitmask)) {
		warn("Trying to free unused epid %d", epid);
		DBFEXIT;
		return;
	}

	save_flags(flags);
	cli();

	*R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
	nop();
	/* Busy-wait until the controller releases the slot. */
	while (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold));
	/* This will, among other things, set the valid field to 0. */
	*R_USB_EPT_DATA = 0;
	restore_flags(flags);

	clear_bit(epid, (void *)&epid_usage_bitmask);


	dbg_epid("Freed epid %d", epid);

	DBFEXIT;
}
1179
/* Search the 32 hardware endpoint table slots for one already configured
   for this urb's device/endpoint/direction/max_len (and low_speed for
   non-iso). Returns the matching epid or -1 if none exists. */
static int etrax_usb_lookup_epid(struct urb *urb)
{
	int i;
	__u32 data;
	char devnum, endpoint, slow, out_traffic;
	int maxlen;
	unsigned long flags;

	DBFENTER;

	devnum = usb_pipedevice(urb->pipe);
	endpoint = usb_pipeendpoint(urb->pipe);
	slow = usb_pipeslow(urb->pipe);
	maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
		/* We want both IN and OUT control traffic to be put on the same EP/SB list. */
		out_traffic = 1;
	} else {
		out_traffic = usb_pipeout(urb->pipe);
	}

	/* Step through all epids. */
	for (i = 0; i < NBR_OF_EPIDS; i++) {
		/* Only consider slots in use with the same traffic direction. */
		if (test_bit(i, (void *)&epid_usage_bitmask) &&
		    test_bit(i, (void *)&epid_out_traffic) == out_traffic) {

			/* Indexed register pair: select slot i, read it back,
			   with interrupts disabled around the access. */
			save_flags(flags);
			cli();
			*R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, i);
			nop();

			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
				data = *R_USB_EPT_DATA_ISO;
				restore_flags(flags);

				if ((IO_MASK(R_USB_EPT_DATA_ISO, valid) & data) &&
				    (IO_EXTRACT(R_USB_EPT_DATA_ISO, dev, data) == devnum) &&
				    (IO_EXTRACT(R_USB_EPT_DATA_ISO, ep, data) == endpoint) &&
				    (IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len, data) == maxlen)) {
					dbg_epid("Found epid %d for devnum %d, endpoint %d (%s)",
						 i, devnum, endpoint, out_traffic ? "OUT" : "IN");
					DBFEXIT;
					return i;
				}
			} else {
				data = *R_USB_EPT_DATA;
				restore_flags(flags);

				if ((IO_MASK(R_USB_EPT_DATA, valid) & data) &&
				    (IO_EXTRACT(R_USB_EPT_DATA, dev, data) == devnum) &&
				    (IO_EXTRACT(R_USB_EPT_DATA, ep, data) == endpoint) &&
				    (IO_EXTRACT(R_USB_EPT_DATA, low_speed, data) == slow) &&
				    (IO_EXTRACT(R_USB_EPT_DATA, max_len, data) == maxlen)) {
					dbg_epid("Found epid %d for devnum %d, endpoint %d (%s)",
						 i, devnum, endpoint, out_traffic ? "OUT" : "IN");
					DBFEXIT;
					return i;
				}
			}
		}
	}

	DBFEXIT;
	return -1;
}
1245
1246static int etrax_usb_allocate_epid(void)
1247{
1248 int i;
1249
1250 DBFENTER;
1251
1252 for (i = 0; i < NBR_OF_EPIDS; i++) {
1253 if (!test_bit(i, (void *)&epid_usage_bitmask)) {
1254 dbg_epid("Found free epid %d", i);
1255 DBFEXIT;
1256 return i;
1257 }
1258 }
1259
1260 dbg_epid("Found no free epids");
1261 DBFEXIT;
1262 return -1;
1263}
1264
1265static int etrax_usb_submit_urb(struct urb *urb, int mem_flags)
1266{
1267 etrax_hc_t *hc;
1268 int ret = -EINVAL;
1269
1270 DBFENTER;
1271
1272 if (!urb->dev || !urb->dev->bus) {
1273 return -ENODEV;
1274 }
1275 if (usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)) <= 0) {
1276 info("Submit urb to pipe with maxpacketlen 0, pipe 0x%X\n", urb->pipe);
1277 return -EMSGSIZE;
1278 }
1279
1280 if (urb->timeout) {
1281 /* FIXME. */
1282 warn("urb->timeout specified, ignoring.");
1283 }
1284
1285 hc = (etrax_hc_t*)urb->dev->bus->hcpriv;
1286
1287 if (usb_pipedevice(urb->pipe) == hc->rh.devnum) {
1288 /* This request is for the Virtual Root Hub. */
1289 ret = etrax_rh_submit_urb(urb);
1290
1291 } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
1292
1293 ret = etrax_usb_submit_bulk_urb(urb);
1294
1295 } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1296
1297 ret = etrax_usb_submit_ctrl_urb(urb);
1298
1299 } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
1300 int bustime;
1301
1302 if (urb->bandwidth == 0) {
1303 bustime = usb_check_bandwidth(urb->dev, urb);
1304 if (bustime < 0) {
1305 ret = bustime;
1306 } else {
1307 ret = etrax_usb_submit_intr_urb(urb);
1308 if (ret == 0)
1309 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1310 }
1311 } else {
1312 /* Bandwidth already set. */
1313 ret = etrax_usb_submit_intr_urb(urb);
1314 }
1315
1316 } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1317 int bustime;
1318
1319 if (urb->bandwidth == 0) {
1320 bustime = usb_check_bandwidth(urb->dev, urb);
1321 if (bustime < 0) {
1322 ret = bustime;
1323 } else {
1324 ret = etrax_usb_submit_isoc_urb(urb);
1325 if (ret == 0)
1326 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1327 }
1328 } else {
1329 /* Bandwidth already set. */
1330 ret = etrax_usb_submit_isoc_urb(urb);
1331 }
1332 }
1333
1334 DBFEXIT;
1335
1336 if (ret != 0)
1337 printk("Submit URB error %d\n", ret);
1338
1339 return ret;
1340}
1341
1342static int etrax_usb_unlink_urb(struct urb *urb, int status)
1343{
1344 etrax_hc_t *hc;
1345 etrax_urb_priv_t *urb_priv;
1346 int epid;
1347 unsigned int flags;
1348
1349 DBFENTER;
1350
1351 if (!urb) {
1352 return -EINVAL;
1353 }
1354
1355 /* Disable interrupts here since a descriptor interrupt for the isoc epid
1356 will modify the sb list. This could possibly be done more granular, but
1357 unlink_urb should not be used frequently anyway.
1358 */
1359
1360 save_flags(flags);
1361 cli();
1362
1363 if (!urb->dev || !urb->dev->bus) {
1364 restore_flags(flags);
1365 return -ENODEV;
1366 }
1367 if (!urb->hcpriv) {
1368 /* This happens if a device driver calls unlink on an urb that
1369 was never submitted (lazy driver) or if the urb was completed
1370 while unlink was being called. */
1371 restore_flags(flags);
1372 return 0;
1373 }
1374 if (urb->transfer_flags & URB_ASYNC_UNLINK) {
1375 /* FIXME. */
1376 /* If URB_ASYNC_UNLINK is set:
1377 unlink
1378 move to a separate urb list
1379 call complete at next sof with ECONNRESET
1380
1381 If not:
1382 wait 1 ms
1383 unlink
1384 call complete with ENOENT
1385 */
1386 warn("URB_ASYNC_UNLINK set, ignoring.");
1387 }
1388
1389 /* One might think that urb->status = -EINPROGRESS would be a requirement for unlinking,
1390 but that doesn't work for interrupt and isochronous traffic since they are completed
1391 repeatedly, and urb->status is set then. That may in itself be a bug though. */
1392
1393 hc = urb->dev->bus->hcpriv;
1394 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
1395 epid = urb_priv->epid;
1396
1397 /* Set the urb status (synchronous unlink). */
1398 urb->status = -ENOENT;
1399 urb_priv->urb_state = UNLINK;
1400
1401 if (usb_pipedevice(urb->pipe) == hc->rh.devnum) {
1402 int ret;
1403 ret = etrax_rh_unlink_urb(urb);
1404 DBFEXIT;
1405 restore_flags(flags);
1406 return ret;
1407
1408 } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
1409
1410 dbg_bulk("Unlink of bulk urb (0x%lx)", (unsigned long)urb);
1411
1412 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
1413 /* The EP was enabled, disable it and wait. */
1414 TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
1415
1416 /* Ah, the luxury of busy-wait. */
1417 while (*R_DMA_CH8_SUB0_EP == virt_to_phys(&TxBulkEPList[epid]));
1418 }
1419 /* Kicking dummy list out of the party. */
1420 TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
1421
1422 } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1423
1424 dbg_ctrl("Unlink of ctrl urb (0x%lx)", (unsigned long)urb);
1425
1426 if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
1427 /* The EP was enabled, disable it and wait. */
1428 TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
1429
1430 /* Ah, the luxury of busy-wait. */
1431 while (*R_DMA_CH8_SUB1_EP == virt_to_phys(&TxCtrlEPList[epid]));
1432 }
1433
1434 } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
1435
1436 dbg_intr("Unlink of intr urb (0x%lx)", (unsigned long)urb);
1437
1438 /* Separate function because it's a tad more complicated. */
1439 etrax_usb_unlink_intr_urb(urb);
1440
1441 } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1442
1443 dbg_isoc("Unlink of isoc urb (0x%lx)", (unsigned long)urb);
1444
1445 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
1446 /* The EP was enabled, disable it and wait. */
1447 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
1448
1449 /* Ah, the luxury of busy-wait. */
1450 while (*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid]));
1451 }
1452 }
1453
1454 /* Note that we need to remove the urb from the urb list *before* removing its SB
1455 descriptors. (This means that the isoc eof handler might get a null urb when we
1456 are unlinking the last urb.) */
1457
1458 if (usb_pipetype(urb->pipe) == PIPE_BULK) {
1459
1460 urb_list_del(urb, epid);
1461 TxBulkEPList[epid].sub = 0;
1462 etrax_remove_from_sb_list(urb);
1463
1464 } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1465
1466 urb_list_del(urb, epid);
1467 TxCtrlEPList[epid].sub = 0;
1468 etrax_remove_from_sb_list(urb);
1469
1470 } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
1471
1472 urb_list_del(urb, epid);
1473 /* Sanity check (should never happen). */
1474 assert(urb_list_empty(epid));
1475
1476 /* Release allocated bandwidth. */
1477 usb_release_bandwidth(urb->dev, urb, 0);
1478
1479 } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1480
1481 if (usb_pipeout(urb->pipe)) {
1482
1483 USB_SB_Desc_t *iter_sb, *prev_sb, *next_sb;
1484
1485 if (__urb_list_entry(urb, epid)) {
1486
1487 urb_list_del(urb, epid);
1488 iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
1489 prev_sb = 0;
1490 while (iter_sb && (iter_sb != urb_priv->first_sb)) {
1491 prev_sb = iter_sb;
1492 iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
1493 }
1494
1495 if (iter_sb == 0) {
1496 /* Unlink of the URB currently being transmitted. */
1497 prev_sb = 0;
1498 iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
1499 }
1500
1501 while (iter_sb && (iter_sb != urb_priv->last_sb)) {
1502 iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
1503 }
1504 if (iter_sb) {
1505 next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
1506 } else {
1507 /* This should only happen if the DMA has completed
1508 processing the SB list for this EP while interrupts
1509 are disabled. */
1510 dbg_isoc("Isoc urb not found, already sent?");
1511 next_sb = 0;
1512 }
1513 if (prev_sb) {
1514 prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
1515 } else {
1516 TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
1517 }
1518
1519 etrax_remove_from_sb_list(urb);
1520 if (urb_list_empty(epid)) {
1521 TxIsocEPList[epid].sub = 0;
1522 dbg_isoc("Last isoc out urb epid %d", epid);
1523 } else if (next_sb || prev_sb) {
1524 dbg_isoc("Re-enable isoc out epid %d", epid);
1525
1526 TxIsocEPList[epid].hw_len = 0;
1527 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
1528 } else {
1529 TxIsocEPList[epid].sub = 0;
1530 dbg_isoc("URB list non-empty and no SB list, EP disabled");
1531 }
1532 } else {
1533 dbg_isoc("Urb 0x%p not found, completed already?", urb);
1534 }
1535 } else {
1536
1537 urb_list_del(urb, epid);
1538
1539 /* For in traffic there is only one SB descriptor for each EP even
1540 though there may be several urbs (all urbs point at the same SB). */
1541 if (urb_list_empty(epid)) {
1542 /* No more urbs, remove the SB. */
1543 TxIsocEPList[epid].sub = 0;
1544 etrax_remove_from_sb_list(urb);
1545 } else {
1546 TxIsocEPList[epid].hw_len = 0;
1547 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
1548 }
1549 }
1550 /* Release allocated bandwidth. */
1551 usb_release_bandwidth(urb->dev, urb, 1);
1552 }
1553 /* Free the epid if urb list is empty. */
1554 if (urb_list_empty(epid)) {
1555 etrax_usb_free_epid(epid);
1556 }
1557 restore_flags(flags);
1558
1559 /* Must be done before calling completion handler. */
1560 kfree(urb_priv);
1561 urb->hcpriv = 0;
1562
1563 if (urb->complete) {
1564 urb->complete(urb, NULL);
1565 }
1566
1567 DBFEXIT;
1568 return 0;
1569}
1570
1571static int etrax_usb_get_frame_number(struct usb_device *usb_dev)
1572{
1573 DBFENTER;
1574 DBFEXIT;
1575 return (*R_USB_FM_NUMBER & 0x7ff);
1576}
1577
/* Top-half interrupt handler for the descriptor interrupts of DMA channel 8's
   four sub-channels (sub0 = bulk, sub1 = ctrl, sub2 = intr, sub3 = isoc out).
   Each sub-channel's pending bit in R_IRQ_READ2 is checked and serviced
   independently; the handler always returns IRQ_HANDLED. */
static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc, struct pt_regs *regs)
{
	DBFENTER;

	/* This interrupt handler could be used when unlinking EP descriptors. */

	if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
		USB_EP_Desc_t *ep;

		//dbg_bulk("dma8_sub0_descr (BULK) intr.");

		/* It should be safe clearing the interrupt here, since we don't expect to get a new
		   one until we restart the bulk channel. */
		*R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);

		/* Wait while the DMA is running (though we don't expect it to be). */
		while (*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd));

		/* Advance the DMA to the next EP descriptor. */
		ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB0_EP);

		//dbg_bulk("descr intr: DMA is at 0x%lx", (unsigned long)ep);

		/* ep->next is already a physical address; no need for a virt_to_phys. */
		*R_DMA_CH8_SUB0_EP = ep->next;

		/* Start the DMA bulk channel again. */
		*R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
	}
	if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
		struct urb *urb;
		int epid;
		etrax_urb_priv_t *urb_priv;
		unsigned long int flags;

		dbg_ctrl("dma8_sub1_descr (CTRL) intr.");
		*R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);

		/* The complete callback gets called so we cli. */
		save_flags(flags);
		cli();

		/* Scan every ctrl epid for an URB whose rx handler deferred
		   completion until this descriptor interrupt. */
		for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
			if ((TxCtrlEPList[epid].sub == 0) ||
			    (epid == DUMMY_EPID) ||
			    (epid == INVALID_EPID)) {
				/* Nothing here to see. */
				continue;
			}

			/* Get the first urb (if any). */
			urb = urb_list_first(epid);

			if (urb) {

				/* Sanity check. */
				assert(usb_pipetype(urb->pipe) == PIPE_CONTROL);

				urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
				assert(urb_priv);

				if (urb_priv->urb_state == WAITING_FOR_DESCR_INTR) {
					/* The rx handler only sets this state while the EP was
					   still enabled; by now the EP must have been disabled. */
					assert(!(TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));

					etrax_usb_complete_urb(urb, 0);
				}
			}
		}
		restore_flags(flags);
	}
	if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
		/* Interrupt-pipe descriptor interrupts are only acknowledged here. */
		dbg_intr("dma8_sub2_descr (INTR) intr.");
		*R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
	}
	if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
		struct urb *urb;
		int epid;
		int epid_done;
		etrax_urb_priv_t *urb_priv;
		USB_SB_Desc_t *sb_desc;

		usb_isoc_complete_data_t *comp_data = NULL;

		/* One or more isoc out transfers are done. */
		dbg_isoc("dma8_sub3_descr (ISOC) intr.");

		/* For each isoc out EP search for the first sb_desc with the intr flag
		   set. This descriptor must be the last packet from an URB. Then
		   traverse the URB list for the EP until the URB with urb_priv->last_sb
		   matching the intr-marked sb_desc is found. All URBs before this have
		   been sent.
		 */

		for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
			/* Skip past epids with no SB lists, epids used for in traffic,
			   and special (dummy, invalid) epids. */
			if ((TxIsocEPList[epid].sub == 0) ||
			    (test_bit(epid, (void *)&epid_out_traffic) == 0) ||
			    (epid == DUMMY_EPID) ||
			    (epid == INVALID_EPID)) {
				/* Nothing here to see. */
				continue;
			}
			sb_desc = phys_to_virt(TxIsocEPList[epid].sub);

			/* Find the last descriptor of the currently active URB for this ep.
			   This is the first descriptor in the sub list marked for a descriptor
			   interrupt. */
			while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
				sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
			}
			assert(sb_desc);

			dbg_isoc("Check epid %d, sub 0x%p, SB 0x%p",
				 epid,
				 phys_to_virt(TxIsocEPList[epid].sub),
				 sb_desc);

			epid_done = 0;

			/* Get the first urb (if any). */
			urb = urb_list_first(epid);
			assert(urb);

			/* Mark every fully-sent URB TRANSFER_DONE; the actual
			   completion happens in the scheduled bottom half. */
			while (urb && !epid_done) {

				/* Sanity check. */
				assert(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);

				if (!usb_pipeout(urb->pipe)) {
					/* descr interrupts are generated only for out pipes. */
					epid_done = 1;
					continue;
				}

				urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
				assert(urb_priv);

				if (sb_desc != urb_priv->last_sb) {

					/* This urb has been sent. */
					dbg_isoc("out URB 0x%p sent", urb);

					urb_priv->urb_state = TRANSFER_DONE;

				} else if ((sb_desc == urb_priv->last_sb) &&
					   !(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {

					/* EP disabled with this URB's last SB at the end of the
					   list: the whole list has been consumed. */
					assert((sb_desc->command & IO_MASK(USB_SB_command, eol)) == IO_STATE(USB_SB_command, eol, yes));
					assert(sb_desc->next == 0);

					dbg_isoc("out URB 0x%p last in list, epid disabled", urb);
					TxIsocEPList[epid].sub = 0;
					TxIsocEPList[epid].hw_len = 0;
					urb_priv->urb_state = TRANSFER_DONE;

					epid_done = 1;

				} else {
					epid_done = 1;
				}
				if (!epid_done) {
					urb = urb_list_next(urb, epid);
				}
			}

		}

		*R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);

		/* Defer the (sleeping-context) URB completion to a work item;
		   comp_data is freed by the bottom half. */
		comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC);
		assert(comp_data != NULL);

		INIT_WORK(&comp_data->usb_bh, etrax_usb_isoc_descr_interrupt_bottom_half, comp_data);
		schedule_work(&comp_data->usb_bh);
	}

	DBFEXIT;
	return IRQ_HANDLED;
}
1758
/* Workqueue bottom half scheduled by etrax_usb_tx_interrupt for isoc out
   descriptor interrupts.  Completes every isoc out URB that the top half has
   marked TRANSFER_DONE, then frees the comp_data that carried the work item.
   data: the usb_isoc_complete_data_t allocated by the top half (owned here). */
static void etrax_usb_isoc_descr_interrupt_bottom_half(void *data)
{
	usb_isoc_complete_data_t *comp_data = (usb_isoc_complete_data_t*)data;

	struct urb *urb;
	int epid;
	int epid_done;
	etrax_urb_priv_t *urb_priv;

	DBFENTER;

	dbg_isoc("dma8_sub3_descr (ISOC) bottom half.");

	for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
		unsigned long flags;

		save_flags(flags);
		cli();

		epid_done = 0;

		/* The descriptor interrupt handler has marked all transmitted isoch. out
		   URBs with TRANSFER_DONE. Now we traverse all epids and for all that
		   have isoch. out traffic traverse its URB list and complete the
		   transmitted URB.
		*/

		/* Note: urb_list_first() is re-read on every pass, so after an URB
		   is completed (and removed from the list) the loop moves on to the
		   new head of the same epid's list. */
		while (!epid_done) {

			/* Get the first urb (if any). */
			urb = urb_list_first(epid);
			if (urb == 0) {
				epid_done = 1;
				continue;
			}

			/* Non-isoc epids (including dummy/invalid) bail out here. */
			if (usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) {
				epid_done = 1;
				continue;
			}

			if (!usb_pipeout(urb->pipe)) {
				/* descr interrupts are generated only for out pipes. */
				epid_done = 1;
				continue;
			}

			dbg_isoc("Check epid %d, SB 0x%p", epid, (char*)TxIsocEPList[epid].sub);

			urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
			assert(urb_priv);

			if (urb_priv->urb_state == TRANSFER_DONE) {
				int i;
				struct usb_iso_packet_descriptor *packet;

				/* This urb has been sent. */
				dbg_isoc("Completing isoc out URB 0x%p", urb);

				/* Out packets are reported as fully sent with no error. */
				for (i = 0; i < urb->number_of_packets; i++) {
					packet = &urb->iso_frame_desc[i];
					packet->status = 0;
					packet->actual_length = packet->length;
				}

				etrax_usb_complete_isoc_urb(urb, 0);

				if (urb_list_empty(epid)) {
					etrax_usb_free_epid(epid);
					epid_done = 1;
				}
			} else {
				epid_done = 1;
			}
		}
		restore_flags(flags);

	}
	/* comp_data was allocated by the top half from isoc_compl_cache. */
	kmem_cache_free(isoc_compl_cache, comp_data);

	DBFEXIT;
}
1841
1842
1843
/* Interrupt handler for the shared DMA channel 9 rx (IN) stream.  Walks the
   ring of rx descriptors, copies received data into the owning URB's buffer
   (all epids share this one DMA channel; the epid is read from each
   descriptor's status word), completes finished URBs, and recycles each
   consumed descriptor back to the tail of the ring.  Always returns
   IRQ_HANDLED. */
static irqreturn_t etrax_usb_rx_interrupt(int irq, void *vhc, struct pt_regs *regs)
{
	struct urb *urb;
	etrax_urb_priv_t *urb_priv;
	int epid = 0;
	unsigned long flags;

	/* Isoc diagnostics. */
	static int curr_fm = 0;
	static int prev_fm = 0;

	DBFENTER;

	/* Clear this interrupt. */
	*R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);

	/* Note that this while loop assumes that all packets span only
	   one rx descriptor. */

	/* The reason we cli here is that we call the driver's callback functions. */
	save_flags(flags);
	cli();

	/* Process every descriptor the DMA has filled (eop set). */
	while (myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {

		epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
		urb = urb_list_first(epid);

		//printk("eop for epid %d, first urb 0x%lx\n", epid, (unsigned long)urb);

		if (!urb) {
			err("No urb for epid %d in rx interrupt", epid);
			__dump_ept_data(epid);
			goto skip_out;
		}

		/* Note that we cannot indescriminately assert(usb_pipein(urb->pipe)) since
		   ctrl pipes are not. */

		if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
			__u32 r_usb_ept_data;
			int no_error = 0;

			assert(test_bit(epid, (void *)&epid_usage_bitmask));

			/* Select the epid before reading its R_USB_EPT_DATA. */
			*R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
			nop();
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
				r_usb_ept_data = *R_USB_EPT_DATA_ISO;

				if ((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
				    (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
				    (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
					/* Not an error, just a failure to receive an expected iso
					   in packet in this frame. This is not documented
					   in the designers reference.
					*/
					no_error++;
				} else {
					warn("R_USB_EPT_DATA_ISO for epid %d = 0x%x", epid, r_usb_ept_data);
				}
			} else {
				r_usb_ept_data = *R_USB_EPT_DATA;
				warn("R_USB_EPT_DATA for epid %d = 0x%x", epid, r_usb_ept_data);
			}

			if (!no_error){
				warn("error in rx desc->status, epid %d, first urb = 0x%lx",
				     epid, (unsigned long)urb);
				__dump_in_desc(myNextRxDesc);

				warn("R_USB_STATUS = 0x%x", *R_USB_STATUS);

				/* Check that ept was disabled when error occurred. */
				switch (usb_pipetype(urb->pipe)) {
				case PIPE_BULK:
					assert(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
					break;
				case PIPE_CONTROL:
					assert(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
					break;
				case PIPE_INTERRUPT:
					assert(!(TxIntrEPList[epid].command & IO_MASK(USB_EP_command, enable)));
					break;
				case PIPE_ISOCHRONOUS:
					assert(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)));
					break;
				default:
					warn("etrax_usb_rx_interrupt: bad pipetype %d in urb 0x%p",
					     usb_pipetype(urb->pipe),
					     urb);
				}
				etrax_usb_complete_urb(urb, -EPROTO);
				goto skip_out;
			}
		}

		urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
		assert(urb_priv);

		if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
		    (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
		    (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {

			if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
				/* We get nodata for empty data transactions, and the rx descriptor's
				   hw_len field is not valid in that case. No data to copy in other
				   words. */
			} else {
				/* Make sure the data fits in the buffer. */
				assert(urb_priv->rx_offset + myNextRxDesc->hw_len
				       <= urb->transfer_buffer_length);

				memcpy(urb->transfer_buffer + urb_priv->rx_offset,
				       phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
				urb_priv->rx_offset += myNextRxDesc->hw_len;
			}

			if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
				if ((usb_pipetype(urb->pipe) == PIPE_CONTROL) &&
				    ((TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)) ==
				     IO_STATE(USB_EP_command, enable, yes))) {
					/* The EP is still enabled, so the OUT packet used to ack
					   the in data is probably not processed yet.  If the EP
					   sub pointer has not moved beyond urb_priv->last_sb mark
					   it for a descriptor interrupt and complete the urb in
					   the descriptor interrupt handler.
					*/
					USB_SB_Desc_t *sub = TxCtrlEPList[urb_priv->epid].sub ? phys_to_virt(TxCtrlEPList[urb_priv->epid].sub) : 0;

					while ((sub != NULL) && (sub != urb_priv->last_sb)) {
						sub = sub->next ? phys_to_virt(sub->next) : 0;
					}
					if (sub != NULL) {
						/* The urb has not been fully processed. */
						urb_priv->urb_state = WAITING_FOR_DESCR_INTR;
					} else {
						warn("(CTRL) epid enabled and urb (0x%p) processed, ep->sub=0x%p", urb, (char*)TxCtrlEPList[urb_priv->epid].sub);
						etrax_usb_complete_urb(urb, 0);
					}
				} else {
					etrax_usb_complete_urb(urb, 0);
				}
			}

		} else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {

			struct usb_iso_packet_descriptor *packet;

			if (urb_priv->urb_state == UNLINK) {
				info("Ignoring rx data for urb being unlinked.");
				goto skip_out;
			} else if (urb_priv->urb_state == NOT_STARTED) {
				info("What? Got rx data for urb that isn't started?");
				goto skip_out;
			}

			packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
			packet->status = 0;

			if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
				/* We get nodata for empty data transactions, and the rx descriptor's
				   hw_len field is not valid in that case. We copy 0 bytes however to
				   stay in synch. */
				packet->actual_length = 0;
			} else {
				packet->actual_length = myNextRxDesc->hw_len;
				/* Make sure the data fits in the buffer. */
				assert(packet->actual_length <= packet->length);
				memcpy(urb->transfer_buffer + packet->offset,
				       phys_to_virt(myNextRxDesc->buf), packet->actual_length);
			}

			/* Increment the packet counter. */
			urb_priv->isoc_packet_counter++;

			/* Note that we don't care about the eot field in the rx descriptor's status.
			   It will always be set for isoc traffic. */
			if (urb->number_of_packets == urb_priv->isoc_packet_counter) {

				/* Out-of-synch diagnostics. */
				curr_fm = (*R_USB_FM_NUMBER & 0x7ff);
				if (((prev_fm + urb_priv->isoc_packet_counter) % (0x7ff + 1)) != curr_fm) {
					/* This test is wrong, if there is more than one isoc
					   in endpoint active it will always calculate wrong
					   since prev_fm is shared by all endpoints.

					   FIXME Make this check per URB using urb->start_frame.
					*/
					dbg_isoc("Out of synch? Previous frame = %d, current frame = %d",
						 prev_fm, curr_fm);

				}
				prev_fm = curr_fm;

				/* Complete the urb with status OK. */
				etrax_usb_complete_isoc_urb(urb, 0);
			}
		}

	skip_out:

		/* DMA IN cache bug. Flush the DMA IN buffer from the cache. (struct etrax_dma_descr
		   has the same layout as USB_IN_Desc for the relevant fields.) */
		prepare_rx_descriptor((struct etrax_dma_descr*)myNextRxDesc);

		/* Recycle the consumed descriptor: make it the new end-of-list and
		   clear eol on the previous tail so the DMA can keep filling. */
		myPrevRxDesc = myNextRxDesc;
		myPrevRxDesc->command |= IO_MASK(USB_IN_command, eol);
		myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
		myLastRxDesc = myPrevRxDesc;

		myNextRxDesc->status = 0;
		myNextRxDesc = phys_to_virt(myNextRxDesc->next);
	}

	restore_flags(flags);

	DBFEXIT;

	return IRQ_HANDLED;
}
2065
2066
2067/* This function will unlink the SB descriptors associated with this urb. */
2068static int etrax_remove_from_sb_list(struct urb *urb)
2069{
2070 USB_SB_Desc_t *next_sb, *first_sb, *last_sb;
2071 etrax_urb_priv_t *urb_priv;
2072 int i = 0;
2073
2074 DBFENTER;
2075
2076 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
2077 assert(urb_priv);
2078
2079 /* Just a sanity check. Since we don't fiddle with the DMA list the EP descriptor
2080 doesn't really need to be disabled, it's just that we expect it to be. */
2081 if (usb_pipetype(urb->pipe) == PIPE_BULK) {
2082 assert(!(TxBulkEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
2083 } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
2084 assert(!(TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
2085 }
2086
2087 first_sb = urb_priv->first_sb;
2088 last_sb = urb_priv->last_sb;
2089
2090 assert(first_sb);
2091 assert(last_sb);
2092
2093 while (first_sb != last_sb) {
2094 next_sb = (USB_SB_Desc_t *)phys_to_virt(first_sb->next);
2095 kmem_cache_free(usb_desc_cache, first_sb);
2096 first_sb = next_sb;
2097 i++;
2098 }
2099 kmem_cache_free(usb_desc_cache, last_sb);
2100 i++;
2101 dbg_sb("%d SB descriptors freed", i);
2102 /* Compare i with urb->number_of_packets for Isoc traffic.
2103 Should be same when calling unlink_urb */
2104
2105 DBFEXIT;
2106
2107 return i;
2108}
2109
2110static int etrax_usb_submit_bulk_urb(struct urb *urb)
2111{
2112 int epid;
2113 int empty;
2114 unsigned long flags;
2115 etrax_urb_priv_t *urb_priv;
2116
2117 DBFENTER;
2118
2119 /* Epid allocation, empty check and list add must be protected.
2120 Read about this in etrax_usb_submit_ctrl_urb. */
2121
2122 spin_lock_irqsave(&urb_list_lock, flags);
2123 epid = etrax_usb_setup_epid(urb);
2124 if (epid == -1) {
2125 DBFEXIT;
2126 spin_unlock_irqrestore(&urb_list_lock, flags);
2127 return -ENOMEM;
2128 }
2129 empty = urb_list_empty(epid);
2130 urb_list_add(urb, epid);
2131 spin_unlock_irqrestore(&urb_list_lock, flags);
2132
2133 dbg_bulk("Adding bulk %s urb 0x%lx to %s list, epid %d",
2134 usb_pipein(urb->pipe) ? "IN" : "OUT", (unsigned long)urb, empty ? "empty" : "", epid);
2135
2136 /* Mark the urb as being in progress. */
2137 urb->status = -EINPROGRESS;
2138
2139 /* Setup the hcpriv data. */
2140 urb_priv = kmalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
2141 assert(urb_priv != NULL);
2142 /* This sets rx_offset to 0. */
2143 memset(urb_priv, 0, sizeof(etrax_urb_priv_t));
2144 urb_priv->urb_state = NOT_STARTED;
2145 urb->hcpriv = urb_priv;
2146
2147 if (empty) {
2148 etrax_usb_add_to_bulk_sb_list(urb, epid);
2149 }
2150
2151 DBFEXIT;
2152
2153 return 0;
2154}
2155
/* Build the single SB descriptor for a bulk URB, preset the hardware toggle
   and error counters for its epid, hook the SB into the (disabled) bulk EP
   descriptor, enable the EP, and restart the bulk DMA channel if needed.
   Called with the urb already first in the epid's list. */
static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid)
{
	USB_SB_Desc_t *sb_desc;
	etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
	unsigned long flags;
	char maxlen;

	DBFENTER;

	dbg_bulk("etrax_usb_add_to_bulk_sb_list, urb 0x%lx", (unsigned long)urb);

	maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));

	sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
	assert(sb_desc != NULL);
	memset(sb_desc, 0, sizeof(USB_SB_Desc_t));


	if (usb_pipeout(urb->pipe)) {

		dbg_bulk("Grabbing bulk OUT, urb 0x%lx, epid %d", (unsigned long)urb, epid);

		/* This is probably a sanity check of the bulk transaction length
		   not being larger than 64 kB. */
		if (urb->transfer_buffer_length > 0xffff) {
			panic("urb->transfer_buffer_length > 0xffff");
		}

		/* For OUT, sw_len is the byte count of the whole transfer. */
		sb_desc->sw_len = urb->transfer_buffer_length;

		/* The rem field is don't care if it's not a full-length transfer, so setting
		   it shouldn't hurt. Also, rem isn't used for OUT traffic. */
		sb_desc->command = (IO_FIELD(USB_SB_command, rem, 0) |
				    IO_STATE(USB_SB_command, tt, out) |
				    IO_STATE(USB_SB_command, eot, yes) |
				    IO_STATE(USB_SB_command, eol, yes));

		/* The full field is set to yes, even if we don't actually check that this is
		   a full-length transfer (i.e., that transfer_buffer_length % maxlen = 0).
		   Setting full prevents the USB controller from sending an empty packet in
		   that case.  However, if URB_ZERO_PACKET was set we want that. */
		if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
			sb_desc->command |= IO_STATE(USB_SB_command, full, yes);
		}

		sb_desc->buf = virt_to_phys(urb->transfer_buffer);
		sb_desc->next = 0;

	} else if (usb_pipein(urb->pipe)) {

		dbg_bulk("Grabbing bulk IN, urb 0x%lx, epid %d", (unsigned long)urb, epid);

		/* For IN, sw_len is the number of packets to request
		   (transfer length rounded up to whole max-size packets). */
		sb_desc->sw_len = urb->transfer_buffer_length ?
			(urb->transfer_buffer_length - 1) / maxlen + 1 : 0;

		/* The rem field is don't care if it's not a full-length transfer, so setting
		   it shouldn't hurt. */
		sb_desc->command =
			(IO_FIELD(USB_SB_command, rem,
				  urb->transfer_buffer_length % maxlen) |
			 IO_STATE(USB_SB_command, tt, in) |
			 IO_STATE(USB_SB_command, eot, yes) |
			 IO_STATE(USB_SB_command, eol, yes));

		sb_desc->buf = 0;
		sb_desc->next = 0;
	}

	/* A bulk URB always uses exactly one SB descriptor. */
	urb_priv->first_sb = sb_desc;
	urb_priv->last_sb = sb_desc;
	urb_priv->epid = epid;

	urb->hcpriv = urb_priv;

	/* Reset toggle bits and reset error count. */
	save_flags(flags);
	cli();

	*R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
	nop();

	/* FIXME: Is this a special case since the hold field is checked,
	   or should we check hold in a lot of other cases as well? */
	if (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) {
		panic("Hold was set in %s", __FUNCTION__);
	}

	/* Reset error counters (regardless of which direction this traffic is). */
	*R_USB_EPT_DATA &=
		~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
		  IO_MASK(R_USB_EPT_DATA, error_count_out));

	/* Software must preset the toggle bits. */
	if (usb_pipeout(urb->pipe)) {
		char toggle =
			usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
		*R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
		*R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
	} else {
		char toggle =
			usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
		*R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
		*R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
	}

	/* Assert that the EP descriptor is disabled. */
	assert(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));

	/* The reason we set the EP's sub pointer directly instead of
	   walking the SB list and linking it last in the list is that we only
	   have one active urb at a time (the rest are queued). */

	/* Note that we cannot have interrupts running when we have set the SB descriptor
	   but the EP is not yet enabled.  If a bulk eot happens for another EP, we will
	   find this EP disabled and with a SB != 0, which will make us think that it's done. */
	TxBulkEPList[epid].sub = virt_to_phys(sb_desc);
	TxBulkEPList[epid].hw_len = 0;
	/* Note that we don't have to fill in the ep_id field since this
	   was done when we allocated the EP descriptors in init_tx_bulk_ep. */

	/* Check if the dummy list is already with us (if several urbs were queued). */
	if (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0])) {

		dbg_bulk("Inviting dummy list to the party for urb 0x%lx, epid %d",
			 (unsigned long)urb, epid);

		/* The last EP in the dummy list already has its next pointer set to
		   TxBulkEPList[epid].next. */

		/* We don't need to check if the DMA is at this EP or not before changing the
		   next pointer, since we will do it in one 32-bit write (EP descriptors are
		   32-bit aligned). */
		TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
	}
	/* Enable the EP descr. */
	dbg_bulk("Enabling bulk EP for urb 0x%lx, epid %d", (unsigned long)urb, epid);
	TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);

	/* Everything is set up, safe to enable interrupts again. */
	restore_flags(flags);

	/* If the DMA bulk channel isn't running, we need to restart it if it
	   has stopped at the last EP descriptor (DMA stopped because there was
	   no more traffic) or if it has stopped at a dummy EP with the intr flag
	   set (DMA stopped because we were too slow in inserting new traffic). */
	if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {

		USB_EP_Desc_t *ep;
		ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
		dbg_bulk("DMA channel not running in add");
		dbg_bulk("DMA is at 0x%lx", (unsigned long)ep);

		/* NOTE(review): the magic (ep->command & 0x8) >> 3 looks like it
		   extracts the EP's intr flag — confirm against USB_EP_command
		   bit layout before touching. */
		if (*R_DMA_CH8_SUB0_EP == virt_to_phys(&TxBulkEPList[NBR_OF_EPIDS - 1]) ||
		    (ep->command & 0x8) >> 3) {
			*R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
			/* Update/restart the bulk start timer since we just started the channel. */
			mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
			/* Update/restart the bulk eot timer since we just inserted traffic. */
			mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
		}
	}

	DBFEXIT;
}
2320
2321static void etrax_usb_complete_bulk_urb(struct urb *urb, int status)
2322{
2323 etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
2324 int epid = urb_priv->epid;
2325 unsigned long flags;
2326
2327 DBFENTER;
2328
2329 if (status)
2330 warn("Completing bulk urb with status %d.", status);
2331
2332 dbg_bulk("Completing bulk urb 0x%lx for epid %d", (unsigned long)urb, epid);
2333
2334 /* Update the urb list. */
2335 urb_list_del(urb, epid);
2336
2337 /* For an IN pipe, we always set the actual length, regardless of whether there was
2338 an error or not (which means the device driver can use the data if it wants to). */
2339 if (usb_pipein(urb->pipe)) {
2340 urb->actual_length = urb_priv->rx_offset;
2341 } else {
2342 /* Set actual_length for OUT urbs also; the USB mass storage driver seems
2343 to want that. We wouldn't know of any partial writes if there was an error. */
2344 if (status == 0) {
2345 urb->actual_length = urb->transfer_buffer_length;
2346 } else {
2347 urb->actual_length = 0;
2348 }
2349 }
2350
2351 /* FIXME: Is there something of the things below we shouldn't do if there was an error?
2352 Like, maybe we shouldn't toggle the toggle bits, or maybe we shouldn't insert more traffic. */
2353
2354 save_flags(flags);
2355 cli();
2356
2357 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
2358 nop();
2359
2360 /* We need to fiddle with the toggle bits because the hardware doesn't do it for us. */
2361 if (usb_pipeout(urb->pipe)) {
2362 char toggle =
2363 IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
2364 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
2365 usb_pipeout(urb->pipe), toggle);
2366 } else {
2367 char toggle =
2368 IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
2369 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
2370 usb_pipeout(urb->pipe), toggle);
2371 }
2372 restore_flags(flags);
2373
2374 /* Remember to free the SBs. */
2375 etrax_remove_from_sb_list(urb);
2376 kfree(urb_priv);
2377 urb->hcpriv = 0;
2378
2379 /* If there are any more urb's in the list we'd better start sending */
2380 if (!urb_list_empty(epid)) {
2381
2382 struct urb *new_urb;
2383
2384 /* Get the first urb. */
2385 new_urb = urb_list_first(epid);
2386 assert(new_urb);
2387
2388 dbg_bulk("More bulk for epid %d", epid);
2389
2390 etrax_usb_add_to_bulk_sb_list(new_urb, epid);
2391 }
2392
2393 urb->status = status;
2394
2395 /* We let any non-zero status from the layer above have precedence. */
2396 if (status == 0) {
2397 /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
2398 is to be treated as an error. */
2399 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
2400 if (usb_pipein(urb->pipe) &&
2401 (urb->actual_length !=
2402 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))) {
2403 urb->status = -EREMOTEIO;
2404 }
2405 }
2406 }
2407
2408 if (urb->complete) {
2409 urb->complete(urb, NULL);
2410 }
2411
2412 if (urb_list_empty(epid)) {
2413 /* This means that this EP is now free, deconfigure it. */
2414 etrax_usb_free_epid(epid);
2415
2416 /* No more traffic; time to clean up.
2417 Must set sub pointer to 0, since we look at the sub pointer when handling
2418 the bulk eot interrupt. */
2419
2420 dbg_bulk("No bulk for epid %d", epid);
2421
2422 TxBulkEPList[epid].sub = 0;
2423
2424 /* Unlink the dummy list. */
2425
2426 dbg_bulk("Kicking dummy list out of party for urb 0x%lx, epid %d",
2427 (unsigned long)urb, epid);
2428
2429 /* No need to wait for the DMA before changing the next pointer.
2430 The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
2431 the last one (INVALID_EPID) for actual traffic. */
2432 TxBulkEPList[epid].next =
2433 virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
2434 }
2435
2436 DBFEXIT;
2437}
2438
2439static int etrax_usb_submit_ctrl_urb(struct urb *urb)
2440{
2441 int epid;
2442 int empty;
2443 unsigned long flags;
2444 etrax_urb_priv_t *urb_priv;
2445
2446 DBFENTER;
2447
2448 /* FIXME: Return -ENXIO if there is already a queued urb for this endpoint? */
2449
2450 /* Epid allocation, empty check and list add must be protected.
2451
2452 Epid allocation because if we find an existing epid for this endpoint an urb might be
2453 completed (emptying the list) before we add the new urb to the list, causing the epid
2454 to be de-allocated. We would then start the transfer with an invalid epid -> epid attn.
2455
2456 Empty check and add because otherwise we might conclude that the list is not empty,
2457 after which it becomes empty before we add the new urb to the list, causing us not to
2458 insert the new traffic into the SB list. */
2459
2460 spin_lock_irqsave(&urb_list_lock, flags);
2461 epid = etrax_usb_setup_epid(urb);
2462 if (epid == -1) {
2463 spin_unlock_irqrestore(&urb_list_lock, flags);
2464 DBFEXIT;
2465 return -ENOMEM;
2466 }
2467 empty = urb_list_empty(epid);
2468 urb_list_add(urb, epid);
2469 spin_unlock_irqrestore(&urb_list_lock, flags);
2470
2471 dbg_ctrl("Adding ctrl urb 0x%lx to %s list, epid %d",
2472 (unsigned long)urb, empty ? "empty" : "", epid);
2473
2474 /* Mark the urb as being in progress. */
2475 urb->status = -EINPROGRESS;
2476
2477 /* Setup the hcpriv data. */
2478 urb_priv = kmalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
2479 assert(urb_priv != NULL);
2480 /* This sets rx_offset to 0. */
2481 memset(urb_priv, 0, sizeof(etrax_urb_priv_t));
2482 urb_priv->urb_state = NOT_STARTED;
2483 urb->hcpriv = urb_priv;
2484
2485 if (empty) {
2486 etrax_usb_add_to_ctrl_sb_list(urb, epid);
2487 }
2488
2489 DBFEXIT;
2490
2491 return 0;
2492}
2493
/* Build the SETUP/(DATA)/STATUS SB descriptor chain for a control transfer,
   clear the endpoint's toggle bits and error counters, attach the chain to
   the ctrl EP descriptor and (re)start the ctrl DMA sub channel.
   Expects urb->hcpriv to be a valid etrax_urb_priv_t. */
static void etrax_usb_add_to_ctrl_sb_list(struct urb *urb, int epid)
{
	USB_SB_Desc_t *sb_desc_setup;
	USB_SB_Desc_t *sb_desc_data;
	USB_SB_Desc_t *sb_desc_status;

	etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;

	unsigned long flags;
	char maxlen;

	DBFENTER;

	maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));

	/* SETUP and STATUS SBs are always needed; a DATA SB is allocated
	   below only when the transfer has a data stage. */
	sb_desc_setup = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
	assert(sb_desc_setup != NULL);
	sb_desc_status = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
	assert(sb_desc_status != NULL);

	/* Initialize the mandatory setup SB descriptor (used only in control transfers) */
	sb_desc_setup->sw_len = 8;
	sb_desc_setup->command = (IO_FIELD(USB_SB_command, rem, 0) |
				  IO_STATE(USB_SB_command, tt, setup) |
				  IO_STATE(USB_SB_command, full, yes) |
				  IO_STATE(USB_SB_command, eot, yes));

	sb_desc_setup->buf = virt_to_phys(urb->setup_packet);

	if (usb_pipeout(urb->pipe)) {
		dbg_ctrl("Transfer for epid %d is OUT", epid);

		/* If this Control OUT transfer has an optional data stage we add an OUT token
		   before the mandatory IN (status) token, hence the reordered SB list */

		sb_desc_setup->next = virt_to_phys(sb_desc_status);
		if (urb->transfer_buffer) {

			dbg_ctrl("This OUT transfer has an extra data stage");

			sb_desc_data = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
			assert(sb_desc_data != NULL);

			/* Re-link setup -> data -> status. */
			sb_desc_setup->next = virt_to_phys(sb_desc_data);

			sb_desc_data->sw_len = urb->transfer_buffer_length;
			sb_desc_data->command = (IO_STATE(USB_SB_command, tt, out) |
						 IO_STATE(USB_SB_command, full, yes) |
						 IO_STATE(USB_SB_command, eot, yes));
			sb_desc_data->buf = virt_to_phys(urb->transfer_buffer);
			sb_desc_data->next = virt_to_phys(sb_desc_status);
		}

		/* The status stage of an OUT transfer is a single IN token that
		   raises an interrupt and ends the SB list. */
		sb_desc_status->sw_len = 1;
		sb_desc_status->command = (IO_FIELD(USB_SB_command, rem, 0) |
					   IO_STATE(USB_SB_command, tt, in) |
					   IO_STATE(USB_SB_command, eot, yes) |
					   IO_STATE(USB_SB_command, intr, yes) |
					   IO_STATE(USB_SB_command, eol, yes));

		sb_desc_status->buf = 0;
		sb_desc_status->next = 0;

	} else if (usb_pipein(urb->pipe)) {

		dbg_ctrl("Transfer for epid %d is IN", epid);
		dbg_ctrl("transfer_buffer_length = %d", urb->transfer_buffer_length);
		dbg_ctrl("rem is calculated to %d", urb->transfer_buffer_length % maxlen);

		/* IN transfers always have a data stage: setup -> data -> status. */
		sb_desc_data = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
		assert(sb_desc_data != NULL);

		sb_desc_setup->next = virt_to_phys(sb_desc_data);

		/* sw_len for IN is the packet count (buffer length rounded up to
		   whole max-length packets), not a byte count. */
		sb_desc_data->sw_len = urb->transfer_buffer_length ?
			(urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
		dbg_ctrl("sw_len got %d", sb_desc_data->sw_len);

		sb_desc_data->command =
			(IO_FIELD(USB_SB_command, rem,
				  urb->transfer_buffer_length % maxlen) |
			 IO_STATE(USB_SB_command, tt, in) |
			 IO_STATE(USB_SB_command, eot, yes));

		sb_desc_data->buf = 0;
		sb_desc_data->next = virt_to_phys(sb_desc_status);

		/* Read comment at zout_buffer declaration for an explanation to this. */
		sb_desc_status->sw_len = 1;
		sb_desc_status->command = (IO_FIELD(USB_SB_command, rem, 0) |
					   IO_STATE(USB_SB_command, tt, zout) |
					   IO_STATE(USB_SB_command, full, yes) |
					   IO_STATE(USB_SB_command, eot, yes) |
					   IO_STATE(USB_SB_command, intr, yes) |
					   IO_STATE(USB_SB_command, eol, yes));

		sb_desc_status->buf = virt_to_phys(&zout_buffer[0]);
		sb_desc_status->next = 0;
	}

	urb_priv->first_sb = sb_desc_setup;
	urb_priv->last_sb = sb_desc_status;
	urb_priv->epid = epid;

	urb_priv->urb_state = STARTED;

	/* Reset toggle bits and reset error count, remember to di and ei */
	/* Warning: it is possible that this locking doesn't work with bottom-halves */

	save_flags(flags);
	cli();

	/* Select the endpoint in the indirect R_USB_EPT_INDEX/R_USB_EPT_DATA
	   register pair; the nop() separates the index write from the data
	   access. */
	*R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
	nop();
	if (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) {
		panic("Hold was set in %s", __FUNCTION__);
	}


	/* FIXME: Compare with etrax_usb_add_to_bulk_sb_list where the toggle bits
	   are set to a specific value. Why the difference? Read "Transfer and Toggle Bits
	   in Designer's Reference, p. 8 - 11. */
	*R_USB_EPT_DATA &=
		~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
		  IO_MASK(R_USB_EPT_DATA, error_count_out) |
		  IO_MASK(R_USB_EPT_DATA, t_in) |
		  IO_MASK(R_USB_EPT_DATA, t_out));

	/* Since we use the rx interrupt to complete ctrl urbs, we can enable interrupts now
	   (i.e. we don't check the sub pointer on an eot interrupt like we do for bulk traffic). */
	restore_flags(flags);

	/* Assert that the EP descriptor is disabled. */
	assert(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));

	/* Set up and enable the EP descriptor. */
	TxCtrlEPList[epid].sub = virt_to_phys(sb_desc_setup);
	TxCtrlEPList[epid].hw_len = 0;
	TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);

	/* We start the DMA sub channel without checking if it's running or not, because:
	   1) If it's already running, issuing the start command is a nop.
	   2) We avoid a test-and-set race condition. */
	*R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);

	DBFEXIT;
}
2641
/* Complete a control urb: remove it from the per-epid list, free its SB
   descriptors and private data, kick off the next queued urb (if any),
   apply URB_SHORT_NOT_OK handling, invoke the completion callback, and
   release the epid when the list has become empty. */
static void etrax_usb_complete_ctrl_urb(struct urb *urb, int status)
{
	etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
	int epid = urb_priv->epid;

	DBFENTER;

	if (status)
		warn("Completing ctrl urb with status %d.", status);

	dbg_ctrl("Completing ctrl epid %d, urb 0x%lx", epid, (unsigned long)urb);

	/* Remove this urb from the list. */
	urb_list_del(urb, epid);

	/* For an IN pipe, we always set the actual length, regardless of whether there was
	   an error or not (which means the device driver can use the data if it wants to). */
	if (usb_pipein(urb->pipe)) {
		urb->actual_length = urb_priv->rx_offset;
	}

	/* FIXME: Is there something of the things below we shouldn't do if there was an error?
	   Like, maybe we shouldn't insert more traffic. */

	/* Remember to free the SBs. */
	etrax_remove_from_sb_list(urb);
	kfree(urb_priv);
	urb->hcpriv = 0;

	/* If there are any more urbs in the list we'd better start sending. */
	if (!urb_list_empty(epid)) {
		struct urb *new_urb;

		/* Get the first urb. */
		new_urb = urb_list_first(epid);
		assert(new_urb);

		dbg_ctrl("More ctrl for epid %d, first urb = 0x%lx", epid, (unsigned long)new_urb);

		etrax_usb_add_to_ctrl_sb_list(new_urb, epid);
	}

	urb->status = status;

	/* We let any non-zero status from the layer above have precedence. */
	if (status == 0) {
		/* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
		   is to be treated as an error. */
		/* NOTE(review): this compares actual_length to the endpoint's
		   maxpacket size rather than to transfer_buffer_length, which is
		   what "short read" usually means — confirm against USB core
		   semantics before relying on this path. */
		if (urb->transfer_flags & URB_SHORT_NOT_OK) {
			if (usb_pipein(urb->pipe) &&
			    (urb->actual_length !=
			     usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))) {
				urb->status = -EREMOTEIO;
			}
		}
	}

	if (urb->complete) {
		urb->complete(urb, NULL);
	}

	if (urb_list_empty(epid)) {
		/* No more traffic. Time to clean up. */
		etrax_usb_free_epid(epid);
		/* Must set sub pointer to 0. */
		dbg_ctrl("No ctrl for epid %d", epid);
		TxCtrlEPList[epid].sub = 0;
	}

	DBFEXIT;
}
2713
2714static int etrax_usb_submit_intr_urb(struct urb *urb)
2715{
2716
2717 int epid;
2718
2719 DBFENTER;
2720
2721 if (usb_pipeout(urb->pipe)) {
2722 /* Unsupported transfer type.
2723 We don't support interrupt out traffic. (If we do, we can't support
2724 intervals for neither in or out traffic, but are forced to schedule all
2725 interrupt traffic in one frame.) */
2726 return -EINVAL;
2727 }
2728
2729 epid = etrax_usb_setup_epid(urb);
2730 if (epid == -1) {
2731 DBFEXIT;
2732 return -ENOMEM;
2733 }
2734
2735 if (!urb_list_empty(epid)) {
2736 /* There is already a queued urb for this endpoint. */
2737 etrax_usb_free_epid(epid);
2738 return -ENXIO;
2739 }
2740
2741 urb->status = -EINPROGRESS;
2742
2743 dbg_intr("Add intr urb 0x%lx, to list, epid %d", (unsigned long)urb, epid);
2744
2745 urb_list_add(urb, epid);
2746 etrax_usb_add_to_intr_sb_list(urb, epid);
2747
2748 return 0;
2749
2750 DBFEXIT;
2751}
2752
/* Insert EP/SB descriptor pairs for an interrupt IN urb into the periodic
   TxIntrEPList ring at every interval-th frame boundary, then (re)start the
   interrupt DMA sub channel. Allocates and attaches urb->hcpriv. */
static void etrax_usb_add_to_intr_sb_list(struct urb *urb, int epid)
{

	volatile USB_EP_Desc_t *tmp_ep;
	volatile USB_EP_Desc_t *first_ep;

	char maxlen;
	int interval;
	int i;

	etrax_urb_priv_t *urb_priv;

	DBFENTER;

	maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	interval = urb->interval;

	urb_priv = kmalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
	assert(urb_priv != NULL);
	memset(urb_priv, 0, sizeof(etrax_urb_priv_t));
	urb->hcpriv = urb_priv;

	first_ep = &TxIntrEPList[0];

	/* Round of the interval to 2^n, it is obvious that this code favours
	   smaller numbers, but that is actually a good thing */
	/* FIXME: The "rounding error" for larger intervals will be quite
	   large. For in traffic this shouldn't be a problem since it will only
	   mean that we "poll" more often. */
	/* NOTE(review): if urb->interval is 0 the loop leaves i == 0 and the
	   shift below becomes 1 << -1, which is undefined behavior — confirm
	   that callers never pass interval 0. */
	for (i = 0; interval; i++) {
		interval = interval >> 1;
	}
	interval = 1 << (i - 1);

	dbg_intr("Interval rounded to %d", interval);

	/* Walk the circular EP ring once; eof-marked descriptors mark frame
	   boundaries, and we insert a new EP after every interval-th one. */
	tmp_ep = first_ep;
	i = 0;
	do {
		if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
			if ((i % interval) == 0) {
				/* Insert the traffic ep after tmp_ep */
				USB_EP_Desc_t *ep_desc;
				USB_SB_Desc_t *sb_desc;

				dbg_intr("Inserting EP for epid %d", epid);

				ep_desc = (USB_EP_Desc_t *)
					kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
				sb_desc = (USB_SB_Desc_t *)
					kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
				assert(ep_desc != NULL);
				CHECK_ALIGN(ep_desc);
				assert(sb_desc != NULL);

				ep_desc->sub = virt_to_phys(sb_desc);
				ep_desc->hw_len = 0;
				ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
						    IO_STATE(USB_EP_command, enable, yes));


				/* Round upwards the number of packets of size maxlen
				   that this SB descriptor should receive. */
				sb_desc->sw_len = urb->transfer_buffer_length ?
					(urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
				sb_desc->next = 0;
				sb_desc->buf = 0;
				sb_desc->command =
					(IO_FIELD(USB_SB_command, rem, urb->transfer_buffer_length % maxlen) |
					 IO_STATE(USB_SB_command, tt, in) |
					 IO_STATE(USB_SB_command, eot, yes) |
					 IO_STATE(USB_SB_command, eol, yes));

				/* Splice the new EP into the ring after tmp_ep. */
				ep_desc->next = tmp_ep->next;
				tmp_ep->next = virt_to_phys(ep_desc);
			}
			i++;
		}
		tmp_ep = (USB_EP_Desc_t *)phys_to_virt(tmp_ep->next);
	} while (tmp_ep != first_ep);


	/* Note that first_sb/last_sb doesn't apply to interrupt traffic. */
	urb_priv->epid = epid;

	/* We start the DMA sub channel without checking if it's running or not, because:
	   1) If it's already running, issuing the start command is a nop.
	   2) We avoid a test-and-set race condition. */
	*R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);

	DBFEXIT;
}
2845
2846
2847
2848static void etrax_usb_complete_intr_urb(struct urb *urb, int status)
2849{
2850 etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
2851 int epid = urb_priv->epid;
2852
2853 DBFENTER;
2854
2855 if (status)
2856 warn("Completing intr urb with status %d.", status);
2857
2858 dbg_intr("Completing intr epid %d, urb 0x%lx", epid, (unsigned long)urb);
2859
2860 urb->status = status;
2861 urb->actual_length = urb_priv->rx_offset;
2862
2863 dbg_intr("interrupt urb->actual_length = %d", urb->actual_length);
2864
2865 /* We let any non-zero status from the layer above have precedence. */
2866 if (status == 0) {
2867 /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
2868 is to be treated as an error. */
2869 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
2870 if (urb->actual_length !=
2871 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
2872 urb->status = -EREMOTEIO;
2873 }
2874 }
2875 }
2876
2877 /* The driver will resubmit the URB so we need to remove it first */
2878 etrax_usb_unlink_urb(urb, 0);
2879 if (urb->complete) {
2880 urb->complete(urb, NULL);
2881 }
2882
2883 DBFEXIT;
2884}
2885
2886
2887static int etrax_usb_submit_isoc_urb(struct urb *urb)
2888{
2889 int epid;
2890 unsigned long flags;
2891
2892 DBFENTER;
2893
2894 dbg_isoc("Submitting isoc urb = 0x%lx", (unsigned long)urb);
2895
2896 /* Epid allocation, empty check and list add must be protected.
2897 Read about this in etrax_usb_submit_ctrl_urb. */
2898
2899 spin_lock_irqsave(&urb_list_lock, flags);
2900 /* Is there an active epid for this urb ? */
2901 epid = etrax_usb_setup_epid(urb);
2902 if (epid == -1) {
2903 DBFEXIT;
2904 spin_unlock_irqrestore(&urb_list_lock, flags);
2905 return -ENOMEM;
2906 }
2907
2908 /* Ok, now we got valid endpoint, lets insert some traffic */
2909
2910 urb->status = -EINPROGRESS;
2911
2912 /* Find the last urb in the URB_List and add this urb after that one.
2913 Also add the traffic, that is do an etrax_usb_add_to_isoc_sb_list. This
2914 is important to make this in "real time" since isochronous traffic is
2915 time sensitive. */
2916
2917 dbg_isoc("Adding isoc urb to (possibly empty) list");
2918 urb_list_add(urb, epid);
2919 etrax_usb_add_to_isoc_sb_list(urb, epid);
2920 spin_unlock_irqrestore(&urb_list_lock, flags);
2921
2922 DBFEXIT;
2923
2924 return 0;
2925}
2926
/* Diagnose why an isochronous endpoint was disabled by inspecting its
   error_code field in R_USB_EPT_DATA. Logs benign cases, panics on
   conditions that cannot legally occur for isoc traffic. */
static void etrax_usb_check_error_isoc_ep(const int epid)
{
	unsigned long int flags;
	int error_code;
	__u32 r_usb_ept_data;

	/* We can't read R_USB_EPID_ATTN here since it would clear the iso_eof,
	   bulk_eot and epid_attn interrupts. So we just check the status of
	   the epid without testing if for it in R_USB_EPID_ATTN. */


	save_flags(flags);
	cli();
	/* Select the endpoint; the nop() separates the index write from the
	   subsequent data read. */
	*R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
	nop();
	/* Note that although there are separate R_USB_EPT_DATA and R_USB_EPT_DATA_ISO
	   registers, they are located at the same address and are of the same size.
	   In other words, this read should be ok for isoc also. */
	r_usb_ept_data = *R_USB_EPT_DATA;
	restore_flags(flags);

	error_code = IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data);

	if (r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, hold)) {
		warn("Hold was set for epid %d.", epid);
		return;
	}

	if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, no_error)) {

		/* This indicates that the SB list of the ept was completed before
		   new data was appended to it. This is not an error, but indicates
		   large system or USB load and could possibly cause trouble for
		   very timing sensitive USB device drivers so we log it.
		*/
		info("Isoc. epid %d disabled with no error", epid);
		return;

	} else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, stall)) {
		/* Not really a protocol error, just says that the endpoint gave
		   a stall response. Note that error_code cannot be stall for isoc. */
		panic("Isoc traffic cannot stall");

	} else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, bus_error)) {
		/* Two devices responded to a transaction request. Must be resolved
		   by software. FIXME: Reset ports? */
		panic("Bus error for epid %d."
		      " Two devices responded to transaction request",
		      epid);

	} else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, buffer_error)) {
		/* NOTE(review): this branch uses R_USB_EPT_DATA while the others
		   use R_USB_EPT_DATA_ISO — presumably the buffer_error value is
		   identical in both layouts; verify against the register spec. */
		/* DMA overrun or underrun. */
		warn("Buffer overrun/underrun for epid %d. DMA too busy?", epid);

		/* It seems that error_code = buffer_error in
		   R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
		   are the same error. */
	}
}
2986
2987
2988static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid)
2989{
2990
2991 int i = 0;
2992
2993 etrax_urb_priv_t *urb_priv;
2994 USB_SB_Desc_t *prev_sb_desc, *next_sb_desc, *temp_sb_desc;
2995
2996 DBFENTER;
2997
2998 prev_sb_desc = next_sb_desc = temp_sb_desc = NULL;
2999
3000 urb_priv = kmalloc(sizeof(etrax_urb_priv_t), GFP_ATOMIC);
3001 assert(urb_priv != NULL);
3002 memset(urb_priv, 0, sizeof(etrax_urb_priv_t));
3003
3004 urb->hcpriv = urb_priv;
3005 urb_priv->epid = epid;
3006
3007 if (usb_pipeout(urb->pipe)) {
3008
3009 if (urb->number_of_packets == 0) panic("etrax_usb_add_to_isoc_sb_list 0 packets\n");
3010
3011 dbg_isoc("Transfer for epid %d is OUT", epid);
3012 dbg_isoc("%d packets in URB", urb->number_of_packets);
3013
3014 /* Create one SB descriptor for each packet and link them together. */
3015 for (i = 0; i < urb->number_of_packets; i++) {
3016 if (!urb->iso_frame_desc[i].length)
3017 continue;
3018
3019 next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
3020 assert(next_sb_desc != NULL);
3021
3022 if (urb->iso_frame_desc[i].length > 0) {
3023
3024 next_sb_desc->command = (IO_STATE(USB_SB_command, tt, out) |
3025 IO_STATE(USB_SB_command, eot, yes));
3026
3027 next_sb_desc->sw_len = urb->iso_frame_desc[i].length;
3028 next_sb_desc->buf = virt_to_phys((char*)urb->transfer_buffer + urb->iso_frame_desc[i].offset);
3029
3030 /* Check if full length transfer. */
3031 if (urb->iso_frame_desc[i].length ==
3032 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
3033 next_sb_desc->command |= IO_STATE(USB_SB_command, full, yes);
3034 }
3035 } else {
3036 dbg_isoc("zero len packet");
3037 next_sb_desc->command = (IO_FIELD(USB_SB_command, rem, 0) |
3038 IO_STATE(USB_SB_command, tt, zout) |
3039 IO_STATE(USB_SB_command, eot, yes) |
3040 IO_STATE(USB_SB_command, full, yes));
3041
3042 next_sb_desc->sw_len = 1;
3043 next_sb_desc->buf = virt_to_phys(&zout_buffer[0]);
3044 }
3045
3046 /* First SB descriptor that belongs to this urb */
3047 if (i == 0)
3048 urb_priv->first_sb = next_sb_desc;
3049 else
3050 prev_sb_desc->next = virt_to_phys(next_sb_desc);
3051
3052 prev_sb_desc = next_sb_desc;
3053 }
3054
3055 next_sb_desc->command |= (IO_STATE(USB_SB_command, intr, yes) |
3056 IO_STATE(USB_SB_command, eol, yes));
3057 next_sb_desc->next = 0;
3058 urb_priv->last_sb = next_sb_desc;
3059
3060 } else if (usb_pipein(urb->pipe)) {
3061
3062 dbg_isoc("Transfer for epid %d is IN", epid);
3063 dbg_isoc("transfer_buffer_length = %d", urb->transfer_buffer_length);
3064 dbg_isoc("rem is calculated to %d", urb->iso_frame_desc[urb->number_of_packets - 1].length);
3065
3066 /* Note that in descriptors for periodic traffic are not consumed. This means that
3067 the USB controller never propagates in the SB list. In other words, if there already
3068 is an SB descriptor in the list for this EP we don't have to do anything. */
3069 if (TxIsocEPList[epid].sub == 0) {
3070 dbg_isoc("Isoc traffic not already running, allocating SB");
3071
3072 next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
3073 assert(next_sb_desc != NULL);
3074
3075 next_sb_desc->command = (IO_STATE(USB_SB_command, tt, in) |
3076 IO_STATE(USB_SB_command, eot, yes) |
3077 IO_STATE(USB_SB_command, eol, yes));
3078
3079 next_sb_desc->next = 0;
3080 next_sb_desc->sw_len = 1; /* Actual number of packets is not relevant
3081 for periodic in traffic as long as it is more
3082 than zero. Set to 1 always. */
3083 next_sb_desc->buf = 0;
3084
3085 /* The rem field is don't care for isoc traffic, so we don't set it. */
3086
3087 /* Only one SB descriptor that belongs to this urb. */
3088 urb_priv->first_sb = next_sb_desc;
3089 urb_priv->last_sb = next_sb_desc;
3090
3091 } else {
3092
3093 dbg_isoc("Isoc traffic already running, just setting first/last_sb");
3094
3095 /* Each EP for isoc in will have only one SB descriptor, setup when submitting the
3096 already active urb. Note that even though we may have several first_sb/last_sb
3097 pointing at the same SB descriptor, they are freed only once (when the list has
3098 become empty). */
3099 urb_priv->first_sb = phys_to_virt(TxIsocEPList[epid].sub);
3100 urb_priv->last_sb = phys_to_virt(TxIsocEPList[epid].sub);
3101 return;
3102 }
3103
3104 }
3105
3106 /* Find the spot to insert this urb and add it. */
3107 if (TxIsocEPList[epid].sub == 0) {
3108 /* First SB descriptor inserted in this list (in or out). */
3109 dbg_isoc("Inserting SB desc first in list");
3110 TxIsocEPList[epid].hw_len = 0;
3111 TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
3112
3113 } else {
3114 /* Isochronous traffic is already running, insert new traffic last (only out). */
3115 dbg_isoc("Inserting SB desc last in list");
3116 temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
3117 while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
3118 IO_STATE(USB_SB_command, eol, yes)) {
3119 assert(temp_sb_desc->next);
3120 temp_sb_desc = phys_to_virt(temp_sb_desc->next);
3121 }
3122 dbg_isoc("Appending list on desc 0x%p", temp_sb_desc);
3123
3124 /* Next pointer must be set before eol is removed. */
3125 temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
3126 /* Clear the previous end of list flag since there is a new in the
3127 added SB descriptor list. */
3128 temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
3129
3130 if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
3131 /* 8.8.5 in Designer's Reference says we should check for and correct
3132 any errors in the EP here. That should not be necessary if epid_attn
3133 is handled correctly, so we assume all is ok. */
3134 dbg_isoc("EP disabled");
3135 etrax_usb_check_error_isoc_ep(epid);
3136
3137 /* The SB list was exhausted. */
3138 if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
3139 /* The new sublist did not get processed before the EP was
3140 disabled. Setup the EP again. */
3141 dbg_isoc("Set EP sub to new list");
3142 TxIsocEPList[epid].hw_len = 0;
3143 TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
3144 }
3145 }
3146 }
3147
3148 if (urb->transfer_flags & URB_ISO_ASAP) {
3149 /* The isoc transfer should be started as soon as possible. The start_frame
3150 field is a return value if URB_ISO_ASAP was set. Comparing R_USB_FM_NUMBER
3151 with a USB Chief trace shows that the first isoc IN token is sent 2 frames
3152 later. I'm not sure how this affects usage of the start_frame field by the
3153 device driver, or how it affects things when USB_ISO_ASAP is not set, so
3154 therefore there's no compensation for the 2 frame "lag" here. */
3155 urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
3156 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
3157 urb_priv->urb_state = STARTED;
3158 dbg_isoc("URB_ISO_ASAP set, urb->start_frame set to %d", urb->start_frame);
3159 } else {
3160 /* Not started yet. */
3161 urb_priv->urb_state = NOT_STARTED;
3162 dbg_isoc("urb_priv->urb_state set to NOT_STARTED");
3163 }
3164
3165 /* We start the DMA sub channel without checking if it's running or not, because:
3166 1) If it's already running, issuing the start command is a nop.
3167 2) We avoid a test-and-set race condition. */
3168 *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
3169
3170 DBFEXIT;
3171}
3172
/* Complete an isochronous urb: fill in per-packet status for IN transfers,
   remove the urb from the list, tear down the endpoint when the list becomes
   empty, release bandwidth and invoke the completion callback. */
static void etrax_usb_complete_isoc_urb(struct urb *urb, int status)
{
	etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
	int epid = urb_priv->epid;
	/* NOTE(review): auto_resubmit is never set to anything but 0, so the
	   resubmit block at the end of this function is dead code. */
	int auto_resubmit = 0;

	DBFENTER;
	dbg_isoc("complete urb 0x%p, status %d", urb, status);

	if (status)
		warn("Completing isoc urb with status %d.", status);

	if (usb_pipein(urb->pipe)) {
		int i;

		/* Make that all isoc packets have status and length set before
		   completing the urb. */
		for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++) {
			urb->iso_frame_desc[i].actual_length = 0;
			urb->iso_frame_desc[i].status = -EPROTO;
		}

		urb_list_del(urb, epid);

		if (!list_empty(&urb_list[epid])) {
			/* More urbs queued: mark the new head as started. */
			((etrax_urb_priv_t *)(urb_list_first(epid)->hcpriv))->urb_state = STARTED;
		} else {
			unsigned long int flags;
			if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
				/* The EP was enabled, disable it and wait. */
				TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);

				/* Ah, the luxury of busy-wait. */
				/* Spin until the DMA channel has moved off this EP
				   descriptor, so it is safe to tear down below. */
				while (*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid]));
			}

			etrax_remove_from_sb_list(urb);
			TxIsocEPList[epid].sub = 0;
			TxIsocEPList[epid].hw_len = 0;

			save_flags(flags);
			cli();
			etrax_usb_free_epid(epid);
			restore_flags(flags);
		}

		urb->hcpriv = 0;
		kfree(urb_priv);

		/* Release allocated bandwidth. */
		usb_release_bandwidth(urb->dev, urb, 0);
	} else if (usb_pipeout(urb->pipe)) {
		int freed_descr;

		dbg_isoc("Isoc out urb complete 0x%p", urb);

		/* Update the urb list. */
		urb_list_del(urb, epid);

		freed_descr = etrax_remove_from_sb_list(urb);
		dbg_isoc("freed %d descriptors of %d packets", freed_descr, urb->number_of_packets);
		assert(freed_descr == urb->number_of_packets);
		urb->hcpriv = 0;
		kfree(urb_priv);

		/* Release allocated bandwidth. */
		usb_release_bandwidth(urb->dev, urb, 0);
	}

	urb->status = status;
	if (urb->complete) {
		urb->complete(urb, NULL);
	}

	if (auto_resubmit) {
		/* Check that urb was not unlinked by the complete callback. */
		if (__urb_list_entry(urb, epid)) {
			/* Move this one down the list. */
			urb_list_move_last(urb, epid);

			/* Mark the now first urb as started (may already be). */
			((etrax_urb_priv_t *)(urb_list_first(epid)->hcpriv))->urb_state = STARTED;

			/* Must set this to 0 since this urb is still active after
			   completion. */
			urb_priv->isoc_packet_counter = 0;
		} else {
			warn("(ISOC) automatic resubmit urb 0x%p removed by complete.", urb);
		}
	}

	DBFEXIT;
}
3266
3267static void etrax_usb_complete_urb(struct urb *urb, int status)
3268{
3269 switch (usb_pipetype(urb->pipe)) {
3270 case PIPE_BULK:
3271 etrax_usb_complete_bulk_urb(urb, status);
3272 break;
3273 case PIPE_CONTROL:
3274 etrax_usb_complete_ctrl_urb(urb, status);
3275 break;
3276 case PIPE_INTERRUPT:
3277 etrax_usb_complete_intr_urb(urb, status);
3278 break;
3279 case PIPE_ISOCHRONOUS:
3280 etrax_usb_complete_isoc_urb(urb, status);
3281 break;
3282 default:
3283 err("Unknown pipetype");
3284 }
3285}
3286
3287
3288
/* Top-half IRQ handler: snapshot all interrupt-related controller registers
   (in the order required to acknowledge the interrupts correctly), stash
   them in a kmem_cache'd struct, and defer processing to the bottom half
   via schedule_work(). */
static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc, struct pt_regs *regs)
{
	usb_interrupt_registers_t *reg;
	unsigned long flags;
	__u32 irq_mask;
	__u8 status;
	__u32 epid_attn;
	__u16 port_status_1;
	__u16 port_status_2;
	__u32 fm_number;

	DBFENTER;

	/* Read critical registers into local variables, do kmalloc afterwards. */
	save_flags(flags);
	cli();

	irq_mask = *R_USB_IRQ_MASK_READ;
	/* Reading R_USB_STATUS clears the ctl_status interrupt. Note that R_USB_STATUS
	   must be read before R_USB_EPID_ATTN since reading the latter clears the
	   ourun and perror fields of R_USB_STATUS. */
	status = *R_USB_STATUS;

	/* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn interrupts. */
	epid_attn = *R_USB_EPID_ATTN;

	/* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
	   port_status interrupt. */
	port_status_1 = *R_USB_RH_PORT_STATUS_1;
	port_status_2 = *R_USB_RH_PORT_STATUS_2;

	/* Reading R_USB_FM_NUMBER clears the sof interrupt. */
	/* Note: the lower 11 bits contain the actual frame number, sent with each sof. */
	fm_number = *R_USB_FM_NUMBER;

	restore_flags(flags);

	/* Allocate after the critical section; SLAB_ATOMIC keeps this safe in
	   interrupt context. */
	reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, SLAB_ATOMIC);

	assert(reg != NULL);

	reg->hc = (etrax_hc_t *)vhc;

	/* Now put register values into kmalloc'd area. */
	reg->r_usb_irq_mask_read = irq_mask;
	reg->r_usb_status = status;
	reg->r_usb_epid_attn = epid_attn;
	reg->r_usb_rh_port_status_1 = port_status_1;
	reg->r_usb_rh_port_status_2 = port_status_2;
	reg->r_usb_fm_number = fm_number;

	/* Hand the snapshot to the bottom half; it frees reg when done. */
	INIT_WORK(&reg->usb_bh, etrax_usb_hc_interrupt_bottom_half, reg);
	schedule_work(&reg->usb_bh);

	DBFEXIT;

	return IRQ_HANDLED;
}
3347
/* Bottom-half worker: process the register snapshot captured by the top
   half, dispatching each pending interrupt source in priority order, then
   free the snapshot. */
static void etrax_usb_hc_interrupt_bottom_half(void *data)
{
	usb_interrupt_registers_t *reg = (usb_interrupt_registers_t *)data;
	__u32 irq_mask = reg->r_usb_irq_mask_read;

	DBFENTER;

	/* Interrupts are handled in order of priority. */
	if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
		etrax_usb_hc_epid_attn_interrupt(reg);
	}
	if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
		etrax_usb_hc_port_status_interrupt(reg);
	}
	if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
		etrax_usb_hc_ctl_status_interrupt(reg);
	}
	if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
		etrax_usb_hc_isoc_eof_interrupt();
	}
	if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
		/* Update/restart the bulk start timer since obviously the channel is running. */
		mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
		/* Update/restart the bulk eot timer since we just received an bulk eot interrupt. */
		mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);

		etrax_usb_hc_bulk_eot_interrupt(0);
	}

	/* Snapshot was allocated by the top half; release it here. */
	kmem_cache_free(top_half_reg_cache, reg);

	DBFEXIT;
}
3381
3382
3383void etrax_usb_hc_isoc_eof_interrupt(void)
3384{
3385 struct urb *urb;
3386 etrax_urb_priv_t *urb_priv;
3387 int epid;
3388 unsigned long flags;
3389
3390 DBFENTER;
3391
3392 /* Do not check the invalid epid (it has a valid sub pointer). */
3393 for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
3394
3395 /* Do not check the invalid epid (it has a valid sub pointer). */
3396 if ((epid == DUMMY_EPID) || (epid == INVALID_EPID))
3397 continue;
3398
3399 /* Disable interrupts to block the isoc out descriptor interrupt handler
3400 from being called while the isoc EPID list is being checked.
3401 */
3402 save_flags(flags);
3403 cli();
3404
3405 if (TxIsocEPList[epid].sub == 0) {
3406 /* Nothing here to see. */
3407 restore_flags(flags);
3408 continue;
3409 }
3410
3411 /* Get the first urb (if any). */
3412 urb = urb_list_first(epid);
3413 if (urb == 0) {
3414 warn("Ignoring NULL urb");
3415 restore_flags(flags);
3416 continue;
3417 }
3418 if (usb_pipein(urb->pipe)) {
3419
3420 /* Sanity check. */
3421 assert(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
3422
3423 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
3424 assert(urb_priv);
3425
3426 if (urb_priv->urb_state == NOT_STARTED) {
3427
3428 /* If ASAP is not set and urb->start_frame is the current frame,
3429 start the transfer. */
3430 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
3431 (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
3432
3433 dbg_isoc("Enabling isoc IN EP descr for epid %d", epid);
3434 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
3435
3436 /* This urb is now active. */
3437 urb_priv->urb_state = STARTED;
3438 continue;
3439 }
3440 }
3441 }
3442 restore_flags(flags);
3443 }
3444
3445 DBFEXIT;
3446
3447}
3448
/* Bulk end-of-transfer interrupt handler.  timer_induced is non-zero when
   invoked from the bulk eot timer instead of a real eot interrupt (the
   timer compensates for eot interrupts the top half may miss).  Scans for
   disabled bulk EP descriptors with a non-null sub pointer and completes
   the corresponding OUT urb; bulk IN urbs are completed in the rx
   interrupt handler instead (see comment below). */
void etrax_usb_hc_bulk_eot_interrupt(int timer_induced)
{
	int epid;

	/* The technique is to run one urb at a time, wait for the eot interrupt at which
	   point the EP descriptor has been disabled. */

	DBFENTER;
	dbg_bulk("bulk eot%s", timer_induced ? ", called by timer" : "");

	for (epid = 0; epid < NBR_OF_EPIDS; epid++) {

		if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
		    (TxBulkEPList[epid].sub != 0)) {

			struct urb *urb;
			etrax_urb_priv_t *urb_priv;
			unsigned long flags;
			__u32 r_usb_ept_data;

			/* Found a disabled EP descriptor which has a non-null sub pointer.
			   Verify that this ctrl EP descriptor got disabled no errors.
			   FIXME: Necessary to check error_code? */
			dbg_bulk("for epid %d?", epid);

			/* Get the first urb. */
			urb = urb_list_first(epid);

			/* FIXME: Could this happen for valid reasons? Why did it disappear? Because of
			   wrong unlinking? */
			if (!urb) {
				warn("NULL urb for epid %d", epid);
				continue;
			}

			assert(urb);
			urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
			assert(urb_priv);

			/* Sanity checks. */
			assert(usb_pipetype(urb->pipe) == PIPE_BULK);
			if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
				err("bulk endpoint got disabled before reaching last sb");
			}

			/* For bulk IN traffic, there seems to be a race condition between
			   between the bulk eot and eop interrupts, or rather an uncertainty regarding
			   the order in which they happen. Normally we expect the eop interrupt from
			   DMA channel 9 to happen before the eot interrupt.

			   Therefore, we complete the bulk IN urb in the rx interrupt handler instead. */

			if (usb_pipein(urb->pipe)) {
				dbg_bulk("in urb, continuing");
				continue;
			}

			/* Read the endpoint status word.  Interrupts are disabled here,
			   presumably so the R_USB_EPT_INDEX / R_USB_EPT_DATA index-data
			   sequence is not interleaved with the same sequence elsewhere
			   (e.g. the epid attention handler) — TODO confirm. */
			save_flags(flags);
			cli();
			*R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
			nop();
			r_usb_ept_data = *R_USB_EPT_DATA;
			restore_flags(flags);

			if (IO_EXTRACT(R_USB_EPT_DATA, error_code, r_usb_ept_data) ==
			    IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
				/* This means that the endpoint has no error, is disabled
				   and had inserted traffic, i.e. transfer successfully completed. */
				etrax_usb_complete_bulk_urb(urb, 0);
			} else {
				/* Shouldn't happen. We expect errors to be caught by epid attention. */
				err("Found disabled bulk EP desc, error_code != no_error");
			}
		}
	}

	/* Normally, we should find (at least) one disabled EP descriptor with a valid sub pointer.
	   However, because of the uncertainty in the deliverance of the eop/eot interrupts, we may
	   not. Also, we might find two disabled EPs when handling an eot interrupt, and then find
	   none the next time. */

	DBFEXIT;

}
3533
/* Epid attention interrupt handler: decodes the per-epid attention bits in
   r_usb_epid_attn and completes or panics on the offending urb. */
void etrax_usb_hc_epid_attn_interrupt(usb_interrupt_registers_t *reg)
{
	/* This function handles the epid attention interrupt. There are a variety of reasons
	   for this interrupt to happen (Designer's Reference, p. 8 - 22 for the details):

	   invalid ep_id  - Invalid epid in an EP (EP disabled).
	   stall          - Not strictly an error condition (EP disabled).
	   3rd error      - Three successive transaction errors  (EP disabled).
	   buffer ourun   - Buffer overrun or underrun (EP disabled).
	   past eof1      - Intr or isoc transaction proceeds past EOF1.
	   near eof       - Intr or isoc transaction would not fit inside the frame.
	   zout transfer  - If zout transfer for a bulk endpoint (EP disabled).
	   setup transfer - If setup transfer for a non-ctrl endpoint (EP disabled). */

	int epid;


	DBFENTER;

	assert(reg != NULL);

	/* Note that we loop through all epids. We still want to catch errors for
	   the invalid one, even though we might handle them differently. */
	for (epid = 0; epid < NBR_OF_EPIDS; epid++) {

		if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {

			struct urb *urb;
			__u32 r_usb_ept_data;
			unsigned long flags;
			int error_code;

			/* Read the endpoint status word via the index/data register
			   pair with interrupts disabled. */
			save_flags(flags);
			cli();
			*R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
			nop();
			/* Note that although there are separate R_USB_EPT_DATA and R_USB_EPT_DATA_ISO
			   registers, they are located at the same address and are of the same size.
			   In other words, this read should be ok for isoc also. */
			r_usb_ept_data = *R_USB_EPT_DATA;
			restore_flags(flags);

			/* First some sanity checks. */
			if (epid == INVALID_EPID) {
				/* FIXME: What if it became disabled? Could seriously hurt interrupt
				   traffic. (Use do_intr_recover.) */
				warn("Got epid_attn for INVALID_EPID (%d).", epid);
				err("R_USB_EPT_DATA = 0x%x", r_usb_ept_data);
				err("R_USB_STATUS = 0x%x", reg->r_usb_status);
				continue;
			} else 	if (epid == DUMMY_EPID) {
				/* We definitely don't care about these ones. Besides, they are
				   always disabled, so any possible disabling caused by the
				   epid attention interrupt is irrelevant. */
				warn("Got epid_attn for DUMMY_EPID (%d).", epid);
				continue;
			}

			/* Get the first urb in the urb list for this epid. We blatantly assume
			   that only the first urb could have caused the epid attention.
			   (For bulk and ctrl, only one urb is active at any one time. For intr
			   and isoc we remove them once they are completed.) */
			urb = urb_list_first(epid);

			if (urb == NULL) {
				err("Got epid_attn for epid %i with no urb.", epid);
				err("R_USB_EPT_DATA = 0x%x", r_usb_ept_data);
				err("R_USB_STATUS = 0x%x", reg->r_usb_status);
				continue;
			}

			/* Log which pipe type triggered the attention (all types are
			   unexpected enough to warn about). */
			switch (usb_pipetype(urb->pipe)) {
			case PIPE_BULK:
				warn("Got epid attn for bulk endpoint, epid %d", epid);
				break;
			case PIPE_CONTROL:
				warn("Got epid attn for control endpoint, epid %d", epid);
				break;
			case PIPE_INTERRUPT:
				warn("Got epid attn for interrupt endpoint, epid %d", epid);
				break;
			case PIPE_ISOCHRONOUS:
				warn("Got epid attn for isochronous endpoint, epid %d", epid);
				break;
			}

			/* The hold bit only exists for non-isoc endpoint data; if it is
			   set, the epid is left untouched. */
			if (usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) {
				if (r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, hold)) {
					warn("Hold was set for epid %d.", epid);
					continue;
				}
			}

			/* Even though error_code occupies bits 22 - 23 in both R_USB_EPT_DATA and
			   R_USB_EPT_DATA_ISOC, we separate them here so we don't forget in other places. */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
				error_code = IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data);
			} else {
				error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, r_usb_ept_data);
			}

			/* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
			if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {

				/* Isoc traffic doesn't have error_count_in/error_count_out. */
				if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
				    (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, r_usb_ept_data) == 3 ||
				     IO_EXTRACT(R_USB_EPT_DATA, error_count_out, r_usb_ept_data) == 3)) {
					/* 3rd error. */
					warn("3rd error for epid %i", epid);
					etrax_usb_complete_urb(urb, -EPROTO);

				} else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {

					warn("Perror for epid %d", epid);

					if (!(r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
						/* invalid ep_id */
						panic("Perror because of invalid epid."
						      " Deconfigured too early?");
					} else {
						/* past eof1, near eof, zout transfer, setup transfer */

						/* Dump the urb and the relevant EP descriptor list. */

						__dump_urb(urb);
						__dump_ept_data(epid);
						__dump_ep_list(usb_pipetype(urb->pipe));

						panic("Something wrong with DMA descriptor contents."
						      " Too much traffic inserted?");
					}
				} else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
					/* buffer ourun */
					panic("Buffer overrun/underrun for epid %d. DMA too busy?", epid);
				}

			} else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, stall)) {
				/* Not really a protocol error, just says that the endpoint gave
				   a stall response. Note that error_code cannot be stall for isoc. */
				if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
					panic("Isoc traffic cannot stall");
				}

				warn("Stall for epid %d", epid);
				etrax_usb_complete_urb(urb, -EPIPE);

			} else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, bus_error)) {
				/* Two devices responded to a transaction request. Must be resolved
				   by software. FIXME: Reset ports? */
				panic("Bus error for epid %d."
				      " Two devices responded to transaction request",
				      epid);

			} else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, buffer_error)) {
				/* DMA overrun or underrun. */
				warn("Buffer overrun/underrun for epid %d. DMA too busy?", epid);

				/* It seems that error_code = buffer_error in
				   R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
				   are the same error. */
				etrax_usb_complete_urb(urb, -EPROTO);
			}
		}
	}

	DBFEXIT;

}
3703
3704void etrax_usb_bulk_start_timer_func(unsigned long dummy)
3705{
3706
3707 /* We might enable an EP descriptor behind the current DMA position when it's about
3708 to decide that there are no more bulk traffic and it should stop the bulk channel.
3709 Therefore we periodically check if the bulk channel is stopped and there is an
3710 enabled bulk EP descriptor, in which case we start the bulk channel. */
3711 dbg_bulk("bulk_start_timer timed out.");
3712
3713 if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
3714 int epid;
3715
3716 dbg_bulk("Bulk DMA channel not running.");
3717
3718 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
3719 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
3720 dbg_bulk("Found enabled EP for epid %d, starting bulk channel.\n",
3721 epid);
3722 *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
3723
3724 /* Restart the bulk eot timer since we just started the bulk channel. */
3725 mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
3726
3727 /* No need to search any further. */
3728 break;
3729 }
3730 }
3731 } else {
3732 dbg_bulk("Bulk DMA channel running.");
3733 }
3734}
3735
3736void etrax_usb_hc_port_status_interrupt(usb_interrupt_registers_t *reg)
3737{
3738 etrax_hc_t *hc = reg->hc;
3739 __u16 r_usb_rh_port_status_1 = reg->r_usb_rh_port_status_1;
3740 __u16 r_usb_rh_port_status_2 = reg->r_usb_rh_port_status_2;
3741
3742 DBFENTER;
3743
3744 /* The Etrax RH does not include a wPortChange register, so this has to be handled in software
3745 (by saving the old port status value for comparison when the port status interrupt happens).
3746 See section 11.16.2.6.2 in the USB 1.1 spec for details. */
3747
3748 dbg_rh("hc->rh.prev_wPortStatus_1 = 0x%x", hc->rh.prev_wPortStatus_1);
3749 dbg_rh("hc->rh.prev_wPortStatus_2 = 0x%x", hc->rh.prev_wPortStatus_2);
3750 dbg_rh("r_usb_rh_port_status_1 = 0x%x", r_usb_rh_port_status_1);
3751 dbg_rh("r_usb_rh_port_status_2 = 0x%x", r_usb_rh_port_status_2);
3752
3753 /* C_PORT_CONNECTION is set on any transition. */
3754 hc->rh.wPortChange_1 |=
3755 ((r_usb_rh_port_status_1 & (1 << RH_PORT_CONNECTION)) !=
3756 (hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_CONNECTION))) ?
3757 (1 << RH_PORT_CONNECTION) : 0;
3758
3759 hc->rh.wPortChange_2 |=
3760 ((r_usb_rh_port_status_2 & (1 << RH_PORT_CONNECTION)) !=
3761 (hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_CONNECTION))) ?
3762 (1 << RH_PORT_CONNECTION) : 0;
3763
3764 /* C_PORT_ENABLE is _only_ set on a one to zero transition, i.e. when
3765 the port is disabled, not when it's enabled. */
3766 hc->rh.wPortChange_1 |=
3767 ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_ENABLE))
3768 && !(r_usb_rh_port_status_1 & (1 << RH_PORT_ENABLE))) ?
3769 (1 << RH_PORT_ENABLE) : 0;
3770
3771 hc->rh.wPortChange_2 |=
3772 ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_ENABLE))
3773 && !(r_usb_rh_port_status_2 & (1 << RH_PORT_ENABLE))) ?
3774 (1 << RH_PORT_ENABLE) : 0;
3775
3776 /* C_PORT_SUSPEND is set to one when the device has transitioned out
3777 of the suspended state, i.e. when suspend goes from one to zero. */
3778 hc->rh.wPortChange_1 |=
3779 ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_SUSPEND))
3780 && !(r_usb_rh_port_status_1 & (1 << RH_PORT_SUSPEND))) ?
3781 (1 << RH_PORT_SUSPEND) : 0;
3782
3783 hc->rh.wPortChange_2 |=
3784 ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_SUSPEND))
3785 && !(r_usb_rh_port_status_2 & (1 << RH_PORT_SUSPEND))) ?
3786 (1 << RH_PORT_SUSPEND) : 0;
3787
3788
3789 /* C_PORT_RESET is set when reset processing on this port is complete. */
3790 hc->rh.wPortChange_1 |=
3791 ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_RESET))
3792 && !(r_usb_rh_port_status_1 & (1 << RH_PORT_RESET))) ?
3793 (1 << RH_PORT_RESET) : 0;
3794
3795 hc->rh.wPortChange_2 |=
3796 ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_RESET))
3797 && !(r_usb_rh_port_status_2 & (1 << RH_PORT_RESET))) ?
3798 (1 << RH_PORT_RESET) : 0;
3799
3800 /* Save the new values for next port status change. */
3801 hc->rh.prev_wPortStatus_1 = r_usb_rh_port_status_1;
3802 hc->rh.prev_wPortStatus_2 = r_usb_rh_port_status_2;
3803
3804 dbg_rh("hc->rh.wPortChange_1 set to 0x%x", hc->rh.wPortChange_1);
3805 dbg_rh("hc->rh.wPortChange_2 set to 0x%x", hc->rh.wPortChange_2);
3806
3807 DBFEXIT;
3808
3809}
3810
3811void etrax_usb_hc_ctl_status_interrupt(usb_interrupt_registers_t *reg)
3812{
3813 DBFENTER;
3814
3815 /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
3816 list for the corresponding epid? */
3817 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
3818 panic("USB controller got ourun.");
3819 }
3820 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
3821
3822 /* Before, etrax_usb_do_intr_recover was called on this epid if it was
3823 an interrupt pipe. I don't see how re-enabling all EP descriptors
3824 will help if there was a programming error. */
3825 panic("USB controller got perror.");
3826 }
3827
3828 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
3829 /* We should never operate in device mode. */
3830 panic("USB controller in device mode.");
3831 }
3832
3833 /* These if-statements could probably be nested. */
3834 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, host_mode)) {
3835 info("USB controller in host mode.");
3836 }
3837 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, started)) {
3838 info("USB controller started.");
3839 }
3840 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, running)) {
3841 info("USB controller running.");
3842 }
3843
3844 DBFEXIT;
3845
3846}
3847
3848
3849static int etrax_rh_submit_urb(struct urb *urb)
3850{
3851 struct usb_device *usb_dev = urb->dev;
3852 etrax_hc_t *hc = usb_dev->bus->hcpriv;
3853 unsigned int pipe = urb->pipe;
3854 struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
3855 void *data = urb->transfer_buffer;
3856 int leni = urb->transfer_buffer_length;
3857 int len = 0;
3858 int stat = 0;
3859
3860 __u16 bmRType_bReq;
3861 __u16 wValue;
3862 __u16 wIndex;
3863 __u16 wLength;
3864
3865 DBFENTER;
3866
3867 /* FIXME: What is this interrupt urb that is sent to the root hub? */
3868 if (usb_pipetype (pipe) == PIPE_INTERRUPT) {
3869 dbg_rh("Root-Hub submit IRQ: every %d ms", urb->interval);
3870 hc->rh.urb = urb;
3871 hc->rh.send = 1;
3872 /* FIXME: We could probably remove this line since it's done
3873 in etrax_rh_init_int_timer. (Don't remove it from
3874 etrax_rh_init_int_timer though.) */
3875 hc->rh.interval = urb->interval;
3876 etrax_rh_init_int_timer(urb);
3877 DBFEXIT;
3878
3879 return 0;
3880 }
3881
3882 bmRType_bReq = cmd->bRequestType | (cmd->bRequest << 8);
3883 wValue = le16_to_cpu(cmd->wValue);
3884 wIndex = le16_to_cpu(cmd->wIndex);
3885 wLength = le16_to_cpu(cmd->wLength);
3886
3887 dbg_rh("bmRType_bReq : 0x%04x (%d)", bmRType_bReq, bmRType_bReq);
3888 dbg_rh("wValue : 0x%04x (%d)", wValue, wValue);
3889 dbg_rh("wIndex : 0x%04x (%d)", wIndex, wIndex);
3890 dbg_rh("wLength : 0x%04x (%d)", wLength, wLength);
3891
3892 switch (bmRType_bReq) {
3893
3894 /* Request Destination:
3895 without flags: Device,
3896 RH_INTERFACE: interface,
3897 RH_ENDPOINT: endpoint,
3898 RH_CLASS means HUB here,
3899 RH_OTHER | RH_CLASS almost ever means HUB_PORT here
3900 */
3901
3902 case RH_GET_STATUS:
3903 *(__u16 *) data = cpu_to_le16 (1);
3904 OK (2);
3905
3906 case RH_GET_STATUS | RH_INTERFACE:
3907 *(__u16 *) data = cpu_to_le16 (0);
3908 OK (2);
3909
3910 case RH_GET_STATUS | RH_ENDPOINT:
3911 *(__u16 *) data = cpu_to_le16 (0);
3912 OK (2);
3913
3914 case RH_GET_STATUS | RH_CLASS:
3915 *(__u32 *) data = cpu_to_le32 (0);
3916 OK (4); /* hub power ** */
3917
3918 case RH_GET_STATUS | RH_OTHER | RH_CLASS:
3919 if (wIndex == 1) {
3920 *((__u16*)data) = cpu_to_le16(hc->rh.prev_wPortStatus_1);
3921 *((__u16*)data + 1) = cpu_to_le16(hc->rh.wPortChange_1);
3922 } else if (wIndex == 2) {
3923 *((__u16*)data) = cpu_to_le16(hc->rh.prev_wPortStatus_2);
3924 *((__u16*)data + 1) = cpu_to_le16(hc->rh.wPortChange_2);
3925 } else {
3926 dbg_rh("RH_GET_STATUS whith invalid wIndex!");
3927 OK(0);
3928 }
3929
3930 OK(4);
3931
3932 case RH_CLEAR_FEATURE | RH_ENDPOINT:
3933 switch (wValue) {
3934 case (RH_ENDPOINT_STALL):
3935 OK (0);
3936 }
3937 break;
3938
3939 case RH_CLEAR_FEATURE | RH_CLASS:
3940 switch (wValue) {
3941 case (RH_C_HUB_OVER_CURRENT):
3942 OK (0); /* hub power over current ** */
3943 }
3944 break;
3945
3946 case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
3947 switch (wValue) {
3948 case (RH_PORT_ENABLE):
3949 if (wIndex == 1) {
3950
3951 dbg_rh("trying to do disable port 1");
3952
3953 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
3954
3955 while (hc->rh.prev_wPortStatus_1 &
3956 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes));
3957 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
3958 dbg_rh("Port 1 is disabled");
3959
3960 } else if (wIndex == 2) {
3961
3962 dbg_rh("trying to do disable port 2");
3963
3964 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
3965
3966 while (hc->rh.prev_wPortStatus_2 &
3967 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes));
3968 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
3969 dbg_rh("Port 2 is disabled");
3970
3971 } else {
3972 dbg_rh("RH_CLEAR_FEATURE->RH_PORT_ENABLE "
3973 "with invalid wIndex == %d!", wIndex);
3974 }
3975
3976 OK (0);
3977 case (RH_PORT_SUSPEND):
3978 /* Opposite to suspend should be resume, so we'll do a resume. */
3979 /* FIXME: USB 1.1, 11.16.2.2 says:
3980 "Clearing the PORT_SUSPEND feature causes a host-initiated resume
3981 on the specified port. If the port is not in the Suspended state,
3982 the hub should treat this request as a functional no-operation."
3983 Shouldn't we check if the port is in a suspended state before
3984 resuming? */
3985
3986 /* Make sure the controller isn't busy. */
3987 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
3988
3989 if (wIndex == 1) {
3990 *R_USB_COMMAND =
3991 IO_STATE(R_USB_COMMAND, port_sel, port1) |
3992 IO_STATE(R_USB_COMMAND, port_cmd, resume) |
3993 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
3994 } else if (wIndex == 2) {
3995 *R_USB_COMMAND =
3996 IO_STATE(R_USB_COMMAND, port_sel, port2) |
3997 IO_STATE(R_USB_COMMAND, port_cmd, resume) |
3998 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
3999 } else {
4000 dbg_rh("RH_CLEAR_FEATURE->RH_PORT_SUSPEND "
4001 "with invalid wIndex == %d!", wIndex);
4002 }
4003
4004 OK (0);
4005 case (RH_PORT_POWER):
4006 OK (0); /* port power ** */
4007 case (RH_C_PORT_CONNECTION):
4008 if (wIndex == 1) {
4009 hc->rh.wPortChange_1 &= ~(1 << RH_PORT_CONNECTION);
4010 } else if (wIndex == 2) {
4011 hc->rh.wPortChange_2 &= ~(1 << RH_PORT_CONNECTION);
4012 } else {
4013 dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_CONNECTION "
4014 "with invalid wIndex == %d!", wIndex);
4015 }
4016
4017 OK (0);
4018 case (RH_C_PORT_ENABLE):
4019 if (wIndex == 1) {
4020 hc->rh.wPortChange_1 &= ~(1 << RH_PORT_ENABLE);
4021 } else if (wIndex == 2) {
4022 hc->rh.wPortChange_2 &= ~(1 << RH_PORT_ENABLE);
4023 } else {
4024 dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_ENABLE "
4025 "with invalid wIndex == %d!", wIndex);
4026 }
4027 OK (0);
4028 case (RH_C_PORT_SUSPEND):
4029/*** WR_RH_PORTSTAT(RH_PS_PSSC); */
4030 OK (0);
4031 case (RH_C_PORT_OVER_CURRENT):
4032 OK (0); /* port power over current ** */
4033 case (RH_C_PORT_RESET):
4034 if (wIndex == 1) {
4035 hc->rh.wPortChange_1 &= ~(1 << RH_PORT_RESET);
4036 } else if (wIndex == 2) {
4037 hc->rh.wPortChange_2 &= ~(1 << RH_PORT_RESET);
4038 } else {
4039 dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_RESET "
4040 "with invalid index == %d!", wIndex);
4041 }
4042
4043 OK (0);
4044
4045 }
4046 break;
4047
4048 case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
4049 switch (wValue) {
4050 case (RH_PORT_SUSPEND):
4051
4052 /* Make sure the controller isn't busy. */
4053 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4054
4055 if (wIndex == 1) {
4056 *R_USB_COMMAND =
4057 IO_STATE(R_USB_COMMAND, port_sel, port1) |
4058 IO_STATE(R_USB_COMMAND, port_cmd, suspend) |
4059 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
4060 } else if (wIndex == 2) {
4061 *R_USB_COMMAND =
4062 IO_STATE(R_USB_COMMAND, port_sel, port2) |
4063 IO_STATE(R_USB_COMMAND, port_cmd, suspend) |
4064 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
4065 } else {
4066 dbg_rh("RH_SET_FEATURE->RH_PORT_SUSPEND "
4067 "with invalid wIndex == %d!", wIndex);
4068 }
4069
4070 OK (0);
4071 case (RH_PORT_RESET):
4072 if (wIndex == 1) {
4073
4074 port_1_reset:
4075 dbg_rh("Doing reset of port 1");
4076
4077 /* Make sure the controller isn't busy. */
4078 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4079
4080 *R_USB_COMMAND =
4081 IO_STATE(R_USB_COMMAND, port_sel, port1) |
4082 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4083 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
4084
4085 /* We must wait at least 10 ms for the device to recover.
4086 15 ms should be enough. */
4087 udelay(15000);
4088
4089 /* Wait for reset bit to go low (should be done by now). */
4090 while (hc->rh.prev_wPortStatus_1 &
4091 IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes));
4092
4093 /* If the port status is
4094 1) connected and enabled then there is a device and everything is fine
4095 2) neither connected nor enabled then there is no device, also fine
4096 3) connected and not enabled then we try again
4097 (Yes, there are other port status combinations besides these.) */
4098
4099 if ((hc->rh.prev_wPortStatus_1 &
4100 IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) &&
4101 (hc->rh.prev_wPortStatus_1 &
4102 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no))) {
4103 dbg_rh("Connected device on port 1, but port not enabled?"
4104 " Trying reset again.");
4105 goto port_2_reset;
4106 }
4107
4108 /* Diagnostic printouts. */
4109 if ((hc->rh.prev_wPortStatus_1 &
4110 IO_STATE(R_USB_RH_PORT_STATUS_1, connected, no)) &&
4111 (hc->rh.prev_wPortStatus_1 &
4112 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no))) {
4113 dbg_rh("No connected device on port 1");
4114 } else if ((hc->rh.prev_wPortStatus_1 &
4115 IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) &&
4116 (hc->rh.prev_wPortStatus_1 &
4117 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes))) {
4118 dbg_rh("Connected device on port 1, port 1 enabled");
4119 }
4120
4121 } else if (wIndex == 2) {
4122
4123 port_2_reset:
4124 dbg_rh("Doing reset of port 2");
4125
4126 /* Make sure the controller isn't busy. */
4127 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4128
4129 /* Issue the reset command. */
4130 *R_USB_COMMAND =
4131 IO_STATE(R_USB_COMMAND, port_sel, port2) |
4132 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4133 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
4134
4135 /* We must wait at least 10 ms for the device to recover.
4136 15 ms should be enough. */
4137 udelay(15000);
4138
4139 /* Wait for reset bit to go low (should be done by now). */
4140 while (hc->rh.prev_wPortStatus_2 &
4141 IO_STATE(R_USB_RH_PORT_STATUS_2, reset, yes));
4142
4143 /* If the port status is
4144 1) connected and enabled then there is a device and everything is fine
4145 2) neither connected nor enabled then there is no device, also fine
4146 3) connected and not enabled then we try again
4147 (Yes, there are other port status combinations besides these.) */
4148
4149 if ((hc->rh.prev_wPortStatus_2 &
4150 IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes)) &&
4151 (hc->rh.prev_wPortStatus_2 &
4152 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no))) {
4153 dbg_rh("Connected device on port 2, but port not enabled?"
4154 " Trying reset again.");
4155 goto port_2_reset;
4156 }
4157
4158 /* Diagnostic printouts. */
4159 if ((hc->rh.prev_wPortStatus_2 &
4160 IO_STATE(R_USB_RH_PORT_STATUS_2, connected, no)) &&
4161 (hc->rh.prev_wPortStatus_2 &
4162 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no))) {
4163 dbg_rh("No connected device on port 2");
4164 } else if ((hc->rh.prev_wPortStatus_2 &
4165 IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes)) &&
4166 (hc->rh.prev_wPortStatus_2 &
4167 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes))) {
4168 dbg_rh("Connected device on port 2, port 2 enabled");
4169 }
4170
4171 } else {
4172 dbg_rh("RH_SET_FEATURE->RH_PORT_RESET with invalid wIndex = %d", wIndex);
4173 }
4174
4175 /* Make sure the controller isn't busy. */
4176 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4177
4178 /* If all enabled ports were disabled the host controller goes down into
4179 started mode, so we need to bring it back into the running state.
4180 (This is safe even if it's already in the running state.) */
4181 *R_USB_COMMAND =
4182 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4183 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4184 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
4185
4186 dbg_rh("...Done");
4187 OK(0);
4188
4189 case (RH_PORT_POWER):
4190 OK (0); /* port power ** */
4191 case (RH_PORT_ENABLE):
4192 /* There is no port enable command in the host controller, so if the
4193 port is already enabled, we do nothing. If not, we reset the port
4194 (with an ugly goto). */
4195
4196 if (wIndex == 1) {
4197 if (hc->rh.prev_wPortStatus_1 &
4198 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no)) {
4199 goto port_1_reset;
4200 }
4201 } else if (wIndex == 2) {
4202 if (hc->rh.prev_wPortStatus_2 &
4203 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no)) {
4204 goto port_2_reset;
4205 }
4206 } else {
4207 dbg_rh("RH_SET_FEATURE->RH_GET_STATUS with invalid wIndex = %d", wIndex);
4208 }
4209 OK (0);
4210 }
4211 break;
4212
4213 case RH_SET_ADDRESS:
4214 hc->rh.devnum = wValue;
4215 dbg_rh("RH address set to: %d", hc->rh.devnum);
4216 OK (0);
4217
4218 case RH_GET_DESCRIPTOR:
4219 switch ((wValue & 0xff00) >> 8) {
4220 case (0x01): /* device descriptor */
4221 len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_dev_des), wLength));
4222 memcpy (data, root_hub_dev_des, len);
4223 OK (len);
4224 case (0x02): /* configuration descriptor */
4225 len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_config_des), wLength));
4226 memcpy (data, root_hub_config_des, len);
4227 OK (len);
4228 case (0x03): /* string descriptors */
4229 len = usb_root_hub_string (wValue & 0xff,
4230 0xff, "ETRAX 100LX",
4231 data, wLength);
4232 if (len > 0) {
4233 OK(min(leni, len));
4234 } else {
4235 stat = -EPIPE;
4236 }
4237
4238 }
4239 break;
4240
4241 case RH_GET_DESCRIPTOR | RH_CLASS:
4242 root_hub_hub_des[2] = hc->rh.numports;
4243 len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_hub_des), wLength));
4244 memcpy (data, root_hub_hub_des, len);
4245 OK (len);
4246
4247 case RH_GET_CONFIGURATION:
4248 *(__u8 *) data = 0x01;
4249 OK (1);
4250
4251 case RH_SET_CONFIGURATION:
4252 OK (0);
4253
4254 default:
4255 stat = -EPIPE;
4256 }
4257
4258 urb->actual_length = len;
4259 urb->status = stat;
4260 urb->dev = NULL;
4261 if (urb->complete) {
4262 urb->complete(urb, NULL);
4263 }
4264 DBFEXIT;
4265
4266 return 0;
4267}
4268
static void
etrax_usb_bulk_eot_timer_func(unsigned long dummy)
{
	/* A race condition in the top half can make us miss a bulk eot
	   interrupt.  When none has arrived for a while this timer fires
	   and "simulates" one, hopefully correcting the situation. */
	dbg_bulk("bulk_eot_timer timed out.");

	etrax_usb_hc_bulk_eot_interrupt(1);
}
4278
4279static void*
4280etrax_usb_buffer_alloc(struct usb_bus* bus, size_t size, int mem_flags, dma_addr_t *dma)
4281{
4282 return kmalloc(size, mem_flags);
4283}
4284
4285static void
4286etrax_usb_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma)
4287{
4288 kfree(addr);
4289}
4290
4291
4292static struct device fake_device;
4293
4294static int __init etrax_usb_hc_init(void)
4295{
4296 static etrax_hc_t *hc;
4297 struct usb_bus *bus;
4298 struct usb_device *usb_rh;
4299 int i;
4300
4301 DBFENTER;
4302
4303 info("ETRAX 100LX USB-HCD %s (c) 2001-2003 Axis Communications AB\n", usb_hcd_version);
4304
4305 hc = kmalloc(sizeof(etrax_hc_t), GFP_KERNEL);
4306 assert(hc != NULL);
4307
4308 /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
4309 /* Note that we specify sizeof(USB_EP_Desc_t) as the size, but also allocate
4310 SB descriptors from this cache. This is ok since sizeof(USB_EP_Desc_t) ==
4311 sizeof(USB_SB_Desc_t). */
4312
4313 usb_desc_cache = kmem_cache_create("usb_desc_cache", sizeof(USB_EP_Desc_t), 0,
4314 SLAB_HWCACHE_ALIGN, 0, 0);
4315 assert(usb_desc_cache != NULL);
4316
4317 top_half_reg_cache = kmem_cache_create("top_half_reg_cache",
4318 sizeof(usb_interrupt_registers_t),
4319 0, SLAB_HWCACHE_ALIGN, 0, 0);
4320 assert(top_half_reg_cache != NULL);
4321
4322 isoc_compl_cache = kmem_cache_create("isoc_compl_cache",
4323 sizeof(usb_isoc_complete_data_t),
4324 0, SLAB_HWCACHE_ALIGN, 0, 0);
4325 assert(isoc_compl_cache != NULL);
4326
4327 etrax_usb_bus = bus = usb_alloc_bus(&etrax_usb_device_operations);
4328 hc->bus = bus;
4329 bus->bus_name="ETRAX 100LX";
4330 bus->hcpriv = hc;
4331
4332 /* Initalize RH to the default address.
4333 And make sure that we have no status change indication */
4334 hc->rh.numports = 2; /* The RH has two ports */
4335 hc->rh.devnum = 1;
4336 hc->rh.wPortChange_1 = 0;
4337 hc->rh.wPortChange_2 = 0;
4338
4339 /* Also initate the previous values to zero */
4340 hc->rh.prev_wPortStatus_1 = 0;
4341 hc->rh.prev_wPortStatus_2 = 0;
4342
4343 /* Initialize the intr-traffic flags */
4344 /* FIXME: This isn't used. (Besides, the error field isn't initialized.) */
4345 hc->intr.sleeping = 0;
4346 hc->intr.wq = NULL;
4347
4348 epid_usage_bitmask = 0;
4349 epid_out_traffic = 0;
4350
4351 /* Mark the invalid epid as being used. */
4352 set_bit(INVALID_EPID, (void *)&epid_usage_bitmask);
4353 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, INVALID_EPID);
4354 nop();
4355 /* The valid bit should still be set ('invalid' is in our world; not the hardware's). */
4356 *R_USB_EPT_DATA = (IO_STATE(R_USB_EPT_DATA, valid, yes) |
4357 IO_FIELD(R_USB_EPT_DATA, max_len, 1));
4358
4359 /* Mark the dummy epid as being used. */
4360 set_bit(DUMMY_EPID, (void *)&epid_usage_bitmask);
4361 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, DUMMY_EPID);
4362 nop();
4363 *R_USB_EPT_DATA = (IO_STATE(R_USB_EPT_DATA, valid, no) |
4364 IO_FIELD(R_USB_EPT_DATA, max_len, 1));
4365
4366 /* Initialize the urb list by initiating a head for each list. */
4367 for (i = 0; i < NBR_OF_EPIDS; i++) {
4368 INIT_LIST_HEAD(&urb_list[i]);
4369 }
4370 spin_lock_init(&urb_list_lock);
4371
4372 INIT_LIST_HEAD(&urb_unlink_list);
4373
4374
4375 /* Initiate the bulk start timer. */
4376 init_timer(&bulk_start_timer);
4377 bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
4378 bulk_start_timer.function = etrax_usb_bulk_start_timer_func;
4379 add_timer(&bulk_start_timer);
4380
4381
4382 /* Initiate the bulk eot timer. */
4383 init_timer(&bulk_eot_timer);
4384 bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
4385 bulk_eot_timer.function = etrax_usb_bulk_eot_timer_func;
4386 add_timer(&bulk_eot_timer);
4387
4388 /* Set up the data structures for USB traffic. Note that this must be done before
4389 any interrupt that relies on sane DMA list occurrs. */
4390 init_rx_buffers();
4391 init_tx_bulk_ep();
4392 init_tx_ctrl_ep();
4393 init_tx_intr_ep();
4394 init_tx_isoc_ep();
4395
4396 device_initialize(&fake_device);
4397 kobject_set_name(&fake_device.kobj, "etrax_usb");
4398 kobject_add(&fake_device.kobj);
4399 hc->bus->controller = &fake_device;
4400 usb_register_bus(hc->bus);
4401
4402 *R_IRQ_MASK2_SET =
4403 /* Note that these interrupts are not used. */
4404 IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
4405 /* Sub channel 1 (ctrl) descr. interrupts are used. */
4406 IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
4407 IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
4408 /* Sub channel 3 (isoc) descr. interrupts are used. */
4409 IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
4410
4411 /* Note that the dma9_descr interrupt is not used. */
4412 *R_IRQ_MASK2_SET =
4413 IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
4414 IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
4415
4416 /* FIXME: Enable iso_eof only when isoc traffic is running. */
4417 *R_USB_IRQ_MASK_SET =
4418 IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) |
4419 IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
4420 IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
4421 IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
4422 IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
4423
4424
4425 if (request_irq(ETRAX_USB_HC_IRQ, etrax_usb_hc_interrupt_top_half, 0,
4426 "ETRAX 100LX built-in USB (HC)", hc)) {
4427 err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
4428 etrax_usb_hc_cleanup();
4429 DBFEXIT;
4430 return -1;
4431 }
4432
4433 if (request_irq(ETRAX_USB_RX_IRQ, etrax_usb_rx_interrupt, 0,
4434 "ETRAX 100LX built-in USB (Rx)", hc)) {
4435 err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
4436 etrax_usb_hc_cleanup();
4437 DBFEXIT;
4438 return -1;
4439 }
4440
4441 if (request_irq(ETRAX_USB_TX_IRQ, etrax_usb_tx_interrupt, 0,
4442 "ETRAX 100LX built-in USB (Tx)", hc)) {
4443 err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
4444 etrax_usb_hc_cleanup();
4445 DBFEXIT;
4446 return -1;
4447 }
4448
4449 /* R_USB_COMMAND:
4450 USB commands in host mode. The fields in this register should all be
4451 written to in one write. Do not read-modify-write one field at a time. A
4452 write to this register will trigger events in the USB controller and an
4453 incomplete command may lead to unpredictable results, and in worst case
4454 even to a deadlock in the controller.
4455 (Note however that the busy field is read-only, so no need to write to it.) */
4456
4457 /* Check the busy bit before writing to R_USB_COMMAND. */
4458
4459 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4460
4461 /* Reset the USB interface. */
4462 *R_USB_COMMAND =
4463 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4464 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4465 IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
4466
4467 /* Designer's Reference, p. 8 - 10 says we should Initate R_USB_FM_PSTART to 0x2A30 (10800),
4468 to guarantee that control traffic gets 10% of the bandwidth, and periodic transfer may
4469 allocate the rest (90%). This doesn't work though. Read on for a lenghty explanation.
4470
4471 While there is a difference between rev. 2 and rev. 3 of the ETRAX 100LX regarding the NAK
4472 behaviour, it doesn't solve this problem. What happens is that a control transfer will not
4473 be interrupted in its data stage when PSTART happens (the point at which periodic traffic
4474 is started). Thus, if PSTART is set to 10800 and its IN or OUT token is NAKed until just before
4475 PSTART happens, it will continue the IN/OUT transfer as long as it's ACKed. After it's done,
4476 there may be too little time left for an isochronous transfer, causing an epid attention
4477 interrupt due to perror. The work-around for this is to let the control transfers run at the
4478 end of the frame instead of at the beginning, and will be interrupted just fine if it doesn't
4479 fit into the frame. However, since there will *always* be a control transfer at the beginning
4480 of the frame, regardless of what we set PSTART to, that transfer might be a 64-byte transfer
4481 which consumes up to 15% of the frame, leaving only 85% for periodic traffic. The solution to
4482 this would be to 'dummy allocate' 5% of the frame with the usb_claim_bandwidth function to make
4483 sure that the periodic transfers that are inserted will always fit in the frame.
4484
4485 The idea was suggested that a control transfer could be split up into several 8 byte transfers,
4486 so that it would be interrupted by PSTART, but since this can't be done for an IN transfer this
4487 hasn't been implemented.
4488
4489 The value 11960 is chosen to be just after the SOF token, with a couple of bit times extra
4490 for possible bit stuffing. */
4491
4492 *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
4493
4494#ifdef CONFIG_ETRAX_USB_HOST_PORT1
4495 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
4496#endif
4497
4498#ifdef CONFIG_ETRAX_USB_HOST_PORT2
4499 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
4500#endif
4501
4502 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4503
4504 /* Configure the USB interface as a host controller. */
4505 *R_USB_COMMAND =
4506 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4507 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4508 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
4509
4510 /* Note: Do not reset any ports here. Await the port status interrupts, to have a controlled
4511 sequence of resetting the ports. If we reset both ports now, and there are devices
4512 on both ports, we will get a bus error because both devices will answer the set address
4513 request. */
4514
4515 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4516
4517 /* Start processing of USB traffic. */
4518 *R_USB_COMMAND =
4519 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4520 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4521 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
4522
4523 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4524
4525 usb_rh = usb_alloc_dev(NULL, hc->bus, 0);
4526 hc->bus->root_hub = usb_rh;
4527 usb_rh->state = USB_STATE_ADDRESS;
4528 usb_rh->speed = USB_SPEED_FULL;
4529 usb_rh->devnum = 1;
4530 hc->bus->devnum_next = 2;
4531 usb_rh->ep0.desc.wMaxPacketSize = __const_cpu_to_le16(64);
4532 usb_get_device_descriptor(usb_rh, USB_DT_DEVICE_SIZE);
4533 usb_new_device(usb_rh);
4534
4535 DBFEXIT;
4536
4537 return 0;
4538}
4539
/* Module-exit teardown: release the three controller IRQs and deregister
   the USB bus.  Mirrors (part of) what etrax_usb_hc_init set up. */
static void etrax_usb_hc_cleanup(void)
{
	DBFENTER;

	/* NOTE(review): etrax_usb_hc_init registered these IRQs with 'hc'
	   as the dev_id cookie; freeing with a NULL dev_id does not match
	   that registration -- verify against free_irq() semantics. */
	free_irq(ETRAX_USB_HC_IRQ, NULL);
	free_irq(ETRAX_USB_RX_IRQ, NULL);
	free_irq(ETRAX_USB_TX_IRQ, NULL);

	usb_deregister_bus(etrax_usb_bus);

	/* FIXME: call kmem_cache_destroy here?  (usb_desc_cache,
	   top_half_reg_cache and isoc_compl_cache are never destroyed.) */

	DBFEXIT;
}
4554
/* Hook the host-controller init/cleanup routines into module load/unload. */
module_init(etrax_usb_hc_init);
module_exit(etrax_usb_hc_cleanup);
diff --git a/drivers/usb/host/hc_crisv10.h b/drivers/usb/host/hc_crisv10.h
new file mode 100644
index 000000000000..62f77111d418
--- /dev/null
+++ b/drivers/usb/host/hc_crisv10.h
@@ -0,0 +1,289 @@
1#ifndef __LINUX_ETRAX_USB_H
2#define __LINUX_ETRAX_USB_H
3
4#include <linux/types.h>
5#include <linux/list.h>
6
/* Hardware DMA descriptor for USB IN (receive) traffic.  Field order and
   sizes follow the ETRAX 100LX DMA descriptor layout and must not change;
   fields are 'volatile' because the DMA engine updates them behind the
   CPU's back. */
typedef struct USB_IN_Desc {
	volatile __u16 sw_len;
	volatile __u16 command;
	volatile unsigned long next;	/* physical link to next descriptor */
	volatile unsigned long buf;	/* physical address of the data buffer */
	volatile __u16 hw_len;
	volatile __u16 status;
} USB_IN_Desc_t;
15
/* Hardware sub-buffer (SB) descriptor for outgoing traffic.  The 'dummy'
   member pads the struct so that sizeof(USB_SB_Desc_t) ==
   sizeof(USB_EP_Desc_t); the driver allocates both from the single
   usb_desc_cache (see hc_crisv10.c). */
typedef struct USB_SB_Desc {
	volatile __u16 sw_len;
	volatile __u16 command;
	volatile unsigned long next;	/* physical link to next SB */
	volatile unsigned long buf;	/* physical address of the data buffer */
	__u32 dummy;			/* padding: keep same size as EP desc */
} USB_SB_Desc_t;
23
/* Hardware endpoint (EP) descriptor.  'sub' points at the SB list that the
   DMA advances as it processes the endpoint; 'dummy' pads to the same size
   as USB_SB_Desc_t so both share one allocation cache. */
typedef struct USB_EP_Desc {
	volatile __u16 hw_len;
	volatile __u16 command;
	volatile unsigned long sub;	/* physical pointer to first SB */
	volatile unsigned long next;	/* physical link to next EP desc */
	__u32 dummy;			/* padding: keep same size as SB desc */
} USB_EP_Desc_t;
31
/* Software state for the driver's virtual root hub.  The init code sets
   devnum = 1 and numports = 2 (the ETRAX 100LX has two ports). */
struct virt_root_hub {
	int devnum;			/* USB address of the root hub */
	void *urb;
	void *int_addr;
	int send;
	int interval;
	int numports;			/* number of downstream ports */
	struct timer_list rh_int_timer;
	/* Latched change bits and previous status words, one pair per port;
	   volatile: presumably updated from the port-status interrupt path
	   -- verify against the interrupt handlers. */
	volatile __u16 wPortChange_1;
	volatile __u16 wPortChange_2;
	volatile __u16 prev_wPortStatus_1;
	volatile __u16 prev_wPortStatus_2;
};
45
/* Interrupt-traffic flags.  Per the FIXME in etrax_usb_hc_init this is
   currently unused, and 'error' is never initialized there. */
struct etrax_usb_intr_traffic {
	int sleeping;
	int error;
	struct wait_queue *wq;
};
51
/* Top-level per-controller state: the usbcore bus, the virtual root hub,
   and the (unused) interrupt-traffic flags. */
typedef struct etrax_usb_hc {
	struct usb_bus *bus;
	struct virt_root_hub rh;
	struct etrax_usb_intr_traffic intr;
} etrax_hc_t;
57
/* Lifecycle states an URB moves through inside this driver; stored in
   etrax_urb_priv_t.urb_state and shared between interrupt handlers. */
typedef enum {
	STARTED,
	NOT_STARTED,
	UNLINK,
	TRANSFER_DONE,
	WAITING_FOR_DESCR_INTR
} etrax_usb_urb_state_t;
65
66
67
/* Driver-private state attached to each submitted URB. */
typedef struct etrax_usb_urb_priv {
	/* The first_sb field is used for freeing all SB descriptors belonging
	   to an urb. The corresponding ep descriptor's sub pointer cannot be
	   used for this since the DMA advances the sub pointer as it processes
	   the sb list. */
	USB_SB_Desc_t *first_sb;
	/* The last_sb field refers to the last SB descriptor that belongs to
	   this urb. This is important to know so we can free the SB descriptors
	   that range between first_sb and last_sb. */
	USB_SB_Desc_t *last_sb;

	/* The rx_offset field is used in ctrl and bulk traffic to keep track
	   of the offset in the urb's transfer_buffer where incoming data should be
	   copied to. */
	__u32 rx_offset;

	/* Counter used in isochronous transfers to keep track of the
	   number of packets received/transmitted. */
	__u32 isoc_packet_counter;

	/* This field is used to pass information about the urb's current state between
	   the various interrupt handlers (thus marked volatile). */
	volatile etrax_usb_urb_state_t urb_state;

	/* Connection between the submitted urb and ETRAX epid number */
	__u8 epid;

	/* The rx_data_list field is used for periodic traffic, to hold
	   received data for later processing in the complete_urb functions,
	   where the data is copied to the urb's transfer_buffer. Basically, we
	   use this intermediate storage because we don't know when it's safe to
	   reuse the transfer_buffer (FIXME?). */
	struct list_head rx_data_list;
} etrax_urb_priv_t;
102
/* Snapshot of controller registers captured by the top-half interrupt
   handler and handed to the bottom half via the usb_bh work item.
   Allocated from top_half_reg_cache (see hc_crisv10.c). */
typedef struct usb_interrupt_registers
{
	etrax_hc_t *hc;
	__u32 r_usb_epid_attn;
	__u8 r_usb_status;
	__u16 r_usb_rh_port_status_1;
	__u16 r_usb_rh_port_status_2;
	__u32 r_usb_irq_mask_read;
	__u32 r_usb_fm_number;
	struct work_struct usb_bh;	/* bottom-half deferral handle */
} usb_interrupt_registers_t;
115
/* Hand-off from the isochronous top half to its bottom half: the completed
   urb plus the work item used to schedule the completion.  Allocated from
   isoc_compl_cache (see hc_crisv10.c). */
typedef struct usb_isoc_complete_data
{
	struct urb *urb;
	struct work_struct usb_bh;	/* bottom-half deferral handle */
} usb_isoc_complete_data_t;
122
/* This struct holds data we get from the rx descriptors for DMA channel 9
   for periodic traffic (intr and isoc): one received chunk plus its length,
   linked into etrax_urb_priv_t.rx_data_list for later copy-out. */
typedef struct rx_data
{
	void *data;			/* received bytes (intermediate copy) */
	int length;			/* number of valid bytes in 'data' */
	struct list_head list;
} rx_data_t;
131
/* List node wrapping an urb so it can sit on the per-epid urb_list. */
typedef struct urb_entry
{
	struct urb *urb;
	struct list_head list;
} urb_entry_t;
137
/* ---------------------------------------------------------------------------
   Virtual Root HUB

   Protocol constants used when emulating the root hub in software.
   ------------------------------------------------------------------------- */
/* destination of request */
#define RH_INTERFACE               0x01
#define RH_ENDPOINT                0x02
#define RH_OTHER                   0x03

#define RH_CLASS                   0x20
#define RH_VENDOR                  0x40

/* Requests: bRequest << 8 | bmRequestType */
#define RH_GET_STATUS           0x0080
#define RH_CLEAR_FEATURE        0x0100
#define RH_SET_FEATURE          0x0300
#define RH_SET_ADDRESS		0x0500
#define RH_GET_DESCRIPTOR	0x0680
#define RH_SET_DESCRIPTOR       0x0700
#define RH_GET_CONFIGURATION	0x0880
#define RH_SET_CONFIGURATION	0x0900
#define RH_GET_STATE            0x0280
#define RH_GET_INTERFACE        0x0A80
#define RH_SET_INTERFACE        0x0B00
#define RH_SYNC_FRAME           0x0C80
/* Our Vendor Specific Request */
#define RH_SET_EP               0x2000


/* Hub port features */
#define RH_PORT_CONNECTION         0x00
#define RH_PORT_ENABLE             0x01
#define RH_PORT_SUSPEND            0x02
#define RH_PORT_OVER_CURRENT       0x03
#define RH_PORT_RESET              0x04
#define RH_PORT_POWER              0x08
#define RH_PORT_LOW_SPEED          0x09
#define RH_C_PORT_CONNECTION       0x10
#define RH_C_PORT_ENABLE           0x11
#define RH_C_PORT_SUSPEND          0x12
#define RH_C_PORT_OVER_CURRENT     0x13
#define RH_C_PORT_RESET            0x14

/* Hub features */
#define RH_C_HUB_LOCAL_POWER       0x00
#define RH_C_HUB_OVER_CURRENT      0x01

#define RH_DEVICE_REMOTE_WAKEUP    0x00
#define RH_ENDPOINT_STALL          0x01

/* Our Vendor Specific feature */
#define RH_REMOVE_EP               0x00

/* Internal result codes for root-hub request handling. */
#define RH_ACK                     0x01
#define RH_REQ_ERR                 -1
#define RH_NACK                    0x00
/* Field definitions for the IN/EP/SB descriptor command and status words
   (BITNR = starting bit, WIDTH = field width in bits). */

#define USB_IN_command__eol__BITNR      0 /* command macros */
#define USB_IN_command__eol__WIDTH      1
#define USB_IN_command__eol__no         0
#define USB_IN_command__eol__yes        1

#define USB_IN_command__intr__BITNR     3
#define USB_IN_command__intr__WIDTH     1
#define USB_IN_command__intr__no        0
#define USB_IN_command__intr__yes       1

#define USB_IN_status__eop__BITNR       1 /* status macros. */
#define USB_IN_status__eop__WIDTH       1
#define USB_IN_status__eop__no          0
#define USB_IN_status__eop__yes         1

#define USB_IN_status__eot__BITNR       5
#define USB_IN_status__eot__WIDTH       1
#define USB_IN_status__eot__no          0
#define USB_IN_status__eot__yes         1

#define USB_IN_status__error__BITNR     6
#define USB_IN_status__error__WIDTH     1
#define USB_IN_status__error__no        0
#define USB_IN_status__error__yes       1

#define USB_IN_status__nodata__BITNR    7
#define USB_IN_status__nodata__WIDTH    1
#define USB_IN_status__nodata__no       0
#define USB_IN_status__nodata__yes      1

#define USB_IN_status__epid__BITNR      8
#define USB_IN_status__epid__WIDTH      5

#define USB_EP_command__eol__BITNR      0
#define USB_EP_command__eol__WIDTH      1
#define USB_EP_command__eol__no         0
#define USB_EP_command__eol__yes        1

#define USB_EP_command__eof__BITNR      1
#define USB_EP_command__eof__WIDTH      1
#define USB_EP_command__eof__no         0
#define USB_EP_command__eof__yes        1

#define USB_EP_command__intr__BITNR     3
#define USB_EP_command__intr__WIDTH     1
#define USB_EP_command__intr__no        0
#define USB_EP_command__intr__yes       1

#define USB_EP_command__enable__BITNR   4
#define USB_EP_command__enable__WIDTH   1
#define USB_EP_command__enable__no      0
#define USB_EP_command__enable__yes     1

#define USB_EP_command__hw_valid__BITNR 5
#define USB_EP_command__hw_valid__WIDTH 1
#define USB_EP_command__hw_valid__no    0
#define USB_EP_command__hw_valid__yes   1

#define USB_EP_command__epid__BITNR     8
#define USB_EP_command__epid__WIDTH     5

#define USB_SB_command__eol__BITNR      0 /* command macros. */
#define USB_SB_command__eol__WIDTH      1
#define USB_SB_command__eol__no         0
#define USB_SB_command__eol__yes        1

#define USB_SB_command__eot__BITNR      1
#define USB_SB_command__eot__WIDTH      1
#define USB_SB_command__eot__no         0
#define USB_SB_command__eot__yes        1

#define USB_SB_command__intr__BITNR     3
#define USB_SB_command__intr__WIDTH     1
#define USB_SB_command__intr__no        0
#define USB_SB_command__intr__yes       1

/* tt: token type of the transfer described by this SB. */
#define USB_SB_command__tt__BITNR       4
#define USB_SB_command__tt__WIDTH       2
#define USB_SB_command__tt__zout        0
#define USB_SB_command__tt__in          1
#define USB_SB_command__tt__out         2
#define USB_SB_command__tt__setup       3


#define USB_SB_command__rem__BITNR      8
#define USB_SB_command__rem__WIDTH      6

#define USB_SB_command__full__BITNR     6
#define USB_SB_command__full__WIDTH     1
#define USB_SB_command__full__no        0
#define USB_SB_command__full__yes       1

#endif
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
new file mode 100644
index 000000000000..3981bf15c8c7
--- /dev/null
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -0,0 +1,284 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 * (C) Copyright 2002 Hewlett-Packard Company
7 *
8 * Bus Glue for AMD Alchemy Au1xxx
9 *
10 * Written by Christopher Hoover <ch@hpl.hp.com>
11 * Based on fragments of previous driver by Rusell King et al.
12 *
13 * Modified for LH7A404 from ohci-sa1111.c
14 * by Durgesh Pattamatta <pattamattad@sharpsec.com>
15 * Modified for AMD Alchemy Au1xxx
16 * by Matt Porter <mporter@kernel.crashing.org>
17 *
18 * This file is licenced under the GPL.
19 */
20
21#include <asm/mach-au1x00/au1000.h>
22
23#define USBH_ENABLE_BE (1<<0)
24#define USBH_ENABLE_C (1<<1)
25#define USBH_ENABLE_E (1<<2)
26#define USBH_ENABLE_CE (1<<3)
27#define USBH_ENABLE_RD (1<<4)
28
29#ifdef __LITTLE_ENDIAN
30#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C)
31#elif __BIG_ENDIAN
32#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | USBH_ENABLE_BE)
33#else
34#error not byte order defined
35#endif
36
37extern int usb_disabled(void);
38
39/*-------------------------------------------------------------------------*/
40
/* Power up and clock the Au1xxx OHCI block, then wait until the hardware
   reports reset complete.  The two-step write with udelay()s and the
   double read in the poll loop follow the Au1500 errata noted below;
   do not reorder. */
static void au1xxx_start_hc(struct platform_device *dev)
{
	printk(KERN_DEBUG __FILE__
		": starting Au1xxx OHCI USB Controller\n");

	/* enable host controller */
	au_writel(USBH_ENABLE_CE, USB_HOST_CONFIG);
	udelay(1000);
	au_writel(USBH_ENABLE_INIT, USB_HOST_CONFIG);
	udelay(1000);

	/* wait for reset complete (read register twice; see au1500 errata) */
	while (au_readl(USB_HOST_CONFIG),
		!(au_readl(USB_HOST_CONFIG) & USBH_ENABLE_RD))
		udelay(1000);

	printk(KERN_DEBUG __FILE__
		": Clock to USB host has been enabled \n");
}
60
61static void au1xxx_stop_hc(struct platform_device *dev)
62{
63 printk(KERN_DEBUG __FILE__
64 ": stopping Au1xxx OHCI USB Controller\n");
65
66 /* Disable clock */
67 au_writel(readl((void *)USB_HOST_CONFIG) & ~USBH_ENABLE_CE, USB_HOST_CONFIG);
68}
69
70
71/*-------------------------------------------------------------------------*/
72
73/* configure so an HC device and id are always provided */
74/* always called with process context; sleeping is OK */
75
76
77/**
78 * usb_hcd_au1xxx_probe - initialize Au1xxx-based HCDs
79 * Context: !in_interrupt()
80 *
81 * Allocates basic resources for this USB host controller, and
82 * then invokes the start() method for the HCD associated with it
83 * through the hotplug entry's driver_data.
84 *
85 */
86int usb_hcd_au1xxx_probe (const struct hc_driver *driver,
87 struct platform_device *dev)
88{
89 int retval;
90 struct usb_hcd *hcd;
91
92 if(dev->resource[1].flags != IORESOURCE_IRQ) {
93 pr_debug ("resource[1] is not IORESOURCE_IRQ");
94 return -ENOMEM;
95 }
96
97 hcd = usb_create_hcd(driver, &dev->dev, "au1xxx");
98 if (!hcd)
99 return -ENOMEM;
100 hcd->rsrc_start = dev->resource[0].start;
101 hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
102
103 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
104 pr_debug("request_mem_region failed");
105 retval = -EBUSY;
106 goto err1;
107 }
108
109 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
110 if (!hcd->regs) {
111 pr_debug("ioremap failed");
112 retval = -ENOMEM;
113 goto err2;
114 }
115
116 au1xxx_start_hc(dev);
117 ohci_hcd_init(hcd_to_ohci(hcd));
118
119 retval = usb_add_hcd(hcd, dev->resource[1].start, SA_INTERRUPT);
120 if (retval == 0)
121 return retval;
122
123 au1xxx_stop_hc(dev);
124 iounmap(hcd->regs);
125 err2:
126 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
127 err1:
128 usb_put_hcd(hcd);
129 return retval;
130}
131
132
133/* may be called without controller electrically present */
134/* may be called with controller, bus, and devices active */
135
/**
 * usb_hcd_au1xxx_remove - shutdown processing for Au1xxx-based HCDs
 * @dev: USB Host Controller being removed
 * Context: !in_interrupt()
 *
 * Reverses the effect of usb_hcd_au1xxx_probe(), first invoking
 * the HCD's stop() method. It is always called from a thread
 * context, normally "rmmod", "apmd", or something similar.
 *
 * The teardown order mirrors probe in reverse: detach from usbcore,
 * stop the hardware clock, unmap registers, release the memory region,
 * drop the hcd reference.
 */
void usb_hcd_au1xxx_remove (struct usb_hcd *hcd, struct platform_device *dev)
{
	usb_remove_hcd(hcd);
	au1xxx_stop_hc(dev);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
}
154
155/*-------------------------------------------------------------------------*/
156
157static int __devinit
158ohci_au1xxx_start (struct usb_hcd *hcd)
159{
160 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
161 int ret;
162
163 ohci_dbg (ohci, "ohci_au1xxx_start, ohci:%p", ohci);
164
165 if ((ret = ohci_init (ohci)) < 0)
166 return ret;
167
168 if ((ret = ohci_run (ohci)) < 0) {
169 err ("can't start %s", hcd->self.bus_name);
170 ohci_stop (hcd);
171 return ret;
172 }
173
174 return 0;
175}
176
177/*-------------------------------------------------------------------------*/
178
/* hc_driver for the Au1xxx: generic OHCI entry points plus the
   platform-specific start routine above.  HCD_MEMORY marks memory-mapped
   (not I/O-port) registers. */
static const struct hc_driver ohci_au1xxx_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"Au1xxx OHCI",
	.hcd_priv_size =	sizeof(struct ohci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			ohci_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	/*
	 * basic lifecycle operations
	 */
	.start =		ohci_au1xxx_start,
#ifdef	CONFIG_PM
	/* suspend:		ohci_au1xxx_suspend,  -- tbd */
	/* resume:		ohci_au1xxx_resume,   -- tbd */
#endif /*CONFIG_PM*/
	.stop =			ohci_stop,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		ohci_urb_enqueue,
	.urb_dequeue =		ohci_urb_dequeue,
	.endpoint_disable =	ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number =	ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	ohci_hub_status_data,
	.hub_control =		ohci_hub_control,
};
218
219/*-------------------------------------------------------------------------*/
220
221static int ohci_hcd_au1xxx_drv_probe(struct device *dev)
222{
223 struct platform_device *pdev = to_platform_device(dev);
224 int ret;
225
226 pr_debug ("In ohci_hcd_au1xxx_drv_probe");
227
228 if (usb_disabled())
229 return -ENODEV;
230
231 ret = usb_hcd_au1xxx_probe(&ohci_au1xxx_hc_driver, pdev);
232 return ret;
233}
234
/* Platform bus remove: tear down the hcd stored in the device's
 * driver data.  Always succeeds. */
static int ohci_hcd_au1xxx_drv_remove(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	usb_hcd_au1xxx_remove(hcd, to_platform_device(dev));
	return 0;
}
243 /*TBD*/
244/*static int ohci_hcd_au1xxx_drv_suspend(struct device *dev)
245{
246 struct platform_device *pdev = to_platform_device(dev);
247 struct usb_hcd *hcd = dev_get_drvdata(dev);
248
249 return 0;
250}
251static int ohci_hcd_au1xxx_drv_resume(struct device *dev)
252{
253 struct platform_device *pdev = to_platform_device(dev);
254 struct usb_hcd *hcd = dev_get_drvdata(dev);
255
256 return 0;
257}
258*/
259
/* Driver-model glue: binds to the "au1xxx-ohci" platform device.
   Suspend/resume are still TBD (see commented stubs above). */
static struct device_driver ohci_hcd_au1xxx_driver = {
	.name		= "au1xxx-ohci",
	.bus		= &platform_bus_type,
	.probe		= ohci_hcd_au1xxx_drv_probe,
	.remove		= ohci_hcd_au1xxx_drv_remove,
	/*.suspend	= ohci_hcd_au1xxx_drv_suspend, */
	/*.resume	= ohci_hcd_au1xxx_drv_resume, */
};
268
269static int __init ohci_hcd_au1xxx_init (void)
270{
271 pr_debug (DRIVER_INFO " (Au1xxx)");
272 pr_debug ("block sizes: ed %d td %d\n",
273 sizeof (struct ed), sizeof (struct td));
274
275 return driver_register(&ohci_hcd_au1xxx_driver);
276}
277
/* Module exit: unregister the platform driver (triggers remove()). */
static void __exit ohci_hcd_au1xxx_cleanup (void)
{
	driver_unregister(&ohci_hcd_au1xxx_driver);
}
282
/* Hook init/cleanup into the module load/unload path. */
module_init (ohci_hcd_au1xxx_init);
module_exit (ohci_hcd_au1xxx_cleanup);
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
new file mode 100644
index 000000000000..62f53a213808
--- /dev/null
+++ b/drivers/usb/host/ohci-dbg.c
@@ -0,0 +1,707 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 *
7 * This file is licenced under the GPL.
8 */
9
10/*-------------------------------------------------------------------------*/
11
12#ifdef DEBUG
13
/* Map an endpoint/pipe type to a short name for log output.  Uses a GCC
   statement expression (the ({ ... }) form) so it can be used where an
   expression is expected. */
#define edstring(ed_type) ({ char *temp; \
	switch (ed_type) { \
	case PIPE_CONTROL:	temp = "ctrl"; break; \
	case PIPE_BULK:		temp = "bulk"; break; \
	case PIPE_INTERRUPT:	temp = "intr"; break; \
	default: 		temp = "isoc"; break; \
	}; temp;})
#define pipestring(pipe) edstring(usb_pipetype(pipe))
22
/* debug| print the main components of an URB
 * small: 0) header + data packets 1) just header
 */
static void __attribute__((unused))
urb_print (struct urb * urb, char * str, int small)
{
	unsigned int pipe= urb->pipe;

	if (!urb->dev || !urb->dev->bus) {
		dbg("%s URB: no dev", str);
		return;
	}

	/* Without OHCI_VERBOSE_DEBUG, only URBs that completed with a
	 * nonzero status are reported: the #ifndef makes the dbg() call
	 * below conditional on the if-statement. */
#ifndef	OHCI_VERBOSE_DEBUG
	if (urb->status != 0)
#endif
	dbg("%s %p dev=%d ep=%d%s-%s flags=%x len=%d/%d stat=%d",
		    str,
		    urb,
		    usb_pipedevice (pipe),
		    usb_pipeendpoint (pipe),
		    usb_pipeout (pipe)? "out" : "in",
		    pipestring (pipe),
		    urb->transfer_flags,
		    urb->actual_length,
		    urb->transfer_buffer_length,
		    urb->status);

#ifdef	OHCI_VERBOSE_DEBUG
	/* Verbose mode: additionally hex-dump the setup packet (control
	 * transfers) and up to 16 bytes of the data buffer. */
	if (!small) {
		int i, len;

		if (usb_pipecontrol (pipe)) {
			printk (KERN_DEBUG __FILE__ ": setup(8):");
			for (i = 0; i < 8 ; i++)
				printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
			printk ("\n");
		}
		if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
			printk (KERN_DEBUG __FILE__ ": data(%d/%d):",
				urb->actual_length,
				urb->transfer_buffer_length);
			len = usb_pipeout (pipe)?
						urb->transfer_buffer_length: urb->actual_length;
			for (i = 0; i < 16 && i < len; i++)
				printk (" %02x", ((__u8 *) urb->transfer_buffer) [i]);
			printk ("%s stat:%d\n", i < len? "...": "", urb->status);
		}
	}
#endif
}
74
/* Emit formatted debug output: into the caller's buffer (*next/*size,
 * advanced in place) when 'next' is non-NULL, else to the log via
 * ohci_dbg().  No trailing semicolon after while (0): the original had
 * one, which defeats the do-while(0) idiom and breaks call sites of the
 * form "if (...) ohci_dbg_sw(...); else ...". */
#define ohci_dbg_sw(ohci, next, size, format, arg...) \
	do { \
	if (next) { \
		unsigned s_len; \
		s_len = scnprintf (*next, *size, format, ## arg ); \
		*size -= s_len; *next += s_len; \
	} else \
		ohci_dbg(ohci,format, ## arg ); \
	} while (0)
84
85
/* Pretty-print an OHCI interrupt mask register: the raw value followed by
   the symbolic name of each set bit.  Output goes to the caller's buffer
   or the log, per ohci_dbg_sw(). */
static void ohci_dump_intr_mask (
	struct ohci_hcd *ohci,
	char *label,
	u32 mask,
	char **next,
	unsigned *size)
{
	ohci_dbg_sw (ohci, next, size, "%s 0x%08x%s%s%s%s%s%s%s%s%s\n",
		label,
		mask,
		(mask & OHCI_INTR_MIE) ? " MIE" : "",
		(mask & OHCI_INTR_OC) ? " OC" : "",
		(mask & OHCI_INTR_RHSC) ? " RHSC" : "",
		(mask & OHCI_INTR_FNO) ? " FNO" : "",
		(mask & OHCI_INTR_UE) ? " UE" : "",
		(mask & OHCI_INTR_RD) ? " RD" : "",
		(mask & OHCI_INTR_SF) ? " SF" : "",
		(mask & OHCI_INTR_WDH) ? " WDH" : "",
		(mask & OHCI_INTR_SO) ? " SO" : ""
		);
}
107
108static void maybe_print_eds (
109 struct ohci_hcd *ohci,
110 char *label,
111 u32 value,
112 char **next,
113 unsigned *size)
114{
115 if (value)
116 ohci_dbg_sw (ohci, next, size, "%s %08x\n", label, value);
117}
118
119static char *hcfs2string (int state)
120{
121 switch (state) {
122 case OHCI_USB_RESET: return "reset";
123 case OHCI_USB_RESUME: return "resume";
124 case OHCI_USB_OPER: return "operational";
125 case OHCI_USB_SUSPEND: return "suspend";
126 }
127 return "?";
128}
129
// dump control and status registers; output goes into the caller's
// buffer when next != NULL, else to the log (see ohci_dbg_sw)
static void
ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
{
	struct ohci_regs __iomem *regs = controller->regs;
	u32		temp;

	/* revision register: BCD version in the low byte, legacy-support
	   flag in bit 8 */
	temp = ohci_readl (controller, &regs->revision) & 0xff;
	ohci_dbg_sw (controller, next, size,
		"OHCI %d.%d, %s legacy support registers\n",
		0x03 & (temp >> 4), (temp & 0x0f),
		(temp & 0x0100) ? "with" : "NO");

	temp = ohci_readl (controller, &regs->control);
	ohci_dbg_sw (controller, next, size,
		"control 0x%03x%s%s%s HCFS=%s%s%s%s%s CBSR=%d\n",
		temp,
		(temp & OHCI_CTRL_RWE) ? " RWE" : "",
		(temp & OHCI_CTRL_RWC) ? " RWC" : "",
		(temp & OHCI_CTRL_IR) ? " IR" : "",
		hcfs2string (temp & OHCI_CTRL_HCFS),
		(temp & OHCI_CTRL_BLE) ? " BLE" : "",
		(temp & OHCI_CTRL_CLE) ? " CLE" : "",
		(temp & OHCI_CTRL_IE) ? " IE" : "",
		(temp & OHCI_CTRL_PLE) ? " PLE" : "",
		temp & OHCI_CTRL_CBSR
		);

	temp = ohci_readl (controller, &regs->cmdstatus);
	ohci_dbg_sw (controller, next, size,
		"cmdstatus 0x%05x SOC=%d%s%s%s%s\n", temp,
		(temp & OHCI_SOC) >> 16,
		(temp & OHCI_OCR) ? " OCR" : "",
		(temp & OHCI_BLF) ? " BLF" : "",
		(temp & OHCI_CLF) ? " CLF" : "",
		(temp & OHCI_HCR) ? " HCR" : ""
		);

	ohci_dump_intr_mask (controller, "intrstatus",
			ohci_readl (controller, &regs->intrstatus),
			next, size);
	ohci_dump_intr_mask (controller, "intrenable",
			ohci_readl (controller, &regs->intrenable),
			next, size);
	// intrdisable always same as intrenable

	/* ED pointer registers: printed only when non-zero */
	maybe_print_eds (controller, "ed_periodcurrent",
			ohci_readl (controller, &regs->ed_periodcurrent),
			next, size);

	maybe_print_eds (controller, "ed_controlhead",
			ohci_readl (controller, &regs->ed_controlhead),
			next, size);
	maybe_print_eds (controller, "ed_controlcurrent",
			ohci_readl (controller, &regs->ed_controlcurrent),
			next, size);

	maybe_print_eds (controller, "ed_bulkhead",
			ohci_readl (controller, &regs->ed_bulkhead),
			next, size);
	maybe_print_eds (controller, "ed_bulkcurrent",
			ohci_readl (controller, &regs->ed_bulkcurrent),
			next, size);

	maybe_print_eds (controller, "donehead",
			ohci_readl (controller, &regs->donehead), next, size);

	/* broken fminterval means traffic won't flow! */
	ohci_dbg (controller, "fminterval %08x\n",
			ohci_readl (controller, &regs->fminterval));
}
201
/* Decode one roothub.portstatus register into its symbolic flag names.
 * Hygiene fix: use the macro's own 'value' argument instead of silently
 * capturing a variable named 'temp' from the caller's scope (the sole
 * caller passes temp, so the expansion is unchanged); also drop the
 * trailing semicolon so call sites supply their own. */
#define dbg_port_sw(hc,num,value,next,size) \
	ohci_dbg_sw (hc, next, size, \
		"roothub.portstatus [%d] " \
		"0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n", \
		num, (value), \
		((value) & RH_PS_PRSC) ? " PRSC" : "", \
		((value) & RH_PS_OCIC) ? " OCIC" : "", \
		((value) & RH_PS_PSSC) ? " PSSC" : "", \
		((value) & RH_PS_PESC) ? " PESC" : "", \
		((value) & RH_PS_CSC) ? " CSC" : "", \
		\
		((value) & RH_PS_LSDA) ? " LSDA" : "", \
		((value) & RH_PS_PPS) ? " PPS" : "", \
		((value) & RH_PS_PRS) ? " PRS" : "", \
		((value) & RH_PS_POCI) ? " POCI" : "", \
		((value) & RH_PS_PSS) ? " PSS" : "", \
		\
		((value) & RH_PS_PES) ? " PES" : "", \
		((value) & RH_PS_CCS) ? " CCS" : "" \
		)
222
223
/* Dump root-hub state: the hub descriptor registers (when 'verbose') and
   one portstatus line per downstream port.  Bails out early when
   roothub.a reads back all-ones (controller gone/unclocked). */
static void
ohci_dump_roothub (
	struct ohci_hcd *controller,
	int verbose,
	char **next,
	unsigned *size)
{
	u32			temp, ndp, i;

	temp = roothub_a (controller);
	if (temp == ~(u32)0)
		return;
	/* NDP = number of downstream ports */
	ndp = (temp & RH_A_NDP);

	if (verbose) {
		ohci_dbg_sw (controller, next, size,
			"roothub.a %08x POTPGT=%d%s%s%s%s%s NDP=%d\n", temp,
			((temp & RH_A_POTPGT) >> 24) & 0xff,
			(temp & RH_A_NOCP) ? " NOCP" : "",
			(temp & RH_A_OCPM) ? " OCPM" : "",
			(temp & RH_A_DT) ? " DT" : "",
			(temp & RH_A_NPS) ? " NPS" : "",
			(temp & RH_A_PSM) ? " PSM" : "",
			ndp
			);
		temp = roothub_b (controller);
		ohci_dbg_sw (controller, next, size,
			"roothub.b %08x PPCM=%04x DR=%04x\n",
			temp,
			(temp & RH_B_PPCM) >> 16,
			(temp & RH_B_DR)
			);
		temp = roothub_status (controller);
		ohci_dbg_sw (controller, next, size,
			"roothub.status %08x%s%s%s%s%s%s\n",
			temp,
			(temp & RH_HS_CRWE) ? " CRWE" : "",
			(temp & RH_HS_OCIC) ? " OCIC" : "",
			(temp & RH_HS_LPSC) ? " LPSC" : "",
			(temp & RH_HS_DRWE) ? " DRWE" : "",
			(temp & RH_HS_OCI) ? " OCI" : "",
			(temp & RH_HS_LPS) ? " LPS" : ""
			);
	}

	/* per-port status lines (dbg_port_sw reads 'temp') */
	for (i = 0; i < ndp; i++) {
		temp = roothub_portstatus (controller, i);
		dbg_port_sw (controller, i, temp, next, size);
	}
}
274
275static void ohci_dump (struct ohci_hcd *controller, int verbose)
276{
277 ohci_dbg (controller, "OHCI controller state\n");
278
279 // dumps some of the state we know about
280 ohci_dump_status (controller, NULL, NULL);
281 if (controller->hcca)
282 ohci_dbg (controller,
283 "hcca frame #%04x\n", ohci_frame_no(controller));
284 ohci_dump_roothub (controller, 1, NULL, NULL);
285}
286
/* shared data-toggle labels used by the TD and ED dumpers below */
static const char data0 [] = "DATA0";
static const char data1 [] = "DATA1";
289
/* Dump one TD to the kernel log: the common header (info word, owning
 * urb, hw next-TD link), then either the general control/bulk/interrupt
 * layout or the iso layout (bp0/be plus the per-packet PSW array).
 */
static void ohci_dump_td (const struct ohci_hcd *ohci, const char *label,
		const struct td *td)
{
	u32	tmp = hc32_to_cpup (ohci, &td->hwINFO);

	ohci_dbg (ohci, "%s td %p%s; urb %p index %d; hw next td %08x\n",
		label, td,
		(tmp & TD_DONE) ? " (DONE)" : "",
		td->urb, td->index,
		hc32_to_cpup (ohci, &td->hwNextTD));
	if ((tmp & TD_ISO) == 0) {
		const char	*toggle, *pid;
		u32	cbp, be;

		/* decode the data-toggle control field */
		switch (tmp & TD_T) {
		case TD_T_DATA0: toggle = data0; break;
		case TD_T_DATA1: toggle = data1; break;
		case TD_T_TOGGLE: toggle = "(CARRY)"; break;
		default: toggle = "(?)"; break;
		}
		/* decode the direction/PID field */
		switch (tmp & TD_DP) {
		case TD_DP_SETUP: pid = "SETUP"; break;
		case TD_DP_IN: pid = "IN"; break;
		case TD_DP_OUT: pid = "OUT"; break;
		default: pid = "(bad pid)"; break;
		}
		ohci_dbg (ohci, "     info %08x CC=%x %s DI=%d %s %s\n", tmp,
			TD_CC_GET(tmp), /* EC, */ toggle,
			(tmp & TD_DI) >> 21, pid,
			(tmp & TD_R) ? "R" : "");
		cbp = hc32_to_cpup (ohci, &td->hwCBP);
		be = hc32_to_cpup (ohci, &td->hwBE);
		/* a zero CBP means the buffer is fully consumed */
		ohci_dbg (ohci, "     cbp %08x be %08x (len %d)\n", cbp, be,
			cbp ? (be + 1 - cbp) : 0);
	} else {
		unsigned	i;
		ohci_dbg (ohci, "     info %08x CC=%x FC=%d DI=%d SF=%04x\n", tmp,
			TD_CC_GET(tmp),
			(tmp >> 24) & 0x07,		/* FrameCount */
			(tmp & TD_DI) >> 21,
			tmp & 0x0000ffff);		/* StartingFrame */
		/* bp0 is the page-aligned buffer start */
		ohci_dbg (ohci, "     bp0 %08x be %08x\n",
			hc32_to_cpup (ohci, &td->hwCBP) & ~0x0fff,
			hc32_to_cpup (ohci, &td->hwBE));
		/* per-packet status words: CC in the top nibble; the low
		 * 12 bits hold an offset before, or a size after, completion
		 */
		for (i = 0; i < MAXPSW; i++) {
			u16	psw = ohci_hwPSW (ohci, td, i);
			int	cc = (psw >> 12) & 0x0f;
			ohci_dbg (ohci, "       psw [%d] = %2x, CC=%x %s=%d\n", i,
				psw, cc,
				(cc >= 0x0e) ? "OFFSET" : "SIZE",
				psw & 0x0fff);
		}
	}
}
344
/* Dump one ED: driver-side state, the hw info word decoded field by
 * field, the head/tail TD pointers, and (verbose only) every queued TD.
 * caller MUST own hcd spinlock if verbose is set!  (the td_list walk
 * races with the HC's done-list processing otherwise)
 */
static void __attribute__((unused))
ohci_dump_ed (const struct ohci_hcd *ohci, const char *label,
		const struct ed *ed, int verbose)
{
	u32	tmp = hc32_to_cpu (ohci, ed->hwINFO);
	char	*type = "";

	ohci_dbg (ohci, "%s, ed %p state 0x%x type %s; next ed %08x\n",
		label,
		ed, ed->state, edstring (ed->type),
		hc32_to_cpup (ohci, &ed->hwNextED));
	switch (tmp & (ED_IN|ED_OUT)) {
	case ED_OUT: type = "-OUT"; break;
	case ED_IN: type = "-IN"; break;
	/* else from TDs ... control */
	}
	ohci_dbg (ohci,
		"  info %08x MAX=%d%s%s%s%s EP=%d%s DEV=%d\n", tmp,
		0x03ff & (tmp >> 16),		/* MaxPacketSize */
		(tmp & ED_DEQUEUE) ? " DQ" : "",
		(tmp & ED_ISO) ? " ISO" : "",
		(tmp & ED_SKIP) ? " SKIP" : "",
		(tmp & ED_LOWSPEED) ? " LOW" : "",
		0x000f & (tmp >> 7),		/* endpoint number */
		type,
		0x007f & tmp);			/* device address */
	tmp = hc32_to_cpup (ohci, &ed->hwHeadP);
	/* head pointer low bits carry toggle-carry (C) and halt (H) */
	ohci_dbg (ohci, "  tds: head %08x %s%s tail %08x%s\n",
		tmp,
		(tmp & ED_C) ? data1 : data0,
		(tmp & ED_H) ? " HALT" : "",
		hc32_to_cpup (ohci, &ed->hwTailP),
		verbose ? "" : " (not listing)");
	if (verbose) {
		struct list_head	*tmp;

		/* use ed->td_list because HC concurrently modifies
		 * hwNextTD as it accumulates ed_donelist.
		 */
		list_for_each (tmp, &ed->td_list) {
			struct td		*td;
			td = list_entry (tmp, struct td, td_list);
			ohci_dump_td (ohci, "  ->", td);
		}
	}
}
392
393#else
/* !DEBUG builds: dumping compiles away to nothing */
static inline void ohci_dump (struct ohci_hcd *controller, int verbose) {}
395
396#undef OHCI_VERBOSE_DEBUG
397
398#endif /* DEBUG */
399
400/*-------------------------------------------------------------------------*/
401
402#ifdef STUB_DEBUG_FILES
403
/* STUB_DEBUG_FILES: sysfs debug attributes compile away to nothing */
static inline void create_debug_files (struct ohci_hcd *bus) { }
static inline void remove_debug_files (struct ohci_hcd *bus) { }
406
407#else
408
/* Format a snapshot of one bulk or control ED schedule into 'buf'
 * (at most 'count' bytes) and return the number of bytes written.
 * 'ed' may be any ED on the list; the walk first rewinds to the list
 * head, then prints one line per ED plus one sub-line per queued TD.
 * Caller holds ohci->lock (the list and hw words are live otherwise).
 */
static ssize_t
show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
{
	unsigned		temp, size = count;

	if (!ed)
		return 0;

	/* print first --> last */
	while (ed->ed_prev)
		ed = ed->ed_prev;

	/* dump a snapshot of the bulk or control schedule */
	while (ed) {
		u32		info = hc32_to_cpu (ohci, ed->hwINFO);
		u32		headp = hc32_to_cpu (ohci, ed->hwHeadP);
		struct list_head	*entry;
		struct td	*td;

		temp = scnprintf (buf, size,
			"ed/%p %cs dev%d ep%d%s max %d %08x%s%s %s",
			ed,
			(info & ED_LOWSPEED) ? 'l' : 'f',
			info & 0x7f,
			(info >> 7) & 0xf,
			(info & ED_IN) ? "in" : "out",
			0x03ff & (info >> 16),
			info,
			(info & ED_SKIP) ? " s" : "",
			(headp & ED_H) ? " H" : "",
			(headp & ED_C) ? data1 : data0);
		size -= temp;
		buf += temp;

		list_for_each (entry, &ed->td_list) {
			u32		cbp, be;

			td = list_entry (entry, struct td, td_list);
			info = hc32_to_cpup (ohci, &td->hwINFO);
			cbp = hc32_to_cpup (ohci, &td->hwCBP);
			be = hc32_to_cpup (ohci, &td->hwBE);
			/* the ({ ... }) statement expression picks a PID
			 * label from the TD's direction field */
			temp = scnprintf (buf, size,
				"\n\ttd %p %s %d cc=%x urb %p (%08x)",
				td,
				({ char *pid;
				switch (info & TD_DP) {
				case TD_DP_SETUP: pid = "setup"; break;
				case TD_DP_IN: pid = "in"; break;
				case TD_DP_OUT: pid = "out"; break;
				default: pid = "(?)"; break;
				 } pid;}),
				cbp ? (be + 1 - cbp) : 0,
				TD_CC_GET (info), td->urb, info);
			size -= temp;
			buf += temp;
		}

		temp = scnprintf (buf, size, "\n");
		size -= temp;
		buf += temp;

		ed = ed->ed_next;
	}
	return count - size;
}
474
475static ssize_t
476show_async (struct class_device *class_dev, char *buf)
477{
478 struct usb_bus *bus;
479 struct usb_hcd *hcd;
480 struct ohci_hcd *ohci;
481 size_t temp;
482 unsigned long flags;
483
484 bus = to_usb_bus(class_dev);
485 hcd = bus->hcpriv;
486 ohci = hcd_to_ohci(hcd);
487
488 /* display control and bulk lists together, for simplicity */
489 spin_lock_irqsave (&ohci->lock, flags);
490 temp = show_list (ohci, buf, PAGE_SIZE, ohci->ed_controltail);
491 temp += show_list (ohci, buf + temp, PAGE_SIZE - temp, ohci->ed_bulktail);
492 spin_unlock_irqrestore (&ohci->lock, flags);
493
494 return temp;
495}
496static CLASS_DEVICE_ATTR (async, S_IRUGO, show_async, NULL);
497
498
499#define DBG_SCHED_LIMIT 64
500
/* sysfs "periodic" attribute: dump a snapshot of the periodic schedule,
 * one line per interrupt-tree slot, listing each ED on that branch.
 * A small 'seen' table remembers EDs already printed so an ED shared
 * by several slots gets its full decode only once; after that only
 * its address is shown and the branch walk stops.
 */
static ssize_t
show_periodic (struct class_device *class_dev, char *buf)
{
	struct usb_bus		*bus;
	struct usb_hcd		*hcd;
	struct ohci_hcd		*ohci;
	struct ed		**seen, *ed;
	unsigned long		flags;
	unsigned		temp, size, seen_count;
	char			*next;
	unsigned		i;

	/* atomic allocation: we're called with no particular context */
	if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
		return 0;
	seen_count = 0;

	bus = to_usb_bus(class_dev);
	hcd = bus->hcpriv;
	ohci = hcd_to_ohci(hcd);
	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf (next, size, "size = %d\n", NUM_INTS);
	size -= temp;
	next += temp;

	/* dump a snapshot of the periodic schedule (and load) */
	spin_lock_irqsave (&ohci->lock, flags);
	for (i = 0; i < NUM_INTS; i++) {
		if (!(ed = ohci->periodic [i]))
			continue;

		temp = scnprintf (next, size, "%2d [%3d]:", i, ohci->load [i]);
		size -= temp;
		next += temp;

		do {
			temp = scnprintf (next, size, " ed%d/%p",
				ed->interval, ed);
			size -= temp;
			next += temp;
			/* linear scan of the seen table; temp doubles as
			 * the scan index here */
			for (temp = 0; temp < seen_count; temp++) {
				if (seen [temp] == ed)
					break;
			}

			/* show more info the first time around */
			if (temp == seen_count) {
				u32	info = hc32_to_cpu (ohci, ed->hwINFO);
				struct list_head	*entry;
				unsigned		qlen = 0;

				/* qlen measured here in TDs, not urbs */
				list_for_each (entry, &ed->td_list)
					qlen++;

				temp = scnprintf (next, size,
					" (%cs dev%d ep%d%s-%s qlen %u"
					" max %d %08x%s%s)",
					(info & ED_LOWSPEED) ? 'l' : 'f',
					info & 0x7f,
					(info >> 7) & 0xf,
					(info & ED_IN) ? "in" : "out",
					(info & ED_ISO) ? "iso" : "int",
					qlen,
					0x03ff & (info >> 16),
					info,
					(info & ED_SKIP) ? " K" : "",
					(ed->hwHeadP &
						cpu_to_hc32(ohci, ED_H)) ?
							" H" : "");
				size -= temp;
				next += temp;

				/* table full: keep dumping, but later
				 * branches may re-decode this ED */
				if (seen_count < DBG_SCHED_LIMIT)
					seen [seen_count++] = ed;

				ed = ed->ed_next;

			} else {
				/* we've seen it and what's after */
				temp = 0;
				ed = NULL;
			}

		} while (ed);

		temp = scnprintf (next, size, "\n");
		size -= temp;
		next += temp;
	}
	spin_unlock_irqrestore (&ohci->lock, flags);
	kfree (seen);

	return PAGE_SIZE - size;
}
597static CLASS_DEVICE_ATTR (periodic, S_IRUGO, show_periodic, NULL);
598
599
600#undef DBG_SCHED_LIMIT
601
602static ssize_t
603show_registers (struct class_device *class_dev, char *buf)
604{
605 struct usb_bus *bus;
606 struct usb_hcd *hcd;
607 struct ohci_hcd *ohci;
608 struct ohci_regs __iomem *regs;
609 unsigned long flags;
610 unsigned temp, size;
611 char *next;
612 u32 rdata;
613
614 bus = to_usb_bus(class_dev);
615 hcd = bus->hcpriv;
616 ohci = hcd_to_ohci(hcd);
617 regs = ohci->regs;
618 next = buf;
619 size = PAGE_SIZE;
620
621 spin_lock_irqsave (&ohci->lock, flags);
622
623 /* dump driver info, then registers in spec order */
624
625 ohci_dbg_sw (ohci, &next, &size,
626 "bus %s, device %s\n"
627 "%s\n"
628 "%s version " DRIVER_VERSION "\n",
629 hcd->self.controller->bus->name,
630 hcd->self.controller->bus_id,
631 hcd->product_desc,
632 hcd_name);
633
634 if (bus->controller->power.power_state) {
635 size -= scnprintf (next, size,
636 "SUSPENDED (no register access)\n");
637 goto done;
638 }
639
640 ohci_dump_status(ohci, &next, &size);
641
642 /* hcca */
643 if (ohci->hcca)
644 ohci_dbg_sw (ohci, &next, &size,
645 "hcca frame 0x%04x\n", ohci_frame_no(ohci));
646
647 /* other registers mostly affect frame timings */
648 rdata = ohci_readl (ohci, &regs->fminterval);
649 temp = scnprintf (next, size,
650 "fmintvl 0x%08x %sFSMPS=0x%04x FI=0x%04x\n",
651 rdata, (rdata >> 31) ? "FIT " : "",
652 (rdata >> 16) & 0xefff, rdata & 0xffff);
653 size -= temp;
654 next += temp;
655
656 rdata = ohci_readl (ohci, &regs->fmremaining);
657 temp = scnprintf (next, size, "fmremaining 0x%08x %sFR=0x%04x\n",
658 rdata, (rdata >> 31) ? "FRT " : "",
659 rdata & 0x3fff);
660 size -= temp;
661 next += temp;
662
663 rdata = ohci_readl (ohci, &regs->periodicstart);
664 temp = scnprintf (next, size, "periodicstart 0x%04x\n",
665 rdata & 0x3fff);
666 size -= temp;
667 next += temp;
668
669 rdata = ohci_readl (ohci, &regs->lsthresh);
670 temp = scnprintf (next, size, "lsthresh 0x%04x\n",
671 rdata & 0x3fff);
672 size -= temp;
673 next += temp;
674
675 /* roothub */
676 ohci_dump_roothub (ohci, 1, &next, &size);
677
678done:
679 spin_unlock_irqrestore (&ohci->lock, flags);
680 return PAGE_SIZE - size;
681}
682static CLASS_DEVICE_ATTR (registers, S_IRUGO, show_registers, NULL);
683
684
685static inline void create_debug_files (struct ohci_hcd *ohci)
686{
687 struct class_device *cldev = &ohci_to_hcd(ohci)->self.class_dev;
688
689 class_device_create_file(cldev, &class_device_attr_async);
690 class_device_create_file(cldev, &class_device_attr_periodic);
691 class_device_create_file(cldev, &class_device_attr_registers);
692 ohci_dbg (ohci, "created debug files\n");
693}
694
695static inline void remove_debug_files (struct ohci_hcd *ohci)
696{
697 struct class_device *cldev = &ohci_to_hcd(ohci)->self.class_dev;
698
699 class_device_remove_file(cldev, &class_device_attr_async);
700 class_device_remove_file(cldev, &class_device_attr_periodic);
701 class_device_remove_file(cldev, &class_device_attr_registers);
702}
703
704#endif
705
706/*-------------------------------------------------------------------------*/
707
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
new file mode 100644
index 000000000000..1e27f10c1592
--- /dev/null
+++ b/drivers/usb/host/ohci-hcd.c
@@ -0,0 +1,925 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
6 *
7 * [ Initialisation is based on Linus' ]
8 * [ uhci code and gregs ohci fragments ]
9 * [ (C) Copyright 1999 Linus Torvalds ]
10 * [ (C) Copyright 1999 Gregory P. Smith]
11 *
12 *
13 * OHCI is the main "non-Intel/VIA" standard for USB 1.1 host controller
14 * interfaces (though some non-x86 Intel chips use it). It supports
 * smarter hardware than UHCI.  A download link for the spec is available
 * through the http://www.usb.org website.
17 *
18 * History:
19 *
20 * 2004/03/24 LH7A404 support (Durgesh Pattamatta & Marc Singer)
21 * 2004/02/04 use generic dma_* functions instead of pci_* (dsaxena@plexity.net)
22 * 2003/02/24 show registers in sysfs (Kevin Brosius)
23 *
24 * 2002/09/03 get rid of ed hashtables, rework periodic scheduling and
25 * bandwidth accounting; if debugging, show schedules in driverfs
26 * 2002/07/19 fixes to management of ED and schedule state.
27 * 2002/06/09 SA-1111 support (Christopher Hoover)
28 * 2002/06/01 remember frame when HC won't see EDs any more; use that info
29 * to fix urb unlink races caused by interrupt latency assumptions;
30 * minor ED field and function naming updates
31 * 2002/01/18 package as a patch for 2.5.3; this should match the
32 * 2.4.17 kernel modulo some bugs being fixed.
33 *
34 * 2001/10/18 merge pmac cleanup (Benjamin Herrenschmidt) and bugfixes
35 * from post-2.4.5 patches.
36 * 2001/09/20 URB_ZERO_PACKET support; hcca_dma portability, OPTi warning
37 * 2001/09/07 match PCI PM changes, errnos from Linus' tree
38 * 2001/05/05 fork 2.4.5 version into "hcd" framework, cleanup, simplify;
39 * pbook pci quirks gone (please fix pbook pci sw!) (db)
40 *
41 * 2001/04/08 Identify version on module load (gb)
42 * 2001/03/24 td/ed hashing to remove bus_to_virt (Steve Longerbeam);
 *	pci_map_single (db)
44 * 2001/03/21 td and dev/ed allocation uses new pci_pool API (db)
45 * 2001/03/07 hcca allocation uses pci_alloc_consistent (Steve Longerbeam)
46 *
47 * 2000/09/26 fixed races in removing the private portion of the urb
48 * 2000/09/07 disable bulk and control lists when unlinking the last
49 * endpoint descriptor in order to avoid unrecoverable errors on
50 * the Lucent chips. (rwc@sgi)
51 * 2000/08/29 use bandwidth claiming hooks (thanks Randy!), fix some
52 * urb unlink probs, indentation fixes
53 * 2000/08/11 various oops fixes mostly affecting iso and cleanup from
54 * device unplugs.
55 * 2000/06/28 use PCI hotplug framework, for better power management
56 * and for Cardbus support (David Brownell)
57 * 2000/earlier: fixes for NEC/Lucent chips; suspend/resume handling
58 * when the controller loses power; handle UE; cleanup; ...
59 *
60 * v5.2 1999/12/07 URB 3rd preview,
61 * v5.1 1999/11/30 URB 2nd preview, cpia, (usb-scsi)
62 * v5.0 1999/11/22 URB Technical preview, Paul Mackerras powerbook susp/resume
63 * i386: HUB, Keyboard, Mouse, Printer
64 *
65 * v4.3 1999/10/27 multiple HCs, bulk_request
66 * v4.2 1999/09/05 ISO API alpha, new dev alloc, neg Error-codes
67 * v4.1 1999/08/27 Randy Dunlap's - ISO API first impl.
68 * v4.0 1999/08/18
69 * v3.0 1999/06/25
70 * v2.1 1999/05/09 code clean up
71 * v2.0 1999/05/04
72 * v1.0 1999/04/27 initial release
73 *
74 * This file is licenced under the GPL.
75 */
76
77#include <linux/config.h>
78
79#ifdef CONFIG_USB_DEBUG
80# define DEBUG
81#else
82# undef DEBUG
83#endif
84
85#include <linux/module.h>
86#include <linux/moduleparam.h>
87#include <linux/pci.h>
88#include <linux/kernel.h>
89#include <linux/delay.h>
90#include <linux/ioport.h>
91#include <linux/sched.h>
92#include <linux/slab.h>
93#include <linux/smp_lock.h>
94#include <linux/errno.h>
95#include <linux/init.h>
96#include <linux/timer.h>
97#include <linux/list.h>
98#include <linux/interrupt.h> /* for in_interrupt () */
99#include <linux/usb.h>
100#include <linux/usb_otg.h>
101#include "../core/hcd.h"
102#include <linux/dma-mapping.h>
103#include <linux/dmapool.h> /* needed by ohci-mem.c when no PCI */
104
105#include <asm/io.h>
106#include <asm/irq.h>
107#include <asm/system.h>
108#include <asm/unaligned.h>
109#include <asm/byteorder.h>
110
111
112#define DRIVER_VERSION "2004 Nov 08"
113#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
114#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
115
116/*-------------------------------------------------------------------------*/
117
118// #define OHCI_VERBOSE_DEBUG /* not always helpful */
119
120/* For initializing controller (mask in an HCFS mode too) */
121#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
122#define OHCI_INTR_INIT \
123 (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | OHCI_INTR_WDH)
124
125#ifdef __hppa__
126/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
127#define IR_DISABLE
128#endif
129
130#ifdef CONFIG_ARCH_OMAP
131/* OMAP doesn't support IR (no SMM; not needed) */
132#define IR_DISABLE
133#endif
134
135/*-------------------------------------------------------------------------*/
136
137static const char hcd_name [] = "ohci_hcd";
138
139#include "ohci.h"
140
141static void ohci_dump (struct ohci_hcd *ohci, int verbose);
142static int ohci_init (struct ohci_hcd *ohci);
143static void ohci_stop (struct usb_hcd *hcd);
144
145#include "ohci-hub.c"
146#include "ohci-dbg.c"
147#include "ohci-mem.c"
148#include "ohci-q.c"
149
150
151/*
152 * On architectures with edge-triggered interrupts we must never return
153 * IRQ_NONE.
154 */
155#if defined(CONFIG_SA1111) /* ... or other edge-triggered systems */
156#define IRQ_NOTMINE IRQ_HANDLED
157#else
158#define IRQ_NOTMINE IRQ_NONE
159#endif
160
161
162/* Some boards misreport power switching/overcurrent */
163static int distrust_firmware = 1;
164module_param (distrust_firmware, bool, 0);
165MODULE_PARM_DESC (distrust_firmware,
166 "true to distrust firmware power/overcurrent setup");
167
168/* Some boards leave IR set wrongly, since they fail BIOS/SMM handshakes */
169static int no_handshake = 0;
170module_param (no_handshake, bool, 0);
171MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
172
173/*-------------------------------------------------------------------------*/
174
175/*
176 * queue up an urb for anything except the root hub
177 */
/* Queue an urb to its endpoint's ED: size the TD array, allocate the
 * per-urb private state and TDs outside the lock, then (under the
 * lock) schedule the ED if idle and hand the TDs to the hardware.
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is freed again.
 */
static int ohci_urb_enqueue (
	struct usb_hcd	*hcd,
	struct usb_host_endpoint *ep,
	struct urb	*urb,
	int		mem_flags
) {
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	struct ed	*ed;
	urb_priv_t	*urb_priv;
	unsigned int	pipe = urb->pipe;
	int		i, size = 0;
	unsigned long	flags;
	int		retval = 0;

#ifdef OHCI_VERBOSE_DEBUG
	urb_print (urb, "SUB", usb_pipein (pipe));
#endif

	/* every endpoint has a ed, locate and maybe (re)initialize it */
	if (! (ed = ed_get (ohci, ep, urb->dev, pipe, urb->interval)))
		return -ENOMEM;

	/* for the private part of the URB we need the number of TDs (size) */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* td_submit_urb() doesn't yet handle these */
		if (urb->transfer_buffer_length > 4096)
			return -EMSGSIZE;

		/* 1 TD for setup, 1 for ACK, plus ... */
		size = 2;
		/* FALLTHROUGH */
	// case PIPE_INTERRUPT:
	// case PIPE_BULK:
	default:
		/* one TD for every 4096 Bytes (can be upto 8K) */
		size += urb->transfer_buffer_length / 4096;
		/* ... and for any remaining bytes ... */
		if ((urb->transfer_buffer_length % 4096) != 0)
			size++;
		/* ... and maybe a zero length packet to wrap it up */
		if (size == 0)
			size++;
		else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
			&& (urb->transfer_buffer_length
				% usb_maxpacket (urb->dev, pipe,
					usb_pipeout (pipe))) == 0)
			size++;
		break;
	case PIPE_ISOCHRONOUS: /* number of packets from URB */
		size = urb->number_of_packets;
		break;
	}

	/* allocate the private part of the URB (trailing td pointer array) */
	urb_priv = kmalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
			mem_flags);
	if (!urb_priv)
		return -ENOMEM;
	memset (urb_priv, 0, sizeof (urb_priv_t) + size * sizeof (struct td *));
	INIT_LIST_HEAD (&urb_priv->pending);
	urb_priv->length = size;
	urb_priv->ed = ed;

	/* allocate the TDs (deferring hash chain updates) */
	for (i = 0; i < size; i++) {
		urb_priv->td [i] = td_alloc (ohci, mem_flags);
		if (!urb_priv->td [i]) {
			/* trim length so urb_free_priv() frees only the
			 * TDs that were actually allocated */
			urb_priv->length = i;
			urb_free_priv (ohci, urb_priv);
			return -ENOMEM;
		}
	}

	spin_lock_irqsave (&ohci->lock, flags);

	/* don't submit to a dead HC */
	if (!HC_IS_RUNNING(hcd->state)) {
		retval = -ENODEV;
		goto fail;
	}

	/* in case of unlink-during-submit */
	spin_lock (&urb->lock);
	if (urb->status != -EINPROGRESS) {
		spin_unlock (&urb->lock);
		urb->hcpriv = urb_priv;
		/* give the urb straight back; retval stays 0 so 'fail'
		 * won't free urb_priv (finish_urb owns it now) */
		finish_urb (ohci, urb, NULL);
		retval = 0;
		goto fail;
	}

	/* schedule the ed if needed */
	if (ed->state == ED_IDLE) {
		retval = ed_schedule (ohci, ed);
		if (retval < 0)
			goto fail0;
		if (ed->type == PIPE_ISOCHRONOUS) {
			u16	frame = ohci_frame_no(ohci);

			/* delay a few frames before the first TD */
			frame += max_t (u16, 8, ed->interval);
			frame &= ~(ed->interval - 1);
			frame |= ed->branch;
			urb->start_frame = frame;

			/* yes, only URB_ISO_ASAP is supported, and
			 * urb->start_frame is never used as input.
			 */
		}
	} else if (ed->type == PIPE_ISOCHRONOUS)
		urb->start_frame = ed->last_iso + ed->interval;

	/* fill the TDs and link them to the ed; and
	 * enable that part of the schedule, if needed
	 * and update count of queued periodic urbs
	 */
	urb->hcpriv = urb_priv;
	td_submit_urb (ohci, urb);

fail0:
	spin_unlock (&urb->lock);
fail:
	if (retval)
		urb_free_priv (ohci, urb_priv);
	spin_unlock_irqrestore (&ohci->lock, flags);
	return retval;
}
306
307/*
308 * decouple the URB from the HC queues (TDs, urb_priv); it's
309 * already marked using urb->status. reporting is always done
310 * asynchronously, and we might be dealing with an urb that's
311 * partially transferred, or an ED with other urbs being unlinked.
312 */
/* Unlink an urb.  With a live HC, just kick off an asynchronous ED
 * unlink (completion happens later from the SOF interrupt path); with
 * a dead HC, give the urb back synchronously since the hardware will
 * never do it.  Always returns 0.
 */
static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	unsigned long		flags;

#ifdef OHCI_VERBOSE_DEBUG
	urb_print (urb, "UNLINK", 1);
#endif

	spin_lock_irqsave (&ohci->lock, flags);
	if (HC_IS_RUNNING(hcd->state)) {
		urb_priv_t  *urb_priv;

		/* Unless an IRQ completed the unlink while it was being
		 * handed to us, flag it for unlink and giveback, and force
		 * some upcoming INTR_SF to call finish_unlinks()
		 */
		urb_priv = urb->hcpriv;
		if (urb_priv) {
			if (urb_priv->ed->state == ED_OPER)
				start_ed_unlink (ohci, urb_priv->ed);
		}
	} else {
		/*
		 * with HC dead, we won't respect hc queue pointers
		 * any more ... just clean up every urb's memory.
		 */
		if (urb->hcpriv)
			finish_urb (ohci, urb, NULL);
	}
	spin_unlock_irqrestore (&ohci->lock, flags);
	return 0;
}
346
347/*-------------------------------------------------------------------------*/
348
349/* frees config/altsetting state for endpoints,
350 * including ED memory, dummy TD, and bulk/intr data toggle
351 */
352
/* Free an endpoint's ED (and its dummy TD) once the hardware is done
 * with it.  If the ED is mid-unlink, sleep-and-rescan until the SOF
 * interrupt finishes the unlink, with a bounded retry count in case
 * those interrupts are being lost.  If TDs are still queued in ED_IDLE,
 * the caller broke the contract and we leak the ED rather than corrupt
 * hardware state.
 */
static void
ohci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	unsigned long		flags;
	struct ed		*ed = ep->hcpriv;
	unsigned		limit = 1000;

	/* ASSERT:  any requests/urbs are being unlinked */
	/* ASSERT:  nobody can be submitting urbs for this any more */

	if (!ed)
		return;

rescan:
	spin_lock_irqsave (&ohci->lock, flags);

	if (!HC_IS_RUNNING (hcd->state)) {
sanitize:
		/* HC dead: it can't touch the schedule, so finish the
		 * unlinks ourselves right now */
		ed->state = ED_IDLE;
		finish_unlinks (ohci, 0, NULL);
	}

	switch (ed->state) {
	case ED_UNLINK:		/* wait for hw to finish? */
		/* major IRQ delivery trouble loses INTR_SF too... */
		if (limit-- == 0) {
			ohci_warn (ohci, "IRQ INTR_SF lossage\n");
			goto sanitize;
		}
		/* drop the lock, nap one tick, and look again */
		spin_unlock_irqrestore (&ohci->lock, flags);
		set_current_state (TASK_UNINTERRUPTIBLE);
		schedule_timeout (1);
		goto rescan;
	case ED_IDLE:		/* fully unlinked */
		if (list_empty (&ed->td_list)) {
			td_free (ohci, ed->dummy);
			ed_free (ohci, ed);
			break;
		}
		/* else FALL THROUGH */
	default:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  can't recover; must leak ed.
		 */
		ohci_err (ohci, "leak ed %p (#%02x) state %d%s\n",
			ed, ep->desc.bEndpointAddress, ed->state,
			list_empty (&ed->td_list) ? "" : " (has tds)");
		td_free (ohci, ed->dummy);
		break;
	}
	ep->hcpriv = NULL;
	spin_unlock_irqrestore (&ohci->lock, flags);
	return;
}
408
/* hcd framework hook: report the current frame number (from the HCCA) */
static int ohci_get_frame (struct usb_hcd *hcd)
{
	return ohci_frame_no(hcd_to_ohci (hcd));
}
415
/* Put the controller into USB RESET (HCFS = 00), preserving only the
 * RemoteWakeupConnected bit from the current control register.
 */
static void ohci_usb_reset (struct ohci_hcd *ohci)
{
	ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
	ohci->hc_control &= OHCI_CTRL_RWC;
	ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
}
422
423/*-------------------------------------------------------------------------*
424 * HC functions
425 *-------------------------------------------------------------------------*/
426
/* init memory, and kick BIOS/SMM off */

/* One-time controller setup: take ownership from BIOS/SMM firmware
 * (unless IR_DISABLE or the no_handshake module parameter says not
 * to), mask all interrupts, then allocate the HCCA and the TD/ED
 * memory pools.  Safe to call again after a resume: the existing
 * HCCA short-circuits the allocation.
 * Returns 0 on success or a negative errno.
 */
static int ohci_init (struct ohci_hcd *ohci)
{
	int ret;

	disable (ohci);
	ohci->regs = ohci_to_hcd(ohci)->regs;
	ohci->next_statechange = jiffies;

#ifndef IR_DISABLE
	/* SMM owns the HC?  not for long! */
	if (!no_handshake && ohci_readl (ohci,
			&ohci->regs->control) & OHCI_CTRL_IR) {
		u32 temp;

		ohci_dbg (ohci, "USB HC TakeOver from BIOS/SMM\n");

		/* this timeout is arbitrary.  we make it long, so systems
		 * depending on usb keyboards may be usable even if the
		 * BIOS/SMM code seems pretty broken.
		 */
		temp = 500;	/* arbitrary: five seconds */

		/* request ownership change and poll for the firmware to
		 * drop the IR bit */
		ohci_writel (ohci, OHCI_INTR_OC, &ohci->regs->intrenable);
		ohci_writel (ohci, OHCI_OCR, &ohci->regs->cmdstatus);
		while (ohci_readl (ohci, &ohci->regs->control) & OHCI_CTRL_IR) {
			msleep (10);
			if (--temp == 0) {
				ohci_err (ohci, "USB HC takeover failed!"
					"  (BIOS/SMM bug)\n");
				return -EBUSY;
			}
		}
		ohci_usb_reset (ohci);
	}
#endif

	/* Disable HC interrupts */
	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
	// flush the writes
	(void) ohci_readl (ohci, &ohci->regs->control);

	/* HCCA already allocated (e.g. re-init after resume) */
	if (ohci->hcca)
		return 0;

	ohci->hcca = dma_alloc_coherent (ohci_to_hcd(ohci)->self.controller,
			sizeof *ohci->hcca, &ohci->hcca_dma, 0);
	if (!ohci->hcca)
		return -ENOMEM;

	/* on pool-init failure, ohci_stop() releases the HCCA too */
	if ((ret = ohci_mem_init (ohci)) < 0)
		ohci_stop (ohci_to_hcd(ohci));

	return ret;

}
484
485/*-------------------------------------------------------------------------*/
486
487/* Start an OHCI controller, set the BUS operational
488 * resets USB and controller
489 * enable interrupts
490 * connect the virtual root hub
491 */
492static int ohci_run (struct ohci_hcd *ohci)
493{
494 u32 mask, temp;
495 struct usb_device *udev;
496 struct usb_bus *bus;
497 int first = ohci->fminterval == 0;
498
499 disable (ohci);
500
501 /* boot firmware should have set this up (5.1.1.3.1) */
502 if (first) {
503
504 temp = ohci_readl (ohci, &ohci->regs->fminterval);
505 ohci->fminterval = temp & 0x3fff;
506 if (ohci->fminterval != FI)
507 ohci_dbg (ohci, "fminterval delta %d\n",
508 ohci->fminterval - FI);
509 ohci->fminterval |= FSMP (ohci->fminterval) << 16;
510 /* also: power/overcurrent flags in roothub.a */
511 }
512
513 /* Reset USB nearly "by the book". RemoteWakeupConnected
514 * saved if boot firmware (BIOS/SMM/...) told us it's connected
515 * (for OHCI integrated on mainboard, it normally is)
516 */
517 ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
518 ohci_dbg (ohci, "resetting from state '%s', control = 0x%x\n",
519 hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS),
520 ohci->hc_control);
521
522 if (ohci->hc_control & OHCI_CTRL_RWC
523 && !(ohci->flags & OHCI_QUIRK_AMD756))
524 ohci_to_hcd(ohci)->can_wakeup = 1;
525
526 switch (ohci->hc_control & OHCI_CTRL_HCFS) {
527 case OHCI_USB_OPER:
528 temp = 0;
529 break;
530 case OHCI_USB_SUSPEND:
531 case OHCI_USB_RESUME:
532 ohci->hc_control &= OHCI_CTRL_RWC;
533 ohci->hc_control |= OHCI_USB_RESUME;
534 temp = 10 /* msec wait */;
535 break;
536 // case OHCI_USB_RESET:
537 default:
538 ohci->hc_control &= OHCI_CTRL_RWC;
539 ohci->hc_control |= OHCI_USB_RESET;
540 temp = 50 /* msec wait */;
541 break;
542 }
543 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
544 // flush the writes
545 (void) ohci_readl (ohci, &ohci->regs->control);
546 msleep(temp);
547 temp = roothub_a (ohci);
548 if (!(temp & RH_A_NPS)) {
549 unsigned ports = temp & RH_A_NDP;
550
551 /* power down each port */
552 for (temp = 0; temp < ports; temp++)
553 ohci_writel (ohci, RH_PS_LSDA,
554 &ohci->regs->roothub.portstatus [temp]);
555 }
556 // flush those writes
557 (void) ohci_readl (ohci, &ohci->regs->control);
558 memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
559
560 /* 2msec timelimit here means no irqs/preempt */
561 spin_lock_irq (&ohci->lock);
562
563retry:
564 /* HC Reset requires max 10 us delay */
565 ohci_writel (ohci, OHCI_HCR, &ohci->regs->cmdstatus);
566 temp = 30; /* ... allow extra time */
567 while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
568 if (--temp == 0) {
569 spin_unlock_irq (&ohci->lock);
570 ohci_err (ohci, "USB HC reset timed out!\n");
571 return -1;
572 }
573 udelay (1);
574 }
575
576 /* now we're in the SUSPEND state ... must go OPERATIONAL
577 * within 2msec else HC enters RESUME
578 *
579 * ... but some hardware won't init fmInterval "by the book"
580 * (SiS, OPTi ...), so reset again instead. SiS doesn't need
581 * this if we write fmInterval after we're OPERATIONAL.
582 * Unclear about ALi, ServerWorks, and others ... this could
583 * easily be a longstanding bug in chip init on Linux.
584 */
585 if (ohci->flags & OHCI_QUIRK_INITRESET) {
586 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
587 // flush those writes
588 (void) ohci_readl (ohci, &ohci->regs->control);
589 }
590
591 /* Tell the controller where the control and bulk lists are
592 * The lists are empty now. */
593 ohci_writel (ohci, 0, &ohci->regs->ed_controlhead);
594 ohci_writel (ohci, 0, &ohci->regs->ed_bulkhead);
595
596 /* a reset clears this */
597 ohci_writel (ohci, (u32) ohci->hcca_dma, &ohci->regs->hcca);
598
599 periodic_reinit (ohci);
600
601 /* some OHCI implementations are finicky about how they init.
602 * bogus values here mean not even enumeration could work.
603 */
604 if ((ohci_readl (ohci, &ohci->regs->fminterval) & 0x3fff0000) == 0
605 || !ohci_readl (ohci, &ohci->regs->periodicstart)) {
606 if (!(ohci->flags & OHCI_QUIRK_INITRESET)) {
607 ohci->flags |= OHCI_QUIRK_INITRESET;
608 ohci_dbg (ohci, "enabling initreset quirk\n");
609 goto retry;
610 }
611 spin_unlock_irq (&ohci->lock);
612 ohci_err (ohci, "init err (%08x %04x)\n",
613 ohci_readl (ohci, &ohci->regs->fminterval),
614 ohci_readl (ohci, &ohci->regs->periodicstart));
615 return -EOVERFLOW;
616 }
617
618 /* start controller operations */
619 ohci->hc_control &= OHCI_CTRL_RWC;
620 ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
621 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
622 ohci_to_hcd(ohci)->state = HC_STATE_RUNNING;
623
624 /* wake on ConnectStatusChange, matching external hubs */
625 ohci_writel (ohci, RH_HS_DRWE, &ohci->regs->roothub.status);
626
627 /* Choose the interrupts we care about now, others later on demand */
628 mask = OHCI_INTR_INIT;
629 ohci_writel (ohci, mask, &ohci->regs->intrstatus);
630 ohci_writel (ohci, mask, &ohci->regs->intrenable);
631
632 /* handle root hub init quirks ... */
633 temp = roothub_a (ohci);
634 temp &= ~(RH_A_PSM | RH_A_OCPM);
635 if (ohci->flags & OHCI_QUIRK_SUPERIO) {
636 /* NSC 87560 and maybe others */
637 temp |= RH_A_NOCP;
638 temp &= ~(RH_A_POTPGT | RH_A_NPS);
639 ohci_writel (ohci, temp, &ohci->regs->roothub.a);
640 } else if ((ohci->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
641 /* hub power always on; required for AMD-756 and some
642 * Mac platforms. ganged overcurrent reporting, if any.
643 */
644 temp |= RH_A_NPS;
645 ohci_writel (ohci, temp, &ohci->regs->roothub.a);
646 }
647 ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
648 ohci_writel (ohci, (temp & RH_A_NPS) ? 0 : RH_B_PPCM,
649 &ohci->regs->roothub.b);
650 // flush those writes
651 (void) ohci_readl (ohci, &ohci->regs->control);
652
653 spin_unlock_irq (&ohci->lock);
654
655 // POTPGT delay is bits 24-31, in 2 ms units.
656 mdelay ((temp >> 23) & 0x1fe);
657 bus = &ohci_to_hcd(ohci)->self;
658 ohci_to_hcd(ohci)->state = HC_STATE_RUNNING;
659
660 ohci_dump (ohci, 1);
661
662 udev = bus->root_hub;
663 if (udev) {
664 return 0;
665 }
666
667 /* connect the virtual root hub */
668 udev = usb_alloc_dev (NULL, bus, 0);
669 if (!udev) {
670 disable (ohci);
671 ohci->hc_control &= ~OHCI_CTRL_HCFS;
672 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
673 return -ENOMEM;
674 }
675
676 udev->speed = USB_SPEED_FULL;
677 if (usb_hcd_register_root_hub (udev, ohci_to_hcd(ohci)) != 0) {
678 usb_put_dev (udev);
679 disable (ohci);
680 ohci->hc_control &= ~OHCI_CTRL_HCFS;
681 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
682 return -ENODEV;
683 }
684 if (ohci->power_budget)
685 hub_set_power_budget(udev, ohci->power_budget);
686
687 create_debug_files (ohci);
688 return 0;
689}
690
691/*-------------------------------------------------------------------------*/
692
/* an interrupt happens */

/* IRQ handler shared by all the bus glue.  Decides whether the interrupt
 * is ours, then services WDH (writeback-done-head), UE (unrecoverable
 * error), RD (resume detect) and scheduled ED unlinks, and finally acks
 * the handled sources and re-enables the master interrupt.
 */
static irqreturn_t ohci_irq (struct usb_hcd *hcd, struct pt_regs *ptregs)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	struct ohci_regs __iomem *regs = ohci->regs;
	int			ints;

	/* we can eliminate a (slow) ohci_readl()
	   if _only_ WDH caused this irq */
	if ((ohci->hcca->done_head != 0)
			&& ! (hc32_to_cpup (ohci, &ohci->hcca->done_head)
				& 0x01)) {
		ints =  OHCI_INTR_WDH;

	/* cardbus/... hardware gone before remove(); all-ones read
	 * means the device no longer decodes our register accesses */
	} else if ((ints = ohci_readl (ohci, &regs->intrstatus)) == ~(u32)0) {
		disable (ohci);
		ohci_dbg (ohci, "device removed!\n");
		return IRQ_HANDLED;

	/* interrupt for some other device? */
	} else if ((ints &= ohci_readl (ohci, &regs->intrenable)) == 0) {
		return IRQ_NOTMINE;
	}

	if (ints & OHCI_INTR_UE) {
		disable (ohci);
		ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
		// e.g. due to PCI Master/Target Abort

		ohci_dump (ohci, 1);
		ohci_usb_reset (ohci);
	}

	if (ints & OHCI_INTR_RD) {
		ohci_vdbg (ohci, "resume detect\n");
		if (hcd->state != HC_STATE_QUIESCING)
			schedule_work(&ohci->rh_resume);
	}

	if (ints & OHCI_INTR_WDH) {
		/* mask WDH while walking the done list so the controller
		 * can't update done_head underneath us */
		if (HC_IS_RUNNING(hcd->state))
			ohci_writel (ohci, OHCI_INTR_WDH, &regs->intrdisable);
		spin_lock (&ohci->lock);
		dl_done_list (ohci, ptregs);
		spin_unlock (&ohci->lock);
		if (HC_IS_RUNNING(hcd->state))
			ohci_writel (ohci, OHCI_INTR_WDH, &regs->intrenable);
	}

	/* could track INTR_SO to reduce available PCI/... bandwidth */

	/* handle any pending URB/ED unlinks, leaving INTR_SF enabled
	 * when there's still unlinking to be done (next frame).
	 */
	spin_lock (&ohci->lock);
	if (ohci->ed_rm_list)
		finish_unlinks (ohci, ohci_frame_no(ohci), ptregs);
	if ((ints & OHCI_INTR_SF) != 0 && !ohci->ed_rm_list
			&& HC_IS_RUNNING(hcd->state))
		ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
	spin_unlock (&ohci->lock);

	if (HC_IS_RUNNING(hcd->state)) {
		/* ack what we handled, then unmask the master enable */
		ohci_writel (ohci, ints, &regs->intrstatus);
		ohci_writel (ohci, OHCI_INTR_MIE, &regs->intrenable);
		// flush those writes
		(void) ohci_readl (ohci, &ohci->regs->control);
	}

	return IRQ_HANDLED;
}
766
767/*-------------------------------------------------------------------------*/
768
/* HCD teardown:  quiesce the controller, then free the TD/ED pools and
 * the shared HCCA.  Runs in process context (e.g. driver unbind).
 */
static void ohci_stop (struct usb_hcd *hcd)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);

	ohci_dbg (ohci, "stop %s controller (state 0x%02x)\n",
		hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS),
		hcd->state);
	ohci_dump (ohci, 1);

	/* let any queued rh_resume work finish before resetting */
	flush_scheduled_work();

	ohci_usb_reset (ohci);
	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);

	remove_debug_files (ohci);
	ohci_mem_cleanup (ohci);
	/* free the HCCA last; the HC no longer points at it after reset */
	if (ohci->hcca) {
		dma_free_coherent (hcd->self.controller,
				sizeof *ohci->hcca,
				ohci->hcca, ohci->hcca_dma);
		ohci->hcca = NULL;
		ohci->hcca_dma = 0;
	}
}
793
794/*-------------------------------------------------------------------------*/
795
/* must not be called from interrupt context */

#if defined(CONFIG_USB_SUSPEND) || defined(CONFIG_PM)

/* Restart a controller that lost its state (e.g. power was cut while
 * suspended):  mark all attached devices gone so khubd disconnects them,
 * recycle all in-flight eds/tds/urbs, then re-run the controller and
 * re-suspend each root port.  Returns 0 or ohci_run()'s negative errno.
 */
static int ohci_restart (struct ohci_hcd *ohci)
{
	int temp;
	int i;
	struct urb_priv *priv;
	struct usb_device *root = ohci_to_hcd(ohci)->self.root_hub;

	/* mark any devices gone, so they do nothing till khubd disconnects.
	 * recycle any "live" eds/tds (and urbs) right away.
	 * later, khubd disconnect processing will recycle the other state,
	 * (either as disconnect/reconnect, or maybe someday as a reset).
	 */
	spin_lock_irq(&ohci->lock);
	disable (ohci);
	for (i = 0; i < root->maxchild; i++) {
		if (root->children [i])
			usb_set_device_state (root->children[i],
				USB_STATE_NOTATTACHED);
	}
	if (!list_empty (&ohci->pending))
		ohci_dbg(ohci, "abort schedule...\n");
	list_for_each_entry (priv, &ohci->pending, pending) {
		struct urb	*urb = priv->td[0]->urb;
		struct ed	*ed = priv->ed;

		switch (ed->state) {
		case ED_OPER:
			ed->state = ED_UNLINK;
			ed->hwINFO |= cpu_to_hc32(ohci, ED_DEQUEUE);
			ed_deschedule (ohci, ed);

			ed->ed_next = ohci->ed_rm_list;
			ed->ed_prev = NULL;
			ohci->ed_rm_list = ed;
			/* FALLTHROUGH */
		case ED_UNLINK:
			break;
		default:
			ohci_dbg(ohci, "bogus ed %p state %d\n",
					ed, ed->state);
		}

		spin_lock (&urb->lock);
		urb->status = -ESHUTDOWN;
		spin_unlock (&urb->lock);
	}
	finish_unlinks (ohci, 0, NULL);
	spin_unlock_irq(&ohci->lock);

	/* paranoia, in case that didn't work: */

	/* empty the interrupt branches */
	for (i = 0; i < NUM_INTS; i++) ohci->load [i] = 0;
	for (i = 0; i < NUM_INTS; i++) ohci->hcca->int_table [i] = 0;

	/* no EDs to remove */
	ohci->ed_rm_list = NULL;

	/* empty control and bulk lists */
	ohci->ed_controltail = NULL;
	ohci->ed_bulktail    = NULL;

	if ((temp = ohci_run (ohci)) < 0) {
		ohci_err (ohci, "can't restart, %d\n", temp);
		return temp;
	} else {
		/* here we "know" root ports should always stay powered,
		 * and that if we try to turn them back on the root hub
		 * will respond to CSC processing.
		 */
		i = roothub_a (ohci) & RH_A_NDP;
		while (i--)
			/* bugfix: index with the loop counter "i"; the old
			 * code used "temp" (ohci_run's 0 success return),
			 * so it only ever suspended port 0
			 */
			ohci_writel (ohci, RH_PS_PSS,
				&ohci->regs->roothub.portstatus [i]);
		ohci_dbg (ohci, "restart complete\n");
	}
	return 0;
}
#endif
879
880/*-------------------------------------------------------------------------*/
881
#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC

MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_DESCRIPTION (DRIVER_INFO);
MODULE_LICENSE ("GPL");

/* Bus glue is compiled into this translation unit:  each variant below
 * wraps the shared ohci-hcd core with platform/PCI probe and remove
 * plumbing.  At least one must be configured, or the build fails.
 */
#ifdef CONFIG_PCI
#include "ohci-pci.c"
#endif

#ifdef CONFIG_SA1111
#include "ohci-sa1111.c"
#endif

#ifdef CONFIG_ARCH_OMAP
#include "ohci-omap.c"
#endif

#ifdef CONFIG_ARCH_LH7A404
#include "ohci-lh7a404.c"
#endif

#ifdef CONFIG_PXA27x
#include "ohci-pxa27x.c"
#endif

#ifdef CONFIG_SOC_AU1X00
#include "ohci-au1xxx.c"
#endif

#ifdef CONFIG_USB_OHCI_HCD_PPC_SOC
#include "ohci-ppc-soc.c"
#endif

#if	!(defined(CONFIG_PCI) \
	|| defined(CONFIG_SA1111) \
	|| defined(CONFIG_ARCH_OMAP) \
	|| defined (CONFIG_ARCH_LH7A404) \
	|| defined (CONFIG_PXA27x) \
	|| defined (CONFIG_SOC_AU1X00) \
	|| defined (CONFIG_USB_OHCI_HCD_PPC_SOC) \
	)
#error "missing bus glue for ohci-hcd"
#endif
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
new file mode 100644
index 000000000000..e2fc4129dfc6
--- /dev/null
+++ b/drivers/usb/host/ohci-hub.c
@@ -0,0 +1,643 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
6 *
7 * This file is licenced under GPL
8 */
9
10/*-------------------------------------------------------------------------*/
11
12/*
13 * OHCI Root Hub ... the nonsharable stuff
14 */
15
/* Decode one roothub.portstatus word into symbolic bit names; only bits
 * that are set contribute a name.  "value" is the (already read) status
 * word.  Bugfix: the macro previously ignored its "value" argument and
 * silently read a local named "temp" at every call site.
 */
#define dbg_port(hc,label,num,value) \
	ohci_dbg (hc, \
		"%s roothub.portstatus [%d] " \
		"= 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n", \
		label, num, (value), \
		((value) & RH_PS_PRSC) ? " PRSC" : "", \
		((value) & RH_PS_OCIC) ? " OCIC" : "", \
		((value) & RH_PS_PSSC) ? " PSSC" : "", \
		((value) & RH_PS_PESC) ? " PESC" : "", \
		((value) & RH_PS_CSC) ? " CSC" : "", \
		\
		((value) & RH_PS_LSDA) ? " LSDA" : "", \
		((value) & RH_PS_PPS) ? " PPS" : "", \
		((value) & RH_PS_PRS) ? " PRS" : "", \
		((value) & RH_PS_POCI) ? " POCI" : "", \
		((value) & RH_PS_PSS) ? " PSS" : "", \
		\
		((value) & RH_PS_PES) ? " PES" : "", \
		((value) & RH_PS_CCS) ? " CCS" : "" \
		);
36
37/*-------------------------------------------------------------------------*/
38
#if	defined(CONFIG_USB_SUSPEND) || defined(CONFIG_PM)

/* HcControl bits that enable ED list processing (control, bulk,
 * periodic, isochronous) */
#define OHCI_SCHED_ENABLES \
	(OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE)

/* forward declarations for the suspend/resume paths below */
static void dl_done_list (struct ohci_hcd *, struct pt_regs *);
static void finish_unlinks (struct ohci_hcd *, u16 , struct pt_regs *);
static int ohci_restart (struct ohci_hcd *ohci);
47
48static int ohci_hub_suspend (struct usb_hcd *hcd)
49{
50 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
51 int status = 0;
52 unsigned long flags;
53
54 spin_lock_irqsave (&ohci->lock, flags);
55
56 ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
57 switch (ohci->hc_control & OHCI_CTRL_HCFS) {
58 case OHCI_USB_RESUME:
59 ohci_dbg (ohci, "resume/suspend?\n");
60 ohci->hc_control &= ~OHCI_CTRL_HCFS;
61 ohci->hc_control |= OHCI_USB_RESET;
62 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
63 (void) ohci_readl (ohci, &ohci->regs->control);
64 /* FALL THROUGH */
65 case OHCI_USB_RESET:
66 status = -EBUSY;
67 ohci_dbg (ohci, "needs reinit!\n");
68 goto done;
69 case OHCI_USB_SUSPEND:
70 ohci_dbg (ohci, "already suspended\n");
71 goto done;
72 }
73 ohci_dbg (ohci, "suspend root hub\n");
74
75 /* First stop any processing */
76 hcd->state = HC_STATE_QUIESCING;
77 if (ohci->hc_control & OHCI_SCHED_ENABLES) {
78 int limit;
79
80 ohci->hc_control &= ~OHCI_SCHED_ENABLES;
81 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
82 ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
83 ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
84
85 /* sched disables take effect on the next frame,
86 * then the last WDH could take 6+ msec
87 */
88 ohci_dbg (ohci, "stopping schedules ...\n");
89 limit = 2000;
90 while (limit > 0) {
91 udelay (250);
92 limit =- 250;
93 if (ohci_readl (ohci, &ohci->regs->intrstatus)
94 & OHCI_INTR_SF)
95 break;
96 }
97 dl_done_list (ohci, NULL);
98 mdelay (7);
99 }
100 dl_done_list (ohci, NULL);
101 finish_unlinks (ohci, ohci_frame_no(ohci), NULL);
102 ohci_writel (ohci, ohci_readl (ohci, &ohci->regs->intrstatus),
103 &ohci->regs->intrstatus);
104
105 /* maybe resume can wake root hub */
106 if (hcd->remote_wakeup)
107 ohci->hc_control |= OHCI_CTRL_RWE;
108 else
109 ohci->hc_control &= ~OHCI_CTRL_RWE;
110
111 /* Suspend hub */
112 ohci->hc_control &= ~OHCI_CTRL_HCFS;
113 ohci->hc_control |= OHCI_USB_SUSPEND;
114 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
115 (void) ohci_readl (ohci, &ohci->regs->control);
116
117 /* no resumes until devices finish suspending */
118 ohci->next_statechange = jiffies + msecs_to_jiffies (5);
119
120done:
121 if (status == 0)
122 hcd->state = HC_STATE_SUSPENDED;
123 spin_unlock_irqrestore (&ohci->lock, flags);
124 return status;
125}
126
127static inline struct ed *find_head (struct ed *ed)
128{
129 /* for bulk and control lists */
130 while (ed->ed_prev)
131 ed = ed->ed_prev;
132 return ed;
133}
134
/* caller has locked the root hub */

/* Bring the root hub (and controller) back to OPERATIONAL after a
 * suspend.  Drives the RESUME signaling, restores schedule registers,
 * and re-enables whichever ED lists still have work queued.  Returns 0
 * on success, a negative errno otherwise; on -EBUSY the controller is
 * reinitialized and restarted from scratch.
 */
static int ohci_hub_resume (struct usb_hcd *hcd)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	u32			temp, enables;
	int			status = -EINPROGRESS;

	/* honor the quiet period requested by the last state change */
	if (time_before (jiffies, ohci->next_statechange))
		msleep(5);

	spin_lock_irq (&ohci->lock);
	ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);

	if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
		/* this can happen after suspend-to-disk */
		if (hcd->state == HC_STATE_RESUMING) {
			ohci_dbg (ohci, "BIOS/SMM active, control %03x\n",
					ohci->hc_control);
			status = -EBUSY;
		/* this happens when pmcore resumes HC then root */
		} else {
			ohci_dbg (ohci, "duplicate resume\n");
			status = 0;
		}
	} else switch (ohci->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_SUSPEND:
		ohci->hc_control &= ~(OHCI_CTRL_HCFS|OHCI_SCHED_ENABLES);
		ohci->hc_control |= OHCI_USB_RESUME;
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
		(void) ohci_readl (ohci, &ohci->regs->control);
		ohci_dbg (ohci, "resume root hub\n");
		break;
	case OHCI_USB_RESUME:
		/* HCFS changes sometime after INTR_RD */
		ohci_info (ohci, "wakeup\n");
		break;
	case OHCI_USB_OPER:
		ohci_dbg (ohci, "already resumed\n");
		status = 0;
		break;
	default:		/* RESET, we lost power */
		ohci_dbg (ohci, "root hub hardware reset\n");
		status = -EBUSY;
	}
	spin_unlock_irq (&ohci->lock);
	if (status == -EBUSY) {
		(void) ohci_init (ohci);
		return ohci_restart (ohci);
	}
	if (status != -EINPROGRESS)
		return status;

	temp = roothub_a (ohci) & RH_A_NDP;
	enables = 0;
	while (temp--) {
		u32 stat = ohci_readl (ohci,
				       &ohci->regs->roothub.portstatus [temp]);

		/* force global, not selective, resume */
		if (!(stat & RH_PS_PSS))
			continue;
		ohci_writel (ohci, RH_PS_POCI,
				&ohci->regs->roothub.portstatus [temp]);
	}

	/* Some controllers (lucent erratum) need extra-long delays */
	hcd->state = HC_STATE_RESUMING;
	mdelay (20 /* usb 11.5.1.10 */ + 15);

	temp = ohci_readl (ohci, &ohci->regs->control);
	temp &= OHCI_CTRL_HCFS;
	if (temp != OHCI_USB_RESUME) {
		ohci_err (ohci, "controller won't resume\n");
		return -EBUSY;
	}

	/* disable old schedule state, reinit from scratch */
	ohci_writel (ohci, 0, &ohci->regs->ed_controlhead);
	ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
	ohci_writel (ohci, 0, &ohci->regs->ed_bulkhead);
	ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
	ohci_writel (ohci, 0, &ohci->regs->ed_periodcurrent);
	ohci_writel (ohci, (u32) ohci->hcca_dma, &ohci->regs->hcca);

	/* Sometimes PCI D3 suspend trashes frame timings ... */
	periodic_reinit (ohci);

	/* interrupts might have been disabled */
	ohci_writel (ohci, OHCI_INTR_INIT, &ohci->regs->intrenable);
	if (ohci->ed_rm_list)
		ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
	ohci_writel (ohci, ohci_readl (ohci, &ohci->regs->intrstatus),
			&ohci->regs->intrstatus);

	/* Then re-enable operations */
	ohci_writel (ohci, OHCI_USB_OPER, &ohci->regs->control);
	(void) ohci_readl (ohci, &ohci->regs->control);
	msleep (3);

	temp = OHCI_CONTROL_INIT | OHCI_USB_OPER;
	if (hcd->can_wakeup)
		temp |= OHCI_CTRL_RWC;
	ohci->hc_control = temp;
	ohci_writel (ohci, temp, &ohci->regs->control);
	(void) ohci_readl (ohci, &ohci->regs->control);

	/* TRSMRCY */
	msleep (10);

	/* keep it alive for ~5x suspend + resume costs */
	ohci->next_statechange = jiffies + msecs_to_jiffies (250);

	/* maybe turn schedules back on */
	enables = 0;
	temp = 0;
	if (!ohci->ed_rm_list) {
		if (ohci->ed_controltail) {
			ohci_writel (ohci,
					find_head (ohci->ed_controltail)->dma,
					&ohci->regs->ed_controlhead);
			enables |= OHCI_CTRL_CLE;
			temp |= OHCI_CLF;
		}
		if (ohci->ed_bulktail) {
			ohci_writel (ohci, find_head (ohci->ed_bulktail)->dma,
				&ohci->regs->ed_bulkhead);
			enables |= OHCI_CTRL_BLE;
			temp |= OHCI_BLF;
		}
	}
	if (hcd->self.bandwidth_isoc_reqs || hcd->self.bandwidth_int_reqs)
		enables |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
	if (enables) {
		ohci_dbg (ohci, "restarting schedules ... %08x\n", enables);
		ohci->hc_control |= enables;
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
		if (temp)
			ohci_writel (ohci, temp, &ohci->regs->cmdstatus);
		(void) ohci_readl (ohci, &ohci->regs->control);
	}

	hcd->state = HC_STATE_RUNNING;
	return 0;
}
279
280static void ohci_rh_resume (void *_hcd)
281{
282 struct usb_hcd *hcd = _hcd;
283
284 usb_lock_device (hcd->self.root_hub);
285 (void) ohci_hub_resume (hcd);
286 usb_unlock_device (hcd->self.root_hub);
287}
288
289#else
290
/* Without PM support the resume work item should never be scheduled;
 * just log if it somehow runs.
 */
static void ohci_rh_resume (void *_hcd)
{
	struct usb_hcd	*hcd = _hcd;

	ohci_dbg(hcd_to_ohci (hcd), "rh_resume ??\n");
}
296
297#endif /* CONFIG_USB_SUSPEND || CONFIG_PM */
298
299/*-------------------------------------------------------------------------*/
300
/* build "status change" packet (one or two bytes) from HC registers */

/* Root-hub polling callback.  Fills buf with the status-change bitmap
 * (bit 0 = hub-global change, bit N = port N, 1-based) and returns its
 * length in bytes, or 0 when nothing changed.  May also autosuspend an
 * idle root hub when CONFIG_PM is set.
 */
static int
ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	int		ports, i, changed = 0, length = 1;
	int		can_suspend = hcd->can_wakeup;
	unsigned long	flags;

	spin_lock_irqsave (&ohci->lock, flags);

	/* handle autosuspended root:  finish resuming before
	 * letting khubd or root hub timer see state changes.
	 */
	if ((ohci->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_OPER
			|| !HC_IS_RUNNING(hcd->state)) {
		can_suspend = 0;
		goto done;
	}

	ports = roothub_a (ohci) & RH_A_NDP;
	if (ports > MAX_ROOT_PORTS) {
		ohci_err (ohci, "bogus NDP=%d, rereads as NDP=%d\n", ports,
			ohci_readl (ohci, &ohci->regs->roothub.a) & RH_A_NDP);
		/* retry later; "should not happen" */
		goto done;
	}

	/* init status */
	if (roothub_status (ohci) & (RH_HS_LPSC | RH_HS_OCIC))
		buf [0] = changed = 1;
	else
		buf [0] = 0;
	if (ports > 7) {
		buf [1] = 0;
		length++;
	}

	/* look at each port */
	for (i = 0; i < ports; i++) {
		u32	status = roothub_portstatus (ohci, i);

		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC
				| RH_PS_OCIC | RH_PS_PRSC)) {
			changed = 1;
			if (i < 7)
				buf [0] |= 1 << (i + 1);
			else
				buf [1] |= 1 << (i - 7);
			continue;
		}

		/* can suspend if no ports are enabled; or if all
		 * enabled ports are suspended AND remote wakeup is on.
		 */
		if (!(status & RH_PS_CCS))
			continue;
		if ((status & RH_PS_PSS) && hcd->remote_wakeup)
			continue;
		can_suspend = 0;
	}
done:
	spin_unlock_irqrestore (&ohci->lock, flags);

#ifdef CONFIG_PM
	/* save power by suspending idle root hubs;
	 * INTR_RD wakes us when there's work
	 * NOTE: if we can do this, we don't need a root hub timer!
	 */
	if (can_suspend
			&& !changed
			&& !ohci->ed_rm_list
			&& ((OHCI_CTRL_HCFS | OHCI_SCHED_ENABLES)
					& ohci->hc_control)
				== OHCI_USB_OPER
			&& time_after (jiffies, ohci->next_statechange)
			&& usb_trylock_device (hcd->self.root_hub)
			) {
		ohci_vdbg (ohci, "autosuspend\n");
		(void) ohci_hub_suspend (hcd);
		hcd->state = HC_STATE_RUNNING;
		usb_unlock_device (hcd->self.root_hub);
	}
#endif

	return changed ? length : 0;
}
389
390/*-------------------------------------------------------------------------*/
391
392static void
393ohci_hub_descriptor (
394 struct ohci_hcd *ohci,
395 struct usb_hub_descriptor *desc
396) {
397 u32 rh = roothub_a (ohci);
398 int ports = rh & RH_A_NDP;
399 u16 temp;
400
401 desc->bDescriptorType = 0x29;
402 desc->bPwrOn2PwrGood = (rh & RH_A_POTPGT) >> 24;
403 desc->bHubContrCurrent = 0;
404
405 desc->bNbrPorts = ports;
406 temp = 1 + (ports / 8);
407 desc->bDescLength = 7 + 2 * temp;
408
409 temp = 0;
410 if (rh & RH_A_NPS) /* no power switching? */
411 temp |= 0x0002;
412 if (rh & RH_A_PSM) /* per-port power switching? */
413 temp |= 0x0001;
414 if (rh & RH_A_NOCP) /* no overcurrent reporting? */
415 temp |= 0x0010;
416 else if (rh & RH_A_OCPM) /* per-port overcurrent reporting? */
417 temp |= 0x0008;
418 desc->wHubCharacteristics = (__force __u16)cpu_to_hc16(ohci, temp);
419
420 /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
421 rh = roothub_b (ohci);
422 desc->bitmap [0] = rh & RH_B_DR;
423 if (ports > 7) {
424 desc->bitmap [1] = (rh & RH_B_DR) >> 8;
425 desc->bitmap [2] = desc->bitmap [3] = 0xff;
426 } else
427 desc->bitmap [1] = 0xff;
428}
429
430/*-------------------------------------------------------------------------*/
431
432#ifdef CONFIG_USB_OTG
433
434static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port)
435{
436 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
437 u32 status;
438
439 if (!port)
440 return -EINVAL;
441 port--;
442
443 /* start port reset before HNP protocol times out */
444 status = ohci_readl(ohci, &ohci->regs->roothub.portstatus [port]);
445 if (!(status & RH_PS_CCS))
446 return -ENODEV;
447
448 /* khubd will finish the reset later */
449 ohci_writel(ohci, RH_PS_PRS, &ohci->regs->roothub.portstatus [port]);
450 return 0;
451}
452
453static void start_hnp(struct ohci_hcd *ohci);
454
455#else
456
457#define ohci_start_port_reset NULL
458
459#endif
460
461/*-------------------------------------------------------------------------*/
462
463
/* See usb 7.1.7.5:  root hubs must issue at least 50 msec reset signaling,
 * not necessarily continuous ... to guard against resume signaling.
 * The short timeout is safe for non-root hubs, and is backward-compatible
 * with earlier Linux hosts.
 */
#ifdef	CONFIG_USB_SUSPEND
#define	PORT_RESET_MSEC		50
#else
#define	PORT_RESET_MSEC		10
#endif

/* this timer value might be vendor-specific ... */
#define	PORT_RESET_HW_MSEC	10

/* wrap-aware logic morphed from <linux/jiffies.h>; compares 16-bit
 * HcFmNumber frame counters, which wrap at 0x10000 */
#define tick_before(t1,t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
480
/* called from some task, normally khubd */

/* Drive reset signaling on root-hub port "port" (0-based) for at least
 * PORT_RESET_MSEC frames, as a series of PORT_RESET_HW_MSEC hardware
 * pulses.  Stops early if the device disconnects.  Sleeps, so must not
 * be called in_interrupt().
 */
static inline void root_port_reset (struct ohci_hcd *ohci, unsigned port)
{
	__hc32 __iomem *portstat = &ohci->regs->roothub.portstatus [port];
	u32	temp;
	u16	now = ohci_readl(ohci, &ohci->regs->fmnumber);
	u16	reset_done = now + PORT_RESET_MSEC;

	/* build a "continuous enough" reset signal, with up to
	 * 3msec gap between pulses. scheduler HZ==100 must work;
	 * this might need to be deadline-scheduled.
	 */
	do {
		/* spin until any current reset finishes */
		for (;;) {
			temp = ohci_readl (ohci, portstat);
			if (!(temp & RH_PS_PRS))
				break;
			udelay (500);
		}

		/* device gone?  stop resetting */
		if (!(temp & RH_PS_CCS))
			break;
		if (temp & RH_PS_PRSC)
			ohci_writel (ohci, RH_PS_PRSC, portstat);

		/* start the next reset, sleep till it's probably done */
		ohci_writel (ohci, RH_PS_PRS, portstat);
		msleep(PORT_RESET_HW_MSEC);
		now = ohci_readl(ohci, &ohci->regs->fmnumber);
	} while (tick_before(now, reset_done));
	/* caller synchronizes using PRSC */
}
514
/* Handle a root-hub control request (hub class requests from usbcore).
 * Returns 0 on success, -EPIPE ("protocol stall") for unsupported or
 * malformed requests.
 */
static int ohci_hub_control (
	struct usb_hcd	*hcd,
	u16		typeReq,
	u16		wValue,
	u16		wIndex,
	char		*buf,
	u16		wLength
) {
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	int		ports = hcd_to_bus (hcd)->root_hub->maxchild;
	u32		temp;
	int		retval = 0;

	switch (typeReq) {
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			ohci_writel (ohci, RH_HS_OCIC,
					&ohci->regs->roothub.status);
			/* FALLTHROUGH */
		case C_HUB_LOCAL_POWER:
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		/* map the feature to the write-to-clear portstatus bit */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			temp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			temp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			temp = RH_PS_POCI;
			/* make sure the controller is OPERATIONAL so the
			 * port resume actually signals */
			if ((ohci->hc_control & OHCI_CTRL_HCFS)
					!= OHCI_USB_OPER)
				schedule_work (&ohci->rh_resume);
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			temp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			temp = RH_PS_LSDA;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			temp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			temp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			temp = RH_PS_PRSC;
			break;
		default:
			goto error;
		}
		ohci_writel (ohci, temp,
				&ohci->regs->roothub.portstatus [wIndex]);
		// ohci_readl (ohci, &ohci->regs->roothub.portstatus [wIndex]);
		break;
	case GetHubDescriptor:
		ohci_hub_descriptor (ohci, (struct usb_hub_descriptor *) buf);
		break;
	case GetHubStatus:
		temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE);
		*(__le32 *) buf = cpu_to_le32 (temp);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = roothub_portstatus (ohci, wIndex);
		*(__le32 *) buf = cpu_to_le32 (temp);

#ifndef	OHCI_VERBOSE_DEBUG
	if (*(u16*)(buf+2))	/* only if wPortChange is interesting */
#endif
		dbg_port (ohci, "GetStatus", wIndex, temp);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			// FIXME:  this can be cleared, yes?
			/* FALLTHROUGH */
		case C_HUB_LOCAL_POWER:
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
#ifdef	CONFIG_USB_OTG
			if (hcd->self.otg_port == (wIndex + 1)
					&& hcd->self.b_hnp_enable)
				start_hnp(ohci);
			else
#endif
			ohci_writel (ohci, RH_PS_PSS,
				&ohci->regs->roothub.portstatus [wIndex]);
			break;
		case USB_PORT_FEAT_POWER:
			ohci_writel (ohci, RH_PS_PPS,
				&ohci->regs->roothub.portstatus [wIndex]);
			break;
		case USB_PORT_FEAT_RESET:
			root_port_reset (ohci, wIndex);
			break;
		default:
			goto error;
		}
		break;

	default:
error:
		/* "protocol stall" on error */
		retval = -EPIPE;
	}
	return retval;
}
643
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
new file mode 100644
index 000000000000..817620d73841
--- /dev/null
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -0,0 +1,266 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 * (C) Copyright 2002 Hewlett-Packard Company
7 *
8 * Bus Glue for Sharp LH7A404
9 *
10 * Written by Christopher Hoover <ch@hpl.hp.com>
11 * Based on fragments of previous driver by Rusell King et al.
12 *
13 * Modified for LH7A404 from ohci-sa1111.c
14 * by Durgesh Pattamatta <pattamattad@sharpsec.com>
15 *
16 * This file is licenced under the GPL.
17 */
18
19#include <asm/hardware.h>
20#include <asm/mach-types.h>
21#include <asm/arch/hardware.h>
22
23
24extern int usb_disabled(void);
25
26/*-------------------------------------------------------------------------*/
27
/* Power up the on-chip OHCI block:  ungate its clock, then command a
 * host-controller reset via HcCommandStatus.
 */
static void lh7a404_start_hc(struct platform_device *dev)
{
	printk(KERN_DEBUG __FILE__
	       ": starting LH7A404 OHCI USB Controller\n");

	/*
	 * Now, carefully enable the USB clock, and take
	 * the USB host controller out of reset.
	 */
	CSC_PWRCNT |= CSC_PWRCNT_USBH_EN; /* Enable clock */
	udelay(1000);
	USBH_CMDSTATUS = OHCI_HCR;	/* request HC reset */

	printk(KERN_DEBUG __FILE__
		   ": Clock to USB host has been enabled \n");
}
44
/* Power down the on-chip OHCI block by gating its clock. */
static void lh7a404_stop_hc(struct platform_device *dev)
{
	printk(KERN_DEBUG __FILE__
	       ": stopping LH7A404 OHCI USB Controller\n");

	CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */
}
52
53
54/*-------------------------------------------------------------------------*/
55
56/* configure so an HC device and id are always provided */
57/* always called with process context; sleeping is OK */
58
59
60/**
61 * usb_hcd_lh7a404_probe - initialize LH7A404-based HCDs
62 * Context: !in_interrupt()
63 *
64 * Allocates basic resources for this USB host controller, and
65 * then invokes the start() method for the HCD associated with it
66 * through the hotplug entry's driver_data.
67 *
68 */
69int usb_hcd_lh7a404_probe (const struct hc_driver *driver,
70 struct platform_device *dev)
71{
72 int retval;
73 struct usb_hcd *hcd;
74
75 if (dev->resource[1].flags != IORESOURCE_IRQ) {
76 pr_debug("resource[1] is not IORESOURCE_IRQ");
77 return -ENOMEM;
78 }
79
80 hcd = usb_create_hcd(driver, &dev->dev, "lh7a404");
81 if (!hcd)
82 return -ENOMEM;
83 hcd->rsrc_start = dev->resource[0].start;
84 hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
85
86 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
87 pr_debug("request_mem_region failed");
88 retval = -EBUSY;
89 goto err1;
90 }
91
92 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
93 if (!hcd->regs) {
94 pr_debug("ioremap failed");
95 retval = -ENOMEM;
96 goto err2;
97 }
98
99 lh7a404_start_hc(dev);
100 ohci_hcd_init(hcd_to_ohci(hcd));
101
102 retval = usb_add_hcd(hcd, dev->resource[1].start, SA_INTERRUPT);
103 if (retval == 0)
104 return retval;
105
106 lh7a404_stop_hc(dev);
107 iounmap(hcd->regs);
108 err2:
109 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
110 err1:
111 usb_put_hcd(hcd);
112 return retval;
113}
114
115
116/* may be called without controller electrically present */
117/* may be called with controller, bus, and devices active */
118
/**
 * usb_hcd_lh7a404_remove - shutdown processing for LH7A404-based HCDs
 * @dev: USB Host Controller being removed
 * Context: !in_interrupt()
 *
 * Reverses the effect of usb_hcd_lh7a404_probe(), first invoking
 * the HCD's stop() method.  It is always called from a thread
 * context, normally "rmmod", "apmd", or something similar.
 *
 * Teardown happens in reverse order of probe:  unregister the HCD,
 * power down the controller, then release mapping, region, and hcd.
 */
void usb_hcd_lh7a404_remove (struct usb_hcd *hcd, struct platform_device *dev)
{
	usb_remove_hcd(hcd);
	lh7a404_stop_hc(dev);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
}
137
138/*-------------------------------------------------------------------------*/
139
140static int __devinit
141ohci_lh7a404_start (struct usb_hcd *hcd)
142{
143 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
144 int ret;
145
146 ohci_dbg (ohci, "ohci_lh7a404_start, ohci:%p", ohci);
147 if ((ret = ohci_init(ohci)) < 0)
148 return ret;
149
150 if ((ret = ohci_run (ohci)) < 0) {
151 err ("can't start %s", hcd->self.bus_name);
152 ohci_stop (hcd);
153 return ret;
154 }
155 return 0;
156}
157
158/*-------------------------------------------------------------------------*/
159
/* hc_driver method table binding the shared ohci-hcd core to the
 * LH7A404 bus glue.
 */
static const struct hc_driver ohci_lh7a404_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"LH7A404 OHCI",
	.hcd_priv_size =	sizeof(struct ohci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			ohci_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	/*
	 * basic lifecycle operations
	 */
	.start =		ohci_lh7a404_start,
#ifdef	CONFIG_PM
	/* suspend:		ohci_lh7a404_suspend,  -- tbd */
	/* resume:		ohci_lh7a404_resume,   -- tbd */
#endif /*CONFIG_PM*/
	.stop =			ohci_stop,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		ohci_urb_enqueue,
	.urb_dequeue =		ohci_urb_dequeue,
	.endpoint_disable =	ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number =	ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	ohci_hub_status_data,
	.hub_control =		ohci_hub_control,
};
199
200/*-------------------------------------------------------------------------*/
201
202static int ohci_hcd_lh7a404_drv_probe(struct device *dev)
203{
204 struct platform_device *pdev = to_platform_device(dev);
205 int ret;
206
207 pr_debug ("In ohci_hcd_lh7a404_drv_probe");
208
209 if (usb_disabled())
210 return -ENODEV;
211
212 ret = usb_hcd_lh7a404_probe(&ohci_lh7a404_hc_driver, pdev);
213 return ret;
214}
215
/* Platform-bus remove:  tear down the HCD created by probe. */
static int ohci_hcd_lh7a404_drv_remove(struct device *dev)
{
	struct usb_hcd	*hcd = dev_get_drvdata(dev);

	usb_hcd_lh7a404_remove(hcd, to_platform_device(dev));
	return 0;
}
224 /*TBD*/
225/*static int ohci_hcd_lh7a404_drv_suspend(struct device *dev)
226{
227 struct platform_device *pdev = to_platform_device(dev);
228 struct usb_hcd *hcd = dev_get_drvdata(dev);
229
230 return 0;
231}
232static int ohci_hcd_lh7a404_drv_resume(struct device *dev)
233{
234 struct platform_device *pdev = to_platform_device(dev);
235 struct usb_hcd *hcd = dev_get_drvdata(dev);
236
237
238 return 0;
239}
240*/
241
/* Platform driver glue; suspend/resume are still TBD (see the
 * commented-out stubs above).
 */
static struct device_driver ohci_hcd_lh7a404_driver = {
	.name		= "lh7a404-ohci",
	.bus		= &platform_bus_type,
	.probe		= ohci_hcd_lh7a404_drv_probe,
	.remove		= ohci_hcd_lh7a404_drv_remove,
	/*.suspend	= ohci_hcd_lh7a404_drv_suspend, */
	/*.resume	= ohci_hcd_lh7a404_drv_resume, */
};
250
251static int __init ohci_hcd_lh7a404_init (void)
252{
253 pr_debug (DRIVER_INFO " (LH7A404)");
254 pr_debug ("block sizes: ed %d td %d\n",
255 sizeof (struct ed), sizeof (struct td));
256
257 return driver_register(&ohci_hcd_lh7a404_driver);
258}
259
/* Module unload:  unregister the platform driver. */
static void __exit ohci_hcd_lh7a404_cleanup (void)
{
	driver_unregister(&ohci_hcd_lh7a404_driver);
}

module_init (ohci_hcd_lh7a404_init);
module_exit (ohci_hcd_lh7a404_cleanup);
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
new file mode 100644
index 000000000000..e55682b4919d
--- /dev/null
+++ b/drivers/usb/host/ohci-mem.c
@@ -0,0 +1,139 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 *
7 * This file is licenced under the GPL.
8 */
9
10/*-------------------------------------------------------------------------*/
11
12/*
13 * There's basically three types of memory:
14 * - data used only by the HCD ... kmalloc is fine
15 * - async and periodic schedules, shared by HC and HCD ... these
16 * need to use dma_pool or dma_alloc_coherent
17 * - driver buffers, read/written by HC ... the hcd glue or the
18 * device driver provides us with dma addresses
19 *
20 * There's also PCI "register" data, which is memory mapped.
21 * No memory seen by this driver is pagable.
22 */
23
24/*-------------------------------------------------------------------------*/
25
/*
 * One-time software initialization of an ohci_hcd: lock, pending-TD
 * list, and the root-hub resume work item.  Touches no hardware.
 */
static void ohci_hcd_init (struct ohci_hcd *ohci)
{
	ohci->next_statechange = jiffies;	/* no state change pending yet */
	spin_lock_init (&ohci->lock);
	INIT_LIST_HEAD (&ohci->pending);
	INIT_WORK (&ohci->rh_resume, ohci_rh_resume, ohci_to_hcd(ohci));
}
33
34/*-------------------------------------------------------------------------*/
35
36static int ohci_mem_init (struct ohci_hcd *ohci)
37{
38 ohci->td_cache = dma_pool_create ("ohci_td",
39 ohci_to_hcd(ohci)->self.controller,
40 sizeof (struct td),
41 32 /* byte alignment */,
42 0 /* no page-crossing issues */);
43 if (!ohci->td_cache)
44 return -ENOMEM;
45 ohci->ed_cache = dma_pool_create ("ohci_ed",
46 ohci_to_hcd(ohci)->self.controller,
47 sizeof (struct ed),
48 16 /* byte alignment */,
49 0 /* no page-crossing issues */);
50 if (!ohci->ed_cache) {
51 dma_pool_destroy (ohci->td_cache);
52 return -ENOMEM;
53 }
54 return 0;
55}
56
/* Destroy both dma_pools (if created) and clear the pointers so this
 * is safe to call more than once.
 */
static void ohci_mem_cleanup (struct ohci_hcd *ohci)
{
	if (ohci->td_cache) {
		dma_pool_destroy (ohci->td_cache);
		ohci->td_cache = NULL;
	}
	if (ohci->ed_cache) {
		dma_pool_destroy (ohci->ed_cache);
		ohci->ed_cache = NULL;
	}
}
68
69/*-------------------------------------------------------------------------*/
70
71/* ohci "done list" processing needs this mapping */
72static inline struct td *
73dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma)
74{
75 struct td *td;
76
77 td_dma &= TD_MASK;
78 td = hc->td_hash [TD_HASH_FUNC(td_dma)];
79 while (td && td->td_dma != td_dma)
80 td = td->td_hash;
81 return td;
82}
83
84/* TDs ... */
85static struct td *
86td_alloc (struct ohci_hcd *hc, int mem_flags)
87{
88 dma_addr_t dma;
89 struct td *td;
90
91 td = dma_pool_alloc (hc->td_cache, mem_flags, &dma);
92 if (td) {
93 /* in case hc fetches it, make it look dead */
94 memset (td, 0, sizeof *td);
95 td->hwNextTD = cpu_to_hc32 (hc, dma);
96 td->td_dma = dma;
97 /* hashed in td_fill */
98 }
99 return td;
100}
101
/*
 * Unhash a TD and return it to the dma pool.  A TD missing from the
 * hash is only worth a debug message when its DONE bit is set, since
 * only done-list processing relies on the hash.
 */
static void
td_free (struct ohci_hcd *hc, struct td *td)
{
	struct td	**prev = &hc->td_hash [TD_HASH_FUNC (td->td_dma)];

	/* find the hash-chain link pointing at this TD and splice it out */
	while (*prev && *prev != td)
		prev = &(*prev)->td_hash;
	if (*prev)
		*prev = td->td_hash;
	else if ((td->hwINFO & cpu_to_hc32(hc, TD_DONE)) != 0)
		ohci_dbg (hc, "no hash for td %p\n", td);
	dma_pool_free (hc->td_cache, td, td->td_dma);
}
115
116/*-------------------------------------------------------------------------*/
117
118/* EDs ... */
119static struct ed *
120ed_alloc (struct ohci_hcd *hc, int mem_flags)
121{
122 dma_addr_t dma;
123 struct ed *ed;
124
125 ed = dma_pool_alloc (hc->ed_cache, mem_flags, &dma);
126 if (ed) {
127 memset (ed, 0, sizeof (*ed));
128 INIT_LIST_HEAD (&ed->td_list);
129 ed->dma = dma;
130 }
131 return ed;
132}
133
/* Return an ED to the dma pool. */
static void
ed_free (struct ohci_hcd *hc, struct ed *ed)
{
	dma_pool_free (hc->ed_cache, ed, ed->dma);
}
139
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
new file mode 100644
index 000000000000..90285f180f87
--- /dev/null
+++ b/drivers/usb/host/ohci-omap.c
@@ -0,0 +1,560 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2005 David Brownell
6 * (C) Copyright 2002 Hewlett-Packard Company
7 *
8 * OMAP Bus Glue
9 *
10 * Modified for OMAP by Tony Lindgren <tony@atomide.com>
11 * Based on the 2.4 OMAP OHCI driver originally done by MontaVista Software Inc.
12 * and on ohci-sa1111.c by Christopher Hoover <ch@hpl.hp.com>
13 *
14 * This file is licenced under the GPL.
15 */
16
17#include <asm/hardware.h>
18#include <asm/io.h>
19#include <asm/mach-types.h>
20
21#include <asm/arch/hardware.h>
22#include <asm/arch/mux.h>
23#include <asm/arch/irqs.h>
24#include <asm/arch/gpio.h>
25#include <asm/arch/fpga.h>
26#include <asm/arch/usb.h>
27#include <asm/hardware/clock.h>
28
29
30/* OMAP-1510 OHCI has its own MMU for DMA */
31#define OMAP1510_LB_MEMSIZE 32 /* Should be same as SDRAM size */
32#define OMAP1510_LB_CLOCK_DIV 0xfffec10c
33#define OMAP1510_LB_MMU_CTL 0xfffec208
34#define OMAP1510_LB_MMU_LCK 0xfffec224
35#define OMAP1510_LB_MMU_LD_TLB 0xfffec228
36#define OMAP1510_LB_MMU_CAM_H 0xfffec22c
37#define OMAP1510_LB_MMU_CAM_L 0xfffec230
38#define OMAP1510_LB_MMU_RAM_H 0xfffec234
39#define OMAP1510_LB_MMU_RAM_L 0xfffec238
40
41
42#ifndef CONFIG_ARCH_OMAP
43#error "This file is OMAP bus glue. CONFIG_OMAP must be defined."
44#endif
45
46#ifdef CONFIG_TPS65010
47#include <asm/arch/tps65010.h>
48#else
49
50#define LOW 0
51#define HIGH 1
52
53#define GPIO1 1
54
/* No-op stub used when the TPS65010 power-chip driver isn't configured. */
static inline int tps65010_set_gpio_out_value(unsigned gpio, unsigned value)
{
	return 0;
}
59
60#endif
61
62extern int usb_disabled(void);
63extern int ocpi_enable(void);
64
65static struct clk *usb_host_ck;
66
67static void omap_ohci_clock_power(int on)
68{
69 if (on) {
70 clk_enable(usb_host_ck);
71 /* guesstimate for T5 == 1x 32K clock + APLL lock time */
72 udelay(100);
73 } else {
74 clk_disable(usb_host_ck);
75 }
76}
77
78/*
79 * Board specific gang-switched transceiver power on/off.
80 * NOTE: OSK supplies power from DC, not battery.
81 */
/* Switch board-level transceiver power: FPGA bits on Innovator/1510,
 * TPS65010 GPIO1 on OSK (active-low).  Always returns 0.
 */
static int omap_ohci_transceiver_power(int on)
{
	if (on) {
		if (machine_is_omap_innovator() && cpu_is_omap1510())
			fpga_write(fpga_read(INNOVATOR_FPGA_CAM_USB_CONTROL)
				| ((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
			       INNOVATOR_FPGA_CAM_USB_CONTROL);
		else if (machine_is_omap_osk())
			tps65010_set_gpio_out_value(GPIO1, LOW);
	} else {
		if (machine_is_omap_innovator() && cpu_is_omap1510())
			fpga_write(fpga_read(INNOVATOR_FPGA_CAM_USB_CONTROL)
				& ~((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
			       INNOVATOR_FPGA_CAM_USB_CONTROL);
		else if (machine_is_omap_osk())
			tps65010_set_gpio_out_value(GPIO1, HIGH);
	}

	return 0;
}
102
103/*
104 * OMAP-1510 specific Local Bus clock on/off
105 */
106static int omap_1510_local_bus_power(int on)
107{
108 if (on) {
109 omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
110 udelay(200);
111 } else {
112 omap_writel(0, OMAP1510_LB_MMU_CTL);
113 }
114
115 return 0;
116}
117
118/*
119 * OMAP-1510 specific Local Bus initialization
120 * NOTE: This assumes 32MB memory size in OMAP1510LB_MEMSIZE.
121 * See also arch/mach-omap/memory.h for __virt_to_dma() and
122 * __dma_to_virt() which need to match with the physical
123 * Local Bus address below.
124 */
static int omap_1510_local_bus_init(void)
{
	unsigned int tlb;
	unsigned long lbaddr, physaddr;

	/* set the Local Bus clock divider (low 3 bits) to 4 */
	omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
	       OMAP1510_LB_CLOCK_DIV);

	/* Configure the Local Bus MMU table */
	/* one TLB entry per 1 MB, mapping LB space to physical SDRAM */
	for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
		lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
		physaddr = tlb * 0x00100000 + PHYS_OFFSET;
		omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
		omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
		       OMAP1510_LB_MMU_CAM_L);
		omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
		omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
		omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
		omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);	/* commit entry */
	}

	/* Enable the walking table */
	omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
	udelay(200);

	return 0;
}
152
153#ifdef CONFIG_USB_OTG
154
/* Start OTG Host Negotiation Protocol: hand the bus to the peripheral
 * by suspending the OTG port and dropping the A-device bus request.
 */
static void start_hnp(struct ohci_hcd *ohci)
{
	const unsigned	port = ohci_to_hcd(ohci)->self.otg_port - 1;
	unsigned long	flags;

	otg_start_hnp(ohci->transceiver);

	/* state change + port suspend + bus release done atomically */
	local_irq_save(flags);
	ohci->transceiver->state = OTG_STATE_A_SUSPEND;
	writel (RH_PS_PSS, &ohci->regs->roothub.portstatus [port]);
	OTG_CTRL_REG &= ~OTG_A_BUSREQ;
	local_irq_restore(flags);
}
168
169#endif
170
171/*-------------------------------------------------------------------------*/
172
/*
 * Bring up the OMAP OHCI block: OTG/transceiver hookup, clocks,
 * 1510 local-bus setup, generic ohci_init(), then board-specific
 * root-hub power switching.  Returns 0 or a negative errno.
 */
static int omap_start_hc(struct ohci_hcd *ohci, struct platform_device *pdev)
{
	struct omap_usb_config	*config = pdev->dev.platform_data;
	int			need_transceiver = (config->otg != 0);
	int			ret;

	dev_dbg(&pdev->dev, "starting USB Controller\n");

	if (config->otg) {
		ohci_to_hcd(ohci)->self.otg_port = config->otg;
		/* default/minimum OTG power budget: 8 mA */
		ohci->power_budget = 8;
	}

	/* boards can use OTG transceivers in non-OTG modes */
	need_transceiver = need_transceiver
			|| machine_is_omap_h2() || machine_is_omap_h3();

	if (cpu_is_omap16xx())
		ocpi_enable();

#ifdef	CONFIG_ARCH_OMAP_OTG
	if (need_transceiver) {
		ohci->transceiver = otg_get_transceiver();
		if (ohci->transceiver) {
			int	status = otg_set_host(ohci->transceiver,
						&ohci_to_hcd(ohci)->self);
			dev_dbg(&pdev->dev, "init %s transceiver, status %d\n",
					ohci->transceiver->label, status);
			if (status) {
				/* drop the reference taken above */
				if (ohci->transceiver)
					put_device(ohci->transceiver->dev);
				return status;
			}
		} else {
			dev_err(&pdev->dev, "can't find transceiver\n");
			return -ENODEV;
		}
	}
#endif

	omap_ohci_clock_power(1);

	if (cpu_is_omap1510()) {
		omap_1510_local_bus_power(1);
		omap_1510_local_bus_init();
	}

	if ((ret = ohci_init(ohci)) < 0)
		return ret;

	/* board-specific power switching and overcurrent support */
	if (machine_is_omap_osk() || machine_is_omap_innovator()) {
		u32	rh = roothub_a (ohci);

		/* power switching (ganged by default) */
		rh &= ~RH_A_NPS;

		/* TPS2045 switch for internal transceiver (port 1) */
		if (machine_is_omap_osk()) {
			ohci->power_budget = 250;

			rh &= ~RH_A_NOCP;	/* report overcurrent */

			/* gpio9 for overcurrent detection */
			omap_cfg_reg(W8_1610_GPIO9);
			omap_request_gpio(9);
			omap_set_gpio_direction(9, 1 /* IN */);

			/* for paranoia's sake: disable USB.PUEN */
			omap_cfg_reg(W4_USB_HIGHZ);
		}
		ohci_writel(ohci, rh, &ohci->regs->roothub.a);
		distrust_firmware = 0;
	}

	/* FIXME khubd hub requests should manage power switching */
	omap_ohci_transceiver_power(1);

	/* board init will have already handled HMC and mux setup.
	 * any external transceiver should already be initialized
	 * too, so all configured ports use the right signaling now.
	 */

	return 0;
}
259
/* Power down the controller by gating its clock. */
static void omap_stop_hc(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "stopping USB Controller\n");
	omap_ohci_clock_power(0);
}
265
266
267/*-------------------------------------------------------------------------*/
268
269void usb_hcd_omap_remove (struct usb_hcd *, struct platform_device *);
270
271/* configure so an HC device and id are always provided */
272/* always called with process context; sleeping is OK */
273
274
275/**
276 * usb_hcd_omap_probe - initialize OMAP-based HCDs
277 * Context: !in_interrupt()
278 *
279 * Allocates basic resources for this USB host controller, and
280 * then invokes the start() method for the HCD associated with it
281 * through the hotplug entry's driver_data.
282 */
283int usb_hcd_omap_probe (const struct hc_driver *driver,
284 struct platform_device *pdev)
285{
286 int retval;
287 struct usb_hcd *hcd = 0;
288 struct ohci_hcd *ohci;
289
290 if (pdev->num_resources != 2) {
291 printk(KERN_ERR "hcd probe: invalid num_resources: %i\n",
292 pdev->num_resources);
293 return -ENODEV;
294 }
295
296 if (pdev->resource[0].flags != IORESOURCE_MEM
297 || pdev->resource[1].flags != IORESOURCE_IRQ) {
298 printk(KERN_ERR "hcd probe: invalid resource type\n");
299 return -ENODEV;
300 }
301
302 usb_host_ck = clk_get(0, "usb_hhc_ck");
303 if (IS_ERR(usb_host_ck))
304 return PTR_ERR(usb_host_ck);
305
306 hcd = usb_create_hcd (driver, &pdev->dev, pdev->dev.bus_id);
307 if (!hcd) {
308 retval = -ENOMEM;
309 goto err0;
310 }
311 hcd->rsrc_start = pdev->resource[0].start;
312 hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
313
314 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
315 dev_dbg(&pdev->dev, "request_mem_region failed\n");
316 retval = -EBUSY;
317 goto err1;
318 }
319
320 hcd->regs = (void __iomem *) (int) IO_ADDRESS(hcd->rsrc_start);
321
322 ohci = hcd_to_ohci(hcd);
323 ohci_hcd_init(ohci);
324
325 retval = omap_start_hc(ohci, pdev);
326 if (retval < 0)
327 goto err2;
328
329 retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), SA_INTERRUPT);
330 if (retval == 0)
331 return retval;
332
333 omap_stop_hc(pdev);
334err2:
335 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
336err1:
337 usb_put_hcd(hcd);
338err0:
339 clk_put(usb_host_ck);
340 return retval;
341}
342
343
344/* may be called with controller, bus, and devices active */
345
346/**
347 * usb_hcd_omap_remove - shutdown processing for OMAP-based HCDs
348 * @dev: USB Host Controller being removed
349 * Context: !in_interrupt()
350 *
351 * Reverses the effect of usb_hcd_omap_probe(), first invoking
352 * the HCD's stop() method. It is always called from a thread
353 * context, normally "rmmod", "apmd", or something similar.
354 *
355 */
void usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
{
	usb_remove_hcd(hcd);
	if (machine_is_omap_osk())
		omap_free_gpio(9);	/* overcurrent gpio claimed in omap_start_hc() */
	omap_stop_hc(pdev);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
	clk_put(usb_host_ck);
}
366
367/*-------------------------------------------------------------------------*/
368
/* hc_driver.start: honor board remote-wakeup config, then run the HC. */
static int __devinit
ohci_omap_start (struct usb_hcd *hcd)
{
	struct omap_usb_config *config;
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	int		ret;

	config = hcd->self.controller->platform_data;
	/* OTG or boards flagged "rwc" support remote wakeup */
	if (config->otg || config->rwc)
		writel(OHCI_CTRL_RWC, &ohci->regs->control);

	if ((ret = ohci_run (ohci)) < 0) {
		dev_err(hcd->self.controller, "can't start\n");
		ohci_stop (hcd);
		return ret;
	}
	return 0;
}
387
388/*-------------------------------------------------------------------------*/
389
/* hc_driver method table handed to usb_create_hcd() for OMAP OHCI. */
static const struct hc_driver ohci_omap_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"OMAP OHCI",
	.hcd_priv_size =	sizeof(struct ohci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			ohci_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	/*
	 * basic lifecycle operations
	 */
	.start =		ohci_omap_start,
	.stop =			ohci_stop,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		ohci_urb_enqueue,
	.urb_dequeue =		ohci_urb_dequeue,
	.endpoint_disable =	ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number =	ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	ohci_hub_status_data,
	.hub_control =		ohci_hub_control,
#ifdef	CONFIG_USB_SUSPEND
	.hub_suspend =		ohci_hub_suspend,
	.hub_resume =		ohci_hub_resume,
#endif
	.start_port_reset =	ohci_start_port_reset,
};
430
431/*-------------------------------------------------------------------------*/
432
433static int ohci_hcd_omap_drv_probe(struct device *dev)
434{
435 return usb_hcd_omap_probe(&ohci_omap_hc_driver,
436 to_platform_device(dev));
437}
438
/* Platform-bus remove: tear down the HCD and release the transceiver. */
static int ohci_hcd_omap_drv_remove(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct usb_hcd		*hcd = dev_get_drvdata(dev);
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);

	usb_hcd_omap_remove(hcd, pdev);
	if (ohci->transceiver) {
		/* detach from and drop the OTG transceiver acquired at start */
		(void) otg_set_host(ohci->transceiver, 0);
		put_device(ohci->transceiver->dev);
	}
	dev_set_drvdata(dev, NULL);

	return 0;
}
454
455/*-------------------------------------------------------------------------*/
456
457#ifdef CONFIG_PM
458
459/* states match PCI usage, always suspending the root hub except that
460 * 4 ~= D3cold (ACPI D3) with clock off (resume sees reset).
461 */
462
static int ohci_omap_suspend(struct device *dev, u32 state, u32 level)
{
	struct ohci_hcd	*ohci = hcd_to_ohci(dev_get_drvdata(dev));
	int		status = -EINVAL;

	/* act only on the power-down pass of the suspend sequence */
	if (level != SUSPEND_POWER_DOWN)
		return 0;
	/* nothing to do if we're already at least this deep */
	if (state <= dev->power.power_state)
		return 0;

	dev_dbg(dev, "suspend to %d\n", state);
	down(&ohci_to_hcd(ohci)->self.root_hub->serialize);
	status = ohci_hub_suspend(ohci_to_hcd(ohci));
	if (status == 0) {
		if (state >= 4) {
			/* ~D3cold: clock off, resume will see a reset */
			omap_ohci_clock_power(0);
			ohci_to_hcd(ohci)->self.root_hub->state =
					USB_STATE_SUSPENDED;
			state = 4;
		}
		ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
		dev->power.power_state = state;
	}
	up(&ohci_to_hcd(ohci)->self.root_hub->serialize);
	return status;
}
489
static int ohci_omap_resume(struct device *dev, u32 level)
{
	struct ohci_hcd	*ohci = hcd_to_ohci(dev_get_drvdata(dev));
	int		status = 0;

	/* act only on the power-on pass of the resume sequence */
	if (level != RESUME_POWER_ON)
		return 0;

	switch (dev->power.power_state) {
	case 0:		/* wasn't suspended: nothing to do */
		break;
	case 4:		/* clock was cut; restore it before touching the HC */
		if (time_before(jiffies, ohci->next_statechange))
			msleep(5);
		ohci->next_statechange = jiffies;
		omap_ohci_clock_power(1);
		/* FALLTHROUGH */
	default:
		dev_dbg(dev, "resume from %d\n", dev->power.power_state);
#ifdef	CONFIG_USB_SUSPEND
		/* get extra cleanup even if remote wakeup isn't in use */
		status = usb_resume_device(ohci_to_hcd(ohci)->self.root_hub);
#else
		down(&ohci_to_hcd(ohci)->self.root_hub->serialize);
		status = ohci_hub_resume(ohci_to_hcd(ohci));
		up(&ohci_to_hcd(ohci)->self.root_hub->serialize);
#endif
		if (status == 0)
			dev->power.power_state = 0;
		break;
	}
	return status;
}
523
524#endif
525
526/*-------------------------------------------------------------------------*/
527
528/*
529 * Driver definition to register with the OMAP bus
530 */
static struct device_driver ohci_hcd_omap_driver = {
	.name		= "ohci",
	.bus		= &platform_bus_type,
	.probe		= ohci_hcd_omap_drv_probe,
	.remove		= ohci_hcd_omap_drv_remove,
#ifdef	CONFIG_PM
	.suspend	= ohci_omap_suspend,
	.resume		= ohci_omap_resume,
#endif
};
541
/* Module init: announce the driver and register with the platform bus. */
static int __init ohci_hcd_omap_init (void)
{
	printk (KERN_DEBUG "%s: " DRIVER_INFO " (OMAP)\n", hcd_name);
	if (usb_disabled())
		return -ENODEV;

	pr_debug("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
		sizeof (struct ed), sizeof (struct td));

	return driver_register(&ohci_hcd_omap_driver);
}
553
/* Module exit: unregister the platform driver registered at init. */
static void __exit ohci_hcd_omap_cleanup (void)
{
	driver_unregister(&ohci_hcd_omap_driver);
}

module_init (ohci_hcd_omap_init);
module_exit (ohci_hcd_omap_cleanup);
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
new file mode 100644
index 000000000000..b611582e6bcf
--- /dev/null
+++ b/drivers/usb/host/ohci-pci.c
@@ -0,0 +1,264 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 *
7 * [ Initialisation is based on Linus' ]
8 * [ uhci code and gregs ohci fragments ]
9 * [ (C) Copyright 1999 Linus Torvalds ]
10 * [ (C) Copyright 1999 Gregory P. Smith]
11 *
12 * PCI Bus Glue
13 *
14 * This file is licenced under the GPL.
15 */
16
17#ifdef CONFIG_PMAC_PBOOK
18#include <asm/machdep.h>
19#include <asm/pmac_feature.h>
20#include <asm/pci-bridge.h>
21#include <asm/prom.h>
22#ifndef CONFIG_PM
23# define CONFIG_PM
24#endif
25#endif
26
27#ifndef CONFIG_PCI
28#error "This file is PCI bus glue. CONFIG_PCI must be defined."
29#endif
30
31/*-------------------------------------------------------------------------*/
32
/* hc_driver.reset: software init plus generic OHCI init/reset. */
static int
ohci_pci_reset (struct usb_hcd *hcd)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);

	ohci_hcd_init (ohci);
	return ohci_init (ohci);
}
41
42static int __devinit
43ohci_pci_start (struct usb_hcd *hcd)
44{
45 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
46 int ret;
47
48 if(hcd->self.controller && hcd->self.controller->bus == &pci_bus_type) {
49 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
50
51 /* AMD 756, for most chips (early revs), corrupts register
52 * values on read ... so enable the vendor workaround.
53 */
54 if (pdev->vendor == PCI_VENDOR_ID_AMD
55 && pdev->device == 0x740c) {
56 ohci->flags = OHCI_QUIRK_AMD756;
57 ohci_info (ohci, "AMD756 erratum 4 workaround\n");
58 // also somewhat erratum 10 (suspend/resume issues)
59 }
60
61 /* FIXME for some of the early AMD 760 southbridges, OHCI
62 * won't work at all. blacklist them.
63 */
64
65 /* Apple's OHCI driver has a lot of bizarre workarounds
66 * for this chip. Evidently control and bulk lists
67 * can get confused. (B&W G3 models, and ...)
68 */
69 else if (pdev->vendor == PCI_VENDOR_ID_OPTI
70 && pdev->device == 0xc861) {
71 ohci_info (ohci,
72 "WARNING: OPTi workarounds unavailable\n");
73 }
74
75 /* Check for NSC87560. We have to look at the bridge (fn1) to
76 * identify the USB (fn2). This quirk might apply to more or
77 * even all NSC stuff.
78 */
79 else if (pdev->vendor == PCI_VENDOR_ID_NS) {
80 struct pci_dev *b;
81
82 b = pci_find_slot (pdev->bus->number,
83 PCI_DEVFN (PCI_SLOT (pdev->devfn), 1));
84 if (b && b->device == PCI_DEVICE_ID_NS_87560_LIO
85 && b->vendor == PCI_VENDOR_ID_NS) {
86 ohci->flags |= OHCI_QUIRK_SUPERIO;
87 ohci_info (ohci, "Using NSC SuperIO setup\n");
88 }
89 }
90 }
91
92 /* NOTE: there may have already been a first reset, to
93 * keep bios/smm irqs from making trouble
94 */
95 if ((ret = ohci_run (ohci)) < 0) {
96 ohci_err (ohci, "can't start\n");
97 ohci_stop (hcd);
98 return ret;
99 }
100 return 0;
101}
102
103#ifdef CONFIG_PM
104
/* PCI suspend: quiesce the root hub, then (on PMac) cut the cell clock. */
static int ohci_pci_suspend (struct usb_hcd *hcd, u32 state)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);

	/* suspend root hub, hoping it keeps power during suspend */
	if (time_before (jiffies, ohci->next_statechange))
		msleep (100);

#ifdef	CONFIG_USB_SUSPEND
	(void) usb_suspend_device (hcd->self.root_hub, state);
#else
	usb_lock_device (hcd->self.root_hub);
	(void) ohci_hub_suspend (hcd);
	usb_unlock_device (hcd->self.root_hub);
#endif

	/* let things settle down a bit */
	msleep (100);

#ifdef CONFIG_PMAC_PBOOK
	if (_machine == _MACH_Pmac) {
		struct device_node	*of_node;

		/* Disable USB PAD & cell clock */
		of_node = pci_device_to_OF_node (to_pci_dev(hcd->self.controller));
		if (of_node)
			pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 0);
	}
#endif /* CONFIG_PMAC_PBOOK */
	return 0;
}
136
137
/* PCI resume: (on PMac) restore the cell clock, then resume the root hub. */
static int ohci_pci_resume (struct usb_hcd *hcd)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	int		retval = 0;

#ifdef CONFIG_PMAC_PBOOK
	if (_machine == _MACH_Pmac) {
		struct device_node *of_node;

		/* Re-enable USB PAD & cell clock */
		of_node = pci_device_to_OF_node (to_pci_dev(hcd->self.controller));
		if (of_node)
			pmac_call_feature (PMAC_FTR_USB_ENABLE, of_node, 0, 1);
	}
#endif /* CONFIG_PMAC_PBOOK */

	/* resume root hub */
	if (time_before (jiffies, ohci->next_statechange))
		msleep (100);
#ifdef	CONFIG_USB_SUSPEND
	/* get extra cleanup even if remote wakeup isn't in use */
	retval = usb_resume_device (hcd->self.root_hub);
#else
	usb_lock_device (hcd->self.root_hub);
	retval = ohci_hub_resume (hcd);
	usb_unlock_device (hcd->self.root_hub);
#endif

	return retval;
}
168
169#endif /* CONFIG_PM */
170
171
172/*-------------------------------------------------------------------------*/
173
/* hc_driver method table shared by all PCI OHCI controllers. */
static const struct hc_driver ohci_pci_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"OHCI Host Controller",
	.hcd_priv_size =	sizeof(struct ohci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			ohci_irq,
	.flags =		HCD_MEMORY | HCD_USB11,

	/*
	 * basic lifecycle operations
	 */
	.reset =		ohci_pci_reset,
	.start =		ohci_pci_start,
#ifdef	CONFIG_PM
	.suspend =		ohci_pci_suspend,
	.resume =		ohci_pci_resume,
#endif
	.stop =			ohci_stop,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		ohci_urb_enqueue,
	.urb_dequeue =		ohci_urb_dequeue,
	.endpoint_disable =	ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number =	ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	ohci_hub_status_data,
	.hub_control =		ohci_hub_control,
#ifdef	CONFIG_USB_SUSPEND
	.hub_suspend =		ohci_hub_suspend,
	.hub_resume =		ohci_hub_resume,
#endif
	.start_port_reset =	ohci_start_port_reset,
};
219
220/*-------------------------------------------------------------------------*/
221
222
/* Match every PCI device advertising the OHCI programming interface. */
static const struct pci_device_id pci_ids [] = { {
	/* handle any USB OHCI controller */
	PCI_DEVICE_CLASS((PCI_CLASS_SERIAL_USB << 8) | 0x10, ~0),
	.driver_data =	(unsigned long) &ohci_pci_hc_driver,
	}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
230
231/* pci driver glue; this is a "new style" PCI driver module */
232static struct pci_driver ohci_pci_driver = {
233 .name = (char *) hcd_name,
234 .id_table = pci_ids,
235
236 .probe = usb_hcd_pci_probe,
237 .remove = usb_hcd_pci_remove,
238
239#ifdef CONFIG_PM
240 .suspend = usb_hcd_pci_suspend,
241 .resume = usb_hcd_pci_resume,
242#endif
243};
244
245
/* Module init: announce the driver and register with the PCI core. */
static int __init ohci_hcd_pci_init (void)
{
	printk (KERN_DEBUG "%s: " DRIVER_INFO " (PCI)\n", hcd_name);
	if (usb_disabled())
		return -ENODEV;

	pr_debug ("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
		sizeof (struct ed), sizeof (struct td));
	return pci_register_driver (&ohci_pci_driver);
}
module_init (ohci_hcd_pci_init);
257
258/*-------------------------------------------------------------------------*/
259
/* Module exit: unregister from the PCI core. */
static void __exit ohci_hcd_pci_cleanup (void)
{
	pci_unregister_driver (&ohci_pci_driver);
}
module_exit (ohci_hcd_pci_cleanup);
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
new file mode 100644
index 000000000000..17964c39d06a
--- /dev/null
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -0,0 +1,234 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 * (C) Copyright 2002 Hewlett-Packard Company
7 * (C) Copyright 2003-2005 MontaVista Software Inc.
8 *
9 * Bus Glue for PPC On-Chip OHCI driver
10 * Tested on Freescale MPC5200 and IBM STB04xxx
11 *
12 * Modified by Dale Farnsworth <dale@farnsworth.org> from ohci-sa1111.c
13 *
14 * This file is licenced under the GPL.
15 */
16
17#include <asm/usb.h>
18
19/* configure so an HC device and id are always provided */
20/* always called with process context; sleeping is OK */
21
22/**
23 * usb_hcd_ppc_soc_probe - initialize On-Chip HCDs
24 * Context: !in_interrupt()
25 *
26 * Allocates basic resources for this USB host controller, and
27 * then invokes the start() method for the HCD associated with it
28 * through the hotplug entry's driver_data.
29 *
30 * Store this function in the HCD's struct pci_driver as probe().
31 */
32static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
33 struct platform_device *pdev)
34{
35 int retval;
36 struct usb_hcd *hcd;
37 struct ohci_hcd *ohci;
38 struct resource *res;
39 int irq;
40 struct usb_hcd_platform_data *pd = pdev->dev.platform_data;
41
42 pr_debug("initializing PPC-SOC USB Controller\n");
43
44 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
45 if (!res) {
46 pr_debug(__FILE__ ": no irq\n");
47 return -ENODEV;
48 }
49 irq = res->start;
50
51 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
52 if (!res) {
53 pr_debug(__FILE__ ": no reg addr\n");
54 return -ENODEV;
55 }
56
57 hcd = usb_create_hcd(driver, &pdev->dev, "PPC-SOC USB");
58 if (!hcd)
59 return -ENOMEM;
60 hcd->rsrc_start = res->start;
61 hcd->rsrc_len = res->end - res->start + 1;
62
63 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
64 pr_debug(__FILE__ ": request_mem_region failed\n");
65 retval = -EBUSY;
66 goto err1;
67 }
68
69 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
70 if (!hcd->regs) {
71 pr_debug(__FILE__ ": ioremap failed\n");
72 retval = -ENOMEM;
73 goto err2;
74 }
75
76 if (pd->start && (retval = pd->start(pdev)))
77 goto err3;
78
79 ohci = hcd_to_ohci(hcd);
80 ohci->flags |= OHCI_BIG_ENDIAN;
81 ohci_hcd_init(ohci);
82
83 retval = usb_add_hcd(hcd, irq, SA_INTERRUPT);
84 if (retval == 0)
85 return retval;
86
87 pr_debug("Removing PPC-SOC USB Controller\n");
88 if (pd && pd->stop)
89 pd->stop(pdev);
90 err3:
91 iounmap(hcd->regs);
92 err2:
93 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
94 err1:
95 usb_put_hcd(hcd);
96 return retval;
97}
98
99
100/* may be called without controller electrically present */
101/* may be called with controller, bus, and devices active */
102
103/**
104 * usb_hcd_ppc_soc_remove - shutdown processing for On-Chip HCDs
105 * @pdev: USB Host Controller being removed
106 * Context: !in_interrupt()
107 *
108 * Reverses the effect of usb_hcd_ppc_soc_probe(), first invoking
109 * the HCD's stop() method. It is always called from a thread
110 * context, normally "rmmod", "apmd", or something similar.
111 *
112 */
113static void usb_hcd_ppc_soc_remove(struct usb_hcd *hcd,
114 struct platform_device *pdev)
115{
116 struct usb_hcd_platform_data *pd = pdev->dev.platform_data;
117
118 usb_remove_hcd(hcd);
119
120 pr_debug("stopping PPC-SOC USB Controller\n");
121 if (pd && pd->stop)
122 pd->stop(pdev);
123
124 iounmap(hcd->regs);
125 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
126 usb_hcd_put(hcd);
127}
128
/* hc_driver.start: init and run the generic OHCI core. */
static int __devinit
ohci_ppc_soc_start(struct usb_hcd *hcd)
{
	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
	int		ret;

	if ((ret = ohci_init(ohci)) < 0)
		return ret;

	if ((ret = ohci_run(ohci)) < 0) {
		err("can't start %s", ohci_to_hcd(ohci)->self.bus_name);
		ohci_stop(hcd);
		return ret;
	}

	return 0;
}
146
/* hc_driver method table for PPC on-chip OHCI controllers. */
static const struct hc_driver ohci_ppc_soc_hc_driver = {
	.description =		hcd_name,
	.hcd_priv_size =	sizeof(struct ohci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			ohci_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	/*
	 * basic lifecycle operations
	 */
	.start =		ohci_ppc_soc_start,
	.stop =			ohci_stop,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		ohci_urb_enqueue,
	.urb_dequeue =		ohci_urb_dequeue,
	.endpoint_disable =	ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number =	ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	ohci_hub_status_data,
	.hub_control =		ohci_hub_control,
#ifdef	CONFIG_USB_SUSPEND
	.hub_suspend =		ohci_hub_suspend,
	.hub_resume =		ohci_hub_resume,
#endif
	.start_port_reset =	ohci_start_port_reset,
};
186
187static int ohci_hcd_ppc_soc_drv_probe(struct device *dev)
188{
189 struct platform_device *pdev = to_platform_device(dev);
190 int ret;
191
192 if (usb_disabled())
193 return -ENODEV;
194
195 ret = usb_hcd_ppc_soc_probe(&ohci_ppc_soc_hc_driver, pdev);
196 return ret;
197}
198
/* Platform-bus remove hook: tear down the HCD set up at probe time. */
static int ohci_hcd_ppc_soc_drv_remove(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct usb_hcd *hcd = dev_get_drvdata(dev);	/* hcd presumably stored in drvdata during probe — TODO confirm */

	usb_hcd_ppc_soc_remove(hcd, pdev);
	return 0;
}
207
/* Platform-bus driver binding for PPC on-chip OHCI. */
static struct device_driver ohci_hcd_ppc_soc_driver = {
	.name	= "ppc-soc-ohci",
	.bus	= &platform_bus_type,
	.probe	= ohci_hcd_ppc_soc_drv_probe,
	.remove	= ohci_hcd_ppc_soc_drv_remove,
#if	defined(CONFIG_USB_SUSPEND) || defined(CONFIG_PM)
	/* suspend/resume not implemented yet */
	/*.suspend	= ohci_hcd_ppc_soc_drv_suspend,*/
	/*.resume	= ohci_hcd_ppc_soc_drv_resume,*/
#endif
};
218
219static int __init ohci_hcd_ppc_soc_init(void)
220{
221 pr_debug(DRIVER_INFO " (PPC SOC)\n");
222 pr_debug("block sizes: ed %d td %d\n", sizeof(struct ed),
223 sizeof(struct td));
224
225 return driver_register(&ohci_hcd_ppc_soc_driver);
226}
227
/* Module exit: unregister the platform driver registered at init. */
static void __exit ohci_hcd_ppc_soc_cleanup(void)
{
	driver_unregister(&ohci_hcd_ppc_soc_driver);
}

module_init(ohci_hcd_ppc_soc_init);
module_exit(ohci_hcd_ppc_soc_cleanup);
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
new file mode 100644
index 000000000000..6f3464a95779
--- /dev/null
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -0,0 +1,383 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 * (C) Copyright 2002 Hewlett-Packard Company
7 *
8 * Bus Glue for pxa27x
9 *
10 * Written by Christopher Hoover <ch@hpl.hp.com>
11 * Based on fragments of previous driver by Russell King et al.
12 *
13 * Modified for LH7A404 from ohci-sa1111.c
14 * by Durgesh Pattamatta <pattamattad@sharpsec.com>
15 *
16 * Modified for pxa27x from ohci-lh7a404.c
17 * by Nick Bane <nick@cecomputing.co.uk> 26-8-2004
18 *
19 * This file is licenced under the GPL.
20 */
21
22#include <linux/device.h>
23#include <asm/mach-types.h>
24#include <asm/hardware.h>
25#include <asm/arch/pxa-regs.h>
26
27
28#define PMM_NPS_MODE 1
29#define PMM_GLOBAL_MODE 2
30#define PMM_PERPORT_MODE 3
31
32#define PXA_UHC_MAX_PORTNUM 3
33
34#define UHCRHPS(x) __REG2( 0x4C000050, (x)<<2 )
35
36static int pxa27x_ohci_pmm_state;
37
38/*
39 PMM_NPS_MODE -- PMM Non-power switching mode
40 Ports are powered continuously.
41
42 PMM_GLOBAL_MODE -- PMM global switching mode
43 All ports are powered at the same time.
44
45 PMM_PERPORT_MODE -- PMM per port switching mode
46 Ports are powered individually.
47 */
48static int pxa27x_ohci_select_pmm( int mode )
49{
50 pxa27x_ohci_pmm_state = mode;
51
52 switch ( mode ) {
53 case PMM_NPS_MODE:
54 UHCRHDA |= RH_A_NPS;
55 break;
56 case PMM_GLOBAL_MODE:
57 UHCRHDA &= ~(RH_A_NPS & RH_A_PSM);
58 break;
59 case PMM_PERPORT_MODE:
60 UHCRHDA &= ~(RH_A_NPS);
61 UHCRHDA |= RH_A_PSM;
62
63 /* Set port power control mask bits, only 3 ports. */
64 UHCRHDB |= (0x7<<17);
65 break;
66 default:
67 printk( KERN_ERR
68 "Invalid mode %d, set to non-power switch mode.\n",
69 mode );
70
71 pxa27x_ohci_pmm_state = PMM_NPS_MODE;
72 UHCRHDA |= RH_A_NPS;
73 }
74
75 return 0;
76}
77
78/*
79 If you select PMM_PERPORT_MODE, you should set the port power
80 */
81static int pxa27x_ohci_set_port_power( int port )
82{
83 if ( (pxa27x_ohci_pmm_state==PMM_PERPORT_MODE)
84 && (port>0) && (port<PXA_UHC_MAX_PORTNUM) ) {
85 UHCRHPS(port) |= 0x100;
86 return 0;
87 }
88 return -1;
89}
90
91/*
92 If you select PMM_PERPORT_MODE, you should set the port power
93 */
94static int pxa27x_ohci_clear_port_power( int port )
95{
96 if ( (pxa27x_ohci_pmm_state==PMM_PERPORT_MODE)
97 && (port>0) && (port<PXA_UHC_MAX_PORTNUM) ) {
98 UHCRHPS(port) |= 0x200;
99 return 0;
100 }
101
102 return -1;
103}
104
105extern int usb_disabled(void);
106
107/*-------------------------------------------------------------------------*/
108
/* Power up and reset the PXA27x USB host block.  The register write
 * ordering here follows the hardware bring-up sequence; do not reorder.
 */
static void pxa27x_start_hc(struct platform_device *dev)
{
	/* enable the USB host clock before touching its registers */
	pxa_set_cken(CKEN10_USBHOST, 1);

	/* pulse FHR (force host reset); the 11us delay presumably meets
	 * the minimum hold time from the PXA27x manual -- TODO confirm */
	UHCHR |= UHCHR_FHR;
	udelay(11);
	UHCHR &= ~UHCHR_FHR;

	/* start a bus-interface reset and spin until hardware clears it */
	UHCHR |= UHCHR_FSBIR;
	while (UHCHR & UHCHR_FSBIR)
		cpu_relax();

	/* This could be properly abstracted away through the
	   device data the day more machines are supported and
	   their differences can be figured out correctly. */
	if (machine_is_mainstone()) {
		/* setup Port1 GPIO pin. */
		pxa_gpio_mode( 88 | GPIO_ALT_FN_1_IN);	/* USBHPWR1 */
		pxa_gpio_mode( 89 | GPIO_ALT_FN_2_OUT);	/* USBHPEN1 */

		/* Set the Power Control Polarity Low and Power Sense
		   Polarity Low to active low. Supply power to USB ports. */
		UHCHR = (UHCHR | UHCHR_PCPL | UHCHR_PSPL) &
			~(UHCHR_SSEP1 | UHCHR_SSEP2 | UHCHR_SSEP3 | UHCHR_SSE);
	}

	/* take the whole host controller out of sleep standby */
	UHCHR &= ~UHCHR_SSE;

	/* enable port-resume and remote-wakeup interrupts */
	UHCHIE = (UHCHIE_UPRIE | UHCHIE_RWIE);
}
139
/* Reset and power down the PXA27x USB host block (reverse of
 * pxa27x_start_hc); ordering is significant.
 */
static void pxa27x_stop_hc(struct platform_device *dev)
{
	/* pulse FHR to force a host reset, as in start */
	UHCHR |= UHCHR_FHR;
	udelay(11);
	UHCHR &= ~UHCHR_FHR;

	/* write the host controller command/status register, then let the
	 * hardware settle -- NOTE(review): looks like a controller reset
	 * request; confirm bit 0 semantics against the PXA27x manual */
	UHCCOMS |= 1;
	udelay(10);

	/* finally gate the USB host clock */
	pxa_set_cken(CKEN10_USBHOST, 0);
}
151
152
153/*-------------------------------------------------------------------------*/
154
155/* configure so an HC device and id are always provided */
156/* always called with process context; sleeping is OK */
157
158
159/**
160 * usb_hcd_pxa27x_probe - initialize pxa27x-based HCDs
161 * Context: !in_interrupt()
162 *
163 * Allocates basic resources for this USB host controller, and
164 * then invokes the start() method for the HCD associated with it
165 * through the hotplug entry's driver_data.
166 *
167 */
168int usb_hcd_pxa27x_probe (const struct hc_driver *driver,
169 struct platform_device *dev)
170{
171 int retval;
172 struct usb_hcd *hcd;
173
174 if (dev->resource[1].flags != IORESOURCE_IRQ) {
175 pr_debug ("resource[1] is not IORESOURCE_IRQ");
176 return -ENOMEM;
177 }
178
179 hcd = usb_create_hcd (driver, &dev->dev, "pxa27x");
180 if (!hcd)
181 return -ENOMEM;
182 hcd->rsrc_start = dev->resource[0].start;
183 hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
184
185 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
186 pr_debug("request_mem_region failed");
187 retval = -EBUSY;
188 goto err1;
189 }
190
191 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
192 if (!hcd->regs) {
193 pr_debug("ioremap failed");
194 retval = -ENOMEM;
195 goto err2;
196 }
197
198 pxa27x_start_hc(dev);
199
200 /* Select Power Management Mode */
201 pxa27x_ohci_select_pmm( PMM_PERPORT_MODE );
202
203 /* If choosing PMM_PERPORT_MODE, we should set the port power before we use it. */
204 if (pxa27x_ohci_set_port_power(1) < 0)
205 printk(KERN_ERR "Setting port 1 power failed.\n");
206
207 if (pxa27x_ohci_clear_port_power(2) < 0)
208 printk(KERN_ERR "Setting port 2 power failed.\n");
209
210 if (pxa27x_ohci_clear_port_power(3) < 0)
211 printk(KERN_ERR "Setting port 3 power failed.\n");
212
213 ohci_hcd_init(hcd_to_ohci(hcd));
214
215 retval = usb_add_hcd(hcd, dev->resource[1].start, SA_INTERRUPT);
216 if (retval == 0)
217 return retval;
218
219 pxa27x_stop_hc(dev);
220 iounmap(hcd->regs);
221 err2:
222 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
223 err1:
224 usb_put_hcd(hcd);
225 return retval;
226}
227
228
229/* may be called without controller electrically present */
230/* may be called with controller, bus, and devices active */
231
232/**
233 * usb_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
234 * @dev: USB Host Controller being removed
235 * Context: !in_interrupt()
236 *
237 * Reverses the effect of usb_hcd_pxa27x_probe(), first invoking
238 * the HCD's stop() method. It is always called from a thread
239 * context, normally "rmmod", "apmd", or something similar.
240 *
241 */
void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *dev)
{
	/* teardown strictly reverses probe: unregister from usbcore first
	 * (quiesces the controller), then power down the silicon, then
	 * release MMIO mapping/region, and finally drop the HCD ref */
	usb_remove_hcd(hcd);
	pxa27x_stop_hc(dev);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
}
250
251/*-------------------------------------------------------------------------*/
252
253static int __devinit
254ohci_pxa27x_start (struct usb_hcd *hcd)
255{
256 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
257 int ret;
258
259 ohci_dbg (ohci, "ohci_pxa27x_start, ohci:%p", ohci);
260
261 if ((ret = ohci_init(ohci)) < 0)
262 return ret;
263
264 if ((ret = ohci_run (ohci)) < 0) {
265 err ("can't start %s", hcd->self.bus_name);
266 ohci_stop (hcd);
267 return ret;
268 }
269
270 return 0;
271}
272
273/*-------------------------------------------------------------------------*/
274
275static const struct hc_driver ohci_pxa27x_hc_driver = {
276 .description = hcd_name,
277 .product_desc = "PXA27x OHCI",
278 .hcd_priv_size = sizeof(struct ohci_hcd),
279
280 /*
281 * generic hardware linkage
282 */
283 .irq = ohci_irq,
284 .flags = HCD_USB11 | HCD_MEMORY,
285
286 /*
287 * basic lifecycle operations
288 */
289 .start = ohci_pxa27x_start,
290 .stop = ohci_stop,
291
292 /*
293 * managing i/o requests and associated device resources
294 */
295 .urb_enqueue = ohci_urb_enqueue,
296 .urb_dequeue = ohci_urb_dequeue,
297 .endpoint_disable = ohci_endpoint_disable,
298
299 /*
300 * scheduling support
301 */
302 .get_frame_number = ohci_get_frame,
303
304 /*
305 * root hub support
306 */
307 .hub_status_data = ohci_hub_status_data,
308 .hub_control = ohci_hub_control,
309#ifdef CONFIG_USB_SUSPEND
310 .hub_suspend = ohci_hub_suspend,
311 .hub_resume = ohci_hub_resume,
312#endif
313};
314
315/*-------------------------------------------------------------------------*/
316
317static int ohci_hcd_pxa27x_drv_probe(struct device *dev)
318{
319 struct platform_device *pdev = to_platform_device(dev);
320 int ret;
321
322 pr_debug ("In ohci_hcd_pxa27x_drv_probe");
323
324 if (usb_disabled())
325 return -ENODEV;
326
327 ret = usb_hcd_pxa27x_probe(&ohci_pxa27x_hc_driver, pdev);
328 return ret;
329}
330
/* Driver-model remove hook: tear down the HCD bound to this device. */
static int ohci_hcd_pxa27x_drv_remove(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	usb_hcd_pxa27x_remove(hcd, to_platform_device(dev));
	return 0;
}
339
340static int ohci_hcd_pxa27x_drv_suspend(struct device *dev, u32 state, u32 level)
341{
342// struct platform_device *pdev = to_platform_device(dev);
343// struct usb_hcd *hcd = dev_get_drvdata(dev);
344 printk("%s: not implemented yet\n", __FUNCTION__);
345
346 return 0;
347}
348
349static int ohci_hcd_pxa27x_drv_resume(struct device *dev, u32 state)
350{
351// struct platform_device *pdev = to_platform_device(dev);
352// struct usb_hcd *hcd = dev_get_drvdata(dev);
353 printk("%s: not implemented yet\n", __FUNCTION__);
354
355 return 0;
356}
357
358
/* Platform-bus binding for the pxa27x OHCI glue; matched by name against
 * the "pxa27x-ohci" platform device registered by SoC/board code.
 */
static struct device_driver ohci_hcd_pxa27x_driver = {
	.name		= "pxa27x-ohci",
	.bus		= &platform_bus_type,
	.probe		= ohci_hcd_pxa27x_drv_probe,
	.remove		= ohci_hcd_pxa27x_drv_remove,
	.suspend	= ohci_hcd_pxa27x_drv_suspend,
	.resume		= ohci_hcd_pxa27x_drv_resume,
};
367
368static int __init ohci_hcd_pxa27x_init (void)
369{
370 pr_debug (DRIVER_INFO " (pxa27x)");
371 pr_debug ("block sizes: ed %d td %d\n",
372 sizeof (struct ed), sizeof (struct td));
373
374 return driver_register(&ohci_hcd_pxa27x_driver);
375}
376
/* Module exit: unregister the platform driver registered at init. */
static void __exit ohci_hcd_pxa27x_cleanup (void)
{
	driver_unregister(&ohci_hcd_pxa27x_driver);
}
381
/* Module entry points: register on load, unregister on unload. */
module_init (ohci_hcd_pxa27x_init);
module_exit (ohci_hcd_pxa27x_cleanup);
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
new file mode 100644
index 000000000000..c90114a77277
--- /dev/null
+++ b/drivers/usb/host/ohci-q.c
@@ -0,0 +1,1107 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 *
7 * This file is licenced under the GPL.
8 */
9
10static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
11{
12 int last = urb_priv->length - 1;
13
14 if (last >= 0) {
15 int i;
16 struct td *td;
17
18 for (i = 0; i <= last; i++) {
19 td = urb_priv->td [i];
20 if (td)
21 td_free (hc, td);
22 }
23 }
24
25 list_del (&urb_priv->pending);
26 kfree (urb_priv);
27}
28
29/*-------------------------------------------------------------------------*/
30
31/*
32 * URB goes back to driver, and isn't reissued.
33 * It's completely gone from HC data structures.
34 * PRECONDITION: ohci lock held, irqs blocked.
35 */
/* Give one completed URB back to its driver: free HCD bookkeeping,
 * finalize urb->status, adjust bandwidth accounting, then call the
 * completion with ohci->lock dropped (the completion may resubmit into
 * this HCD).  Caller holds ohci->lock with irqs blocked.
 */
static void
finish_urb (struct ohci_hcd *ohci, struct urb *urb, struct pt_regs *regs)
__releases(ohci->lock)
__acquires(ohci->lock)
{
	// ASSERT (urb->hcpriv != 0);

	/* all TDs are done (or being abandoned); drop HCD-private state */
	urb_free_priv (ohci, urb->hcpriv);
	urb->hcpriv = NULL;

	/* urb->lock guards status against concurrent unlink requests */
	spin_lock (&urb->lock);
	if (likely (urb->status == -EINPROGRESS))
		urb->status = 0;
	/* report short control reads right even though the data TD always
	 * has TD_R set.  (much simpler, but creates the 1-td limit.)
	 */
	if (unlikely (urb->transfer_flags & URB_SHORT_NOT_OK)
			&& unlikely (usb_pipecontrol (urb->pipe))
			&& urb->actual_length < urb->transfer_buffer_length
			&& usb_pipein (urb->pipe)
			&& urb->status == 0) {
		urb->status = -EREMOTEIO;
	}
	spin_unlock (&urb->lock);

	/* periodic URBs count against reserved bus bandwidth */
	switch (usb_pipetype (urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef OHCI_VERBOSE_DEBUG
	urb_print (urb, "RET", usb_pipeout (urb->pipe));
#endif

	/* urb->complete() can reenter this HCD */
	spin_unlock (&ohci->lock);
	usb_hcd_giveback_urb (ohci_to_hcd(ohci), urb, regs);
	spin_lock (&ohci->lock);

	/* stop periodic dma if it's not needed */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}
}
86
87
88/*-------------------------------------------------------------------------*
89 * ED handling functions
90 *-------------------------------------------------------------------------*/
91
92/* search for the right schedule branch to use for a periodic ed.
93 * does some load balancing; returns the branch, or negative errno.
94 */
95static int balance (struct ohci_hcd *ohci, int interval, int load)
96{
97 int i, branch = -ENOSPC;
98
99 /* iso periods can be huge; iso tds specify frame numbers */
100 if (interval > NUM_INTS)
101 interval = NUM_INTS;
102
103 /* search for the least loaded schedule branch of that period
104 * that has enough bandwidth left unreserved.
105 */
106 for (i = 0; i < interval ; i++) {
107 if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
108#if 1 /* CONFIG_USB_BANDWIDTH */
109 int j;
110
111 /* usb 1.1 says 90% of one frame */
112 for (j = i; j < NUM_INTS; j += interval) {
113 if ((ohci->load [j] + load) > 900)
114 break;
115 }
116 if (j < NUM_INTS)
117 continue;
118#endif
119 branch = i;
120 }
121 }
122 return branch;
123}
124
125/*-------------------------------------------------------------------------*/
126
127/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place. most iso devices use
129 * 1msec periods, but that's not required.
130 */
/* Insert a periodic (interrupt/iso) ED into every schedule branch it
 * belongs to, keeping each branch sorted slow-before-fast, and charge its
 * load against those frames.  ed->branch must already be chosen (balance).
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
	unsigned	i;

	ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		/* prev/prev_p track the CPU-side and HC-side links in step */
		struct ed	**prev = &ohci->periodic [i];
		__hc32		*prev_p = &ohci->hcca->int_table [i];
		struct ed	*here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		/* ed == here means a faster branch already linked it */
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			/* barrier: ED must be fully written before the HC
			 * can reach it through the int_table pointer */
			wmb ();
			*prev = ed;
			*prev_p = cpu_to_hc32(ohci, ed->dma);
			wmb();
		}
		ohci->load [i] += ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
}
168
169/* link an ed into one of the HC chains */
170
/* Link an ED into the appropriate HC chain (control, bulk, or periodic
 * tree) and mark it operational.  Returns 0, -EAGAIN while the HC is
 * quiescing, or a negative errno if no periodic bandwidth is available.
 * Caller holds ohci->lock.
 */
static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
	int	branch;

	if (ohci_to_hcd(ohci)->state == HC_STATE_QUIESCING)
		return -EAGAIN;

	ed->state = ED_OPER;
	ed->ed_prev = NULL;
	ed->ed_next = NULL;
	ed->hwNextED = 0;
	/* ED fields must be visible before any list pointer exposes it */
	wmb ();

	/* we care about rm_list when setting CLE/BLE in case the HC was at
	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
	 * yet.  finish_unlinks() restarts as needed, some upcoming INTR_SF.
	 *
	 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
	 * periodic ones are singly linked (ed_next). that's because the
	 * periodic schedule encodes a tree like figure 3-5 in the ohci
	 * spec:  each qh can have several "previous" nodes, and the tree
	 * doesn't have unused/idle descriptors.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* empty chain: point the HC's head register at this ED;
		 * otherwise append after the current tail */
		if (ohci->ed_controltail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
			ohci_writel (ohci, ed->dma,
					&ohci->regs->ed_controlhead);
		} else {
			ohci->ed_controltail->ed_next = ed;
			ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_controltail;
		/* (re)enable control list processing only when safe */
		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_CLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_controltail = ed;
		break;

	case PIPE_BULK:
		/* same scheme as control, on the bulk chain/registers */
		if (ohci->ed_bulktail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
			ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
		} else {
			ohci->ed_bulktail->ed_next = ed;
			ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_bulktail;
		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_BLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_bulktail = ed;
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		branch = balance (ohci, ed->interval, ed->load);
		if (branch < 0) {
			ohci_dbg (ohci,
				"ERR %d, interval %d msecs, load %d\n",
				branch, ed->interval, ed->load);
			// FIXME if there are TDs queued, fail them!
			return branch;
		}
		ed->branch = branch;
		periodic_link (ohci, ed);
	}

	/* the HC may not see the schedule updates yet, but if it does
	 * then they'll be properly ordered.
	 */
	return 0;
}
256
257/*-------------------------------------------------------------------------*/
258
259/* scan the periodic table to find and unlink this ED */
/* scan the periodic table to find and unlink this ED, releasing the
 * bandwidth it reserved in each affected frame (inverse of periodic_link)
 */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	int	i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	*temp;
		struct ed	**prev = &ohci->periodic [i];
		__hc32		*prev_p = &ohci->hcca->int_table [i];

		/* walk CPU-side and HC-side links in step until we find ed */
		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		/* splice it out of both lists (it may already be gone if a
		 * faster branch shared this segment of the tree) */
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ohci->load [i] -= ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;

	ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}
285
286/* unlink an ed from one of the HC chains.
287 * just the link to the ed is unlinked.
288 * the link from the ed still points to another operational ed or 0
289 * so the HC can eventually finish the processing of the unlinked ed
290 * (assuming it already started that, which needn't be true).
291 *
292 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
293 * it won't. ED_SKIP means the HC will finish its current transaction,
294 * but won't start anything new. The TD queue may still grow; device
295 * drivers don't know about this HCD-internal state.
296 *
297 * When the HC can't see the ED, something changes ED_UNLINK to one of:
298 *
299 * - ED_OPER: when there's any request queued, the ED gets rescheduled
300 * immediately. HC should be working on them.
301 *
302 * - ED_IDLE: when there's no TD queue. there's no reason for the HC
303 * to care about this ED; safe to disable the endpoint.
304 *
305 * When finish_unlinks() runs later, after SOF interrupt, it will often
306 * complete one or more URB unlinks before making that state change.
307 */
/* Unlink an ED from its HC chain and mark it ED_UNLINK; the HC may still
 * be traversing it, so only the links *to* the ED are cut (see the block
 * comment above for the full state machine).  Caller holds ohci->lock.
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
	/* tell the HC to skip this ED before we start unlinking it */
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->state = ED_UNLINK;

	/* To deschedule something from the control or bulk list, just
	 * clear CLE/BLE and wait.  There's no safe way to scrub out list
	 * head/current registers until later, and "later" isn't very
	 * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
	 * the HC is reading the ED queues (while we modify them).
	 *
	 * For now, ed_schedule() is "later".  It might be good paranoia
	 * to scrub those registers in finish_unlinks(), in case of bugs
	 * that make the HC try to use them.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			/* ed was the head; retarget or disable the list */
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_CLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// a ohci_readl() later syncs CLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_controlhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_controltail == ed) {
			ohci->ed_controltail = ed->ed_prev;
			if (ohci->ed_controltail)
				ohci->ed_controltail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	case PIPE_BULK:
		/* same scheme as control, on the bulk chain/registers */
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_BLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// a ohci_readl() later syncs BLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_bulkhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_bulktail == ed) {
			ohci->ed_bulktail = ed->ed_prev;
			if (ohci->ed_bulktail)
				ohci->ed_bulktail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		periodic_unlink (ohci, ed);
		break;
	}
}
384
385
386/*-------------------------------------------------------------------------*/
387
388/* get and maybe (re)init an endpoint. init _should_ be done only as part
389 * of enumeration, usb_set_configuration() or usb_set_interface().
390 */
/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 *
 * Returns the endpoint's cached ED, allocating and initializing a new
 * one (plus its dummy tail TD) on first use; NULL on allocation failure.
 */
static struct ed *ed_get (
	struct ohci_hcd		*ohci,
	struct usb_host_endpoint *ep,
	struct usb_device	*udev,
	unsigned int		pipe,
	int			interval
) {
	struct ed		*ed;
	unsigned long		flags;

	spin_lock_irqsave (&ohci->lock, flags);

	if (!(ed = ep->hcpriv)) {
		struct td	*td;
		int		is_out;
		u32		info;

		ed = ed_alloc (ohci, GFP_ATOMIC);
		if (!ed) {
			/* out of memory */
			goto done;
		}

		/* dummy td; end of td list for ed */
		td = td_alloc (ohci, GFP_ATOMIC);
		if (!td) {
			/* out of memory */
			ed_free (ohci, ed);
			ed = NULL;
			goto done;
		}
		ed->dummy = td;
		/* head == tail means the HC sees an empty TD queue */
		ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
		ed->state = ED_IDLE;

		is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);

		/* FIXME usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice (pipe);
		ed->type = usb_pipetype(pipe);

		/* pack endpoint number and max packet size into hwINFO,
		 * per the OHCI ED dword0 layout */
		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
		info |= le16_to_cpu(ep->desc.wMaxPacketSize) << 16;
		if (udev->speed == USB_SPEED_LOW)
			info |= ED_LOWSPEED;
		/* only control transfers store pids in tds */
		if (ed->type != PIPE_CONTROL) {
			info |= is_out ? ED_OUT : ED_IN;
			if (ed->type != PIPE_BULK) {
				/* periodic transfers... */
				if (ed->type == PIPE_ISOCHRONOUS)
					info |= ED_ISO;
				else if (interval > 32)	/* iso can be bigger */
					interval = 32;
				ed->interval = interval;
				/* reserve worst-case bus time, in usecs */
				ed->load = usb_calc_bus_time (
					udev->speed, !is_out,
					ed->type == PIPE_ISOCHRONOUS,
					le16_to_cpu(ep->desc.wMaxPacketSize))
						/ 1000;
			}
		}
		ed->hwINFO = cpu_to_hc32(ohci, info);

		ep->hcpriv = ed;
	}

done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return ed;
}
465
466/*-------------------------------------------------------------------------*/
467
468/* request unlinking of an endpoint from an operational HC.
469 * put the ep on the rm_list
470 * real work is done at the next start frame (SF) hardware interrupt
471 * caller guarantees HCD is running, so hardware access is safe,
472 * and that ed->state is ED_OPER
473 */
/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	/* ED_DEQUEUE marks this as an explicit unlink request */
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
	ed_deschedule (ohci, ed);

	/* rm_list is just singly linked, for simplicity */
	ed->ed_next = ohci->ed_rm_list;
	ed->ed_prev = NULL;
	ohci->ed_rm_list = ed;

	/* enable SOF interrupt */
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
	// flush those writes, and get latest HCCA contents
	(void) ohci_readl (ohci, &ohci->regs->control);

	/* SF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave.  frame_no wraps every 2^16 msec, and changes right before
	 * SF is triggered.
	 */
	ed->tick = ohci_frame_no(ohci) + 1;

}
498
499/*-------------------------------------------------------------------------*
500 * TD handling functions
501 *-------------------------------------------------------------------------*/
502
503/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
504
/* Queue one TD on an ED by filling in the ED's current dummy TD and
 * making the caller's fresh TD the new dummy (OHCI spec 5.2.8.2); this
 * lets the tail pointer advance without the HC ever seeing a half-built
 * TD.  A zero "len" makes a zero-length packet (CBP/BE = 0).
 */
static void
td_fill (struct ohci_hcd *ohci, u32 info,
	dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td		*td, *td_pt;
	struct urb_priv		*urb_priv = urb->hcpriv;
	int			is_iso = info & TD_ISO;
	int			hash;

	// ASSERT (index < urb_priv->length);

	/* aim for only one interrupt per urb.  mostly applies to control
	 * and iso; other urbs rarely need more than one TD per urb.
	 * this way, only final tds (or ones with an error) cause IRQs.
	 * at least immediately; use DI=6 in case any control request is
	 * tempted to die part way through.  (and to force the hc to flush
	 * its donelist soonish, even on unlink paths.)
	 *
	 * NOTE: could delay interrupts even for the last TD, and get fewer
	 * interrupts ... increasing per-urb latency by sharing interrupts.
	 * Drivers that queue bulk urbs may request that behavior.
	 */
	if (index != (urb_priv->length - 1)
			|| (urb->transfer_flags & URB_NO_INTERRUPT))
		info |= TD_DI_SET (6);

	/* use this td as the next dummy */
	td_pt = urb_priv->td [index];

	/* fill the old dummy TD */
	td = urb_priv->td [index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	td->hwINFO = cpu_to_hc32 (ohci, info);
	if (is_iso) {
		/* iso TDs use a page-aligned CBP plus per-packet PSW offsets
		 * (offset | 0xE000 = CC "not accessed" in the PSW) */
		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
						(data & 0x0FFF) | 0xE000);
		td->ed->last_iso = info & 0xffff;
	} else {
		td->hwCBP = cpu_to_hc32 (ohci, data);
	}
	if (data)
		td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
	else
		td->hwBE = 0;
	td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);

	/* append to queue */
	list_add_tail (&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC (td->td_dma);
	td->td_hash = ohci->td_hash [hash];
	ohci->td_hash [hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb ();
	td->ed->hwTailP = td->hwNextTD;
}
574
575/*-------------------------------------------------------------------------*/
576
577/* Prepare all TDs of a transfer, and queue them onto the ED.
578 * Caller guarantees HC is active.
579 * Usually the ED is already on the schedule, so TDs might be
580 * processed as soon as they're queued.
581 */
/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
	struct ohci_hcd	*ohci,
	struct urb	*urb
) {
	struct urb_priv	*urb_priv = urb->hcpriv;
	dma_addr_t	data;
	int		data_len = urb->transfer_buffer_length;
	int		cnt = 0;
	u32		info = 0;
	int		is_out = usb_pipeout (urb->pipe);
	int		periodic = 0;

	/* OHCI handles the bulk/interrupt data toggles itself.  We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */
	if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
		usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
			is_out, 1);
		/* clear the carry bit so the ED restarts at DATA0 */
		urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
	}

	urb_priv->td_cnt = 0;
	list_add (&urb_priv->pending, &ohci->pending);

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {

	/* Bulk and interrupt are identical except for where in the schedule
	 * their EDs live.
	 */
	case PIPE_INTERRUPT:
		/* ... and periodic urbs have extra accounting */
		periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
		/* FALLTHROUGH */
	case PIPE_BULK:
		info = is_out
			? TD_T_TOGGLE | TD_CC | TD_DP_OUT
			: TD_T_TOGGLE | TD_CC | TD_DP_IN;
		/* TDs _could_ transfer up to 8K each */
		while (data_len > 4096) {
			td_fill (ohci, info, data, 4096, urb, cnt);
			data += 4096;
			data_len -= 4096;
			cnt++;
		}
		/* maybe avoid ED halt on final TD short read */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			info |= TD_R;
		td_fill (ohci, info, data, data_len, urb, cnt);
		cnt++;
		/* driver-requested trailing zero-length packet for OUT */
		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& cnt < urb_priv->length) {
			td_fill (ohci, info, 0, 0, urb, cnt);
			cnt++;
		}
		/* maybe kickstart bulk list */
		if (urb_priv->ed->type == PIPE_BULK) {
			wmb ();
			ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		/* SETUP stage: 8-byte packet, forced DATA0 */
		info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
		td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
		if (data_len > 0) {
			info = TD_CC | TD_R | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE:  mishandles transfers >8K, some >4K */
			td_fill (ohci, info, data, data_len, urb, cnt++);
		}
		/* STATUS stage: zero-length, opposite direction, DATA1 */
		info = (is_out || data_len == 0)
			? TD_CC | TD_DP_IN | TD_T_DATA1
			: TD_CC | TD_DP_OUT | TD_T_DATA1;
		td_fill (ohci, info, data, 0, urb, cnt++);
		/* maybe kickstart control list */
		wmb ();
		ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
		break;

	/* ISO has no retransmit, so no toggle; and it uses special TDs.
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			int	frame = urb->start_frame;

			// FIXME scheduling should handle frame counter
			// roll-around ... exotic case (and OHCI has
			// a 2^16 iso range, vs other HCs max of 2^10)
			frame += cnt * urb->interval;
			frame &= 0xffff;
			td_fill (ohci, TD_CC | TD_ISO | frame,
				data + urb->iso_frame_desc [cnt].offset,
				urb->iso_frame_desc [cnt].length, urb, cnt);
		}
		periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
		break;
	}

	/* start periodic dma if needed */
	if (periodic) {
		wmb ();
		ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}

	// ASSERT (urb_priv->length == cnt);
}
706
707/*-------------------------------------------------------------------------*
708 * Done List handling functions
709 *-------------------------------------------------------------------------*/
710
/* calculate transfer length/status and update the urb
 * PRECONDITION: irqsafe (only for urb->status locking)
 */
static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
	u32	tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
	int	cc = 0;

	/* remove from the ed's shadow list; hw unlinking is the caller's job */
	list_del (&td->td_list);

	/* ISO ... drivers see per-TD length/status */
	if (tdINFO & TD_ISO) {
		u16	tdPSW = ohci_hwPSW (ohci, td, 0);
		int	dlen = 0;

		/* NOTE:  assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */

		/* condition code is the top nibble of the PSW */
		cc = (tdPSW >> 12) & 0xF;
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc [td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			/* low bits of the PSW hold the received byte count */
			dlen = tdPSW & 0x3ff;
		}
		urb->actual_length += dlen;
		urb->iso_frame_desc [td->index].actual_length = dlen;
		urb->iso_frame_desc [td->index].status = cc_to_error [cc];

		if (cc != TD_CC_NOERROR)
			ohci_vdbg (ohci,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		int	type = usb_pipetype (urb->pipe);
		u32	tdBE = hc32_to_cpup (ohci, &td->hwBE);

		cc = TD_CC_GET (tdINFO);

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;
		/* cc >= 0x0E is an hcd-private code, not a hardware error */
		if (cc != TD_CC_NOERROR && cc < 0x0E) {
			spin_lock (&urb->lock);
			if (urb->status == -EINPROGRESS)
				urb->status = cc_to_error [cc];
			spin_unlock (&urb->lock);
		}

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
			/* hwCBP == 0 means the whole buffer was transferred;
			 * hwBE addresses the buffer's last byte, hence "+ 1"
			 */
			if (td->hwCBP == 0)
				urb->actual_length += tdBE - td->data_dma + 1;
			else
				urb->actual_length +=
					  hc32_to_cpup (ohci, &td->hwCBP)
					- td->data_dma;
		}

		if (cc != TD_CC_NOERROR && cc < 0x0E)
			ohci_vdbg (ohci,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, 1 + td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}
}
790
791/*-------------------------------------------------------------------------*/
792
/* An error halted this (non-iso) ed:  un-halt it and move any remaining
 * tds of the same urb onto the software donelist (which is kept in
 * hc-reversed order).  Returns the new head of that reversed donelist.
 */
static inline struct td *
ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
{
	struct urb		*urb = td->urb;
	struct ed		*ed = td->ed;
	struct list_head	*tmp = td->td_list.next;
	/* remember the data toggle carry, so rewriting hwHeadP keeps it */
	__hc32			toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);

	/* put any later tds from this urb onto the donelist, after 'td',
	 * order won't matter here: no errors, and nothing was transferred.
	 * also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td	*next;
		__hc32		info;

		next = list_entry (tmp, struct td, td_list);
		tmp = next->td_list.next;

		/* stop at the first td that belongs to some other urb */
		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */
		/* mark done with no error, so td_done() retires it cleanly */
		info = next->hwINFO;
		info |= cpu_to_hc32 (ohci, TD_DONE);
		info &= ~cpu_to_hc32 (ohci, TD_CC);
		next->hwINFO = info;

		/* prepend to the reversed donelist */
		next->next_dl_td = rev;
		rev = next;

		/* advance the hw head past this td, preserving toggle carry */
		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting:  report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol (urb->pipe))
			break;
		/* fallthrough */
	default:
		ohci_dbg (ohci,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			hc32_to_cpu (ohci, td->hwINFO),
			cc, cc_to_error [cc]);
	}

	return rev;
}
865
/* replies to the request have to be on a FIFO basis so
 * we unreverse the hc-reversed done-list
 */
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
	u32		td_dma;
	struct td	*td_rev = NULL;
	struct td	*td = NULL;

	/* claim the donelist and zero done_head so the hc can post more;
	 * the wmb() orders that clear against our reads of the tds
	 */
	td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
	ohci->hcca->done_head = 0;
	wmb();

	/* get TD from hc's singly linked list, and
	 * prepend to ours. ed->td_list changes later.
	 */
	while (td_dma) {
		int		cc;

		/* map dma address back to its td (lookup may fail) */
		td = dma_to_td (ohci, td_dma);
		if (!td) {
			ohci_err (ohci, "bad entry %8x\n", td_dma);
			break;
		}

		td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
		cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));

		/* Non-iso endpoints can halt on error; un-halt,
		 * and dequeue any other TDs from this urb.
		 * No other TD could have caused the halt.
		 */
		if (cc != TD_CC_NOERROR
				&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
			td_rev = ed_halted (ohci, td, cc, td_rev);

		/* prepend, reversing the hc's LIFO order back to FIFO */
		td->next_dl_td = td_rev;
		td_rev = td;
		td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
	}
	return td_rev;
}
908
909/*-------------------------------------------------------------------------*/
910
/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
{
	struct ed	*ed, **last;

rescan_all:
	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head	*entry, *tmp;
		int			completed, modified;
		__hc32			*prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely (regs && HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
			/* hc may still touch this ed until ed->tick passes */
			if (tick_before (tick, ed->tick)) {
skip_ed:
				last = &ed->ed_next;
				continue;
			}

			if (!list_empty (&ed->td_list)) {
				struct td	*td;
				u32		head;

				td = list_entry (ed->td_list.next, struct td,
							td_list);
				head = hc32_to_cpu (ohci, ed->hwHeadP) &
								TD_MASK;

				/* INTR_WDH may need to clean up first */
				if (td->td_dma != head)
					goto skip_ed;
			}
		}

		/* reentrancy:  if we drop the schedule lock, someone might
		 * have modified this list.  normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_next;
		ed->ed_next = NULL;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed.  But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe (entry, tmp, &ed->td_list) {
			struct td	*td;
			struct urb	*urb;
			urb_priv_t	*urb_priv;
			__hc32		savebits;

			td = list_entry (entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			/* still in progress --> not being unlinked; skip */
			if (urb->status == -EINPROGRESS) {
				prev = &td->hwNextTD;
				continue;
			}

			/* patch pointer hc uses, keeping the hw status bits
			 * in the low bits of the overwritten link word
			 */
			savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* HC may have partly processed this TD */
			td_done (ohci, urb, td);
			urb_priv->td_cnt++;

			/* if URB is done, clean up */
			if (urb_priv->td_cnt == urb_priv->length) {
				modified = completed = 1;
				finish_urb (ohci, urb, regs);
			}
		}
		/* a completion may have unlinked more urbs on this ed */
		if (completed && !list_empty (&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
		ed->hwNextED = 0;
		wmb ();
		ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);

		/* but if there's work queued, reschedule */
		if (!list_empty (&ed->td_list)) {
			if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
				ed_schedule (ohci, ed);
		}

		/* a completion ran; our list snapshot may be stale */
		if (modified)
			goto rescan_all;
	}

	/* maybe reenable control and bulk lists */
	if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
			&& ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
			&& !ohci->ed_rm_list) {
		u32	command = 0, control = 0;

		if (ohci->ed_controltail) {
			command |= OHCI_CLF;
			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
				control |= OHCI_CTRL_CLE;
				/* reset current pointer before re-enabling */
				ohci_writel (ohci, 0,
					&ohci->regs->ed_controlcurrent);
			}
		}
		if (ohci->ed_bulktail) {
			command |= OHCI_BLF;
			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
				control |= OHCI_CTRL_BLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_bulkcurrent);
			}
		}

		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
		if (control) {
			ohci->hc_control |= control;
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		if (command)
			ohci_writel (ohci, command, &ohci->regs->cmdstatus);
	}
}
1049
1050
1051
1052/*-------------------------------------------------------------------------*/
1053
/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 */
static void
dl_done_list (struct ohci_hcd *ohci, struct pt_regs *regs)
{
	struct td	*td = dl_reverse_done_list (ohci);

	while (td) {
		struct td	*td_next = td->next_dl_td;
		struct urb	*urb = td->urb;
		urb_priv_t	*urb_priv = urb->hcpriv;
		struct ed	*ed = td->ed;

		/* update URB's length and status from TD */
		td_done (ohci, urb, td);
		urb_priv->td_cnt++;

		/* If all this urb's TDs are done, call complete() */
		if (urb_priv->td_cnt == urb_priv->length)
			finish_urb (ohci, urb, regs);

		/* clean schedule:  unlink EDs that are no longer busy */
		if (list_empty (&ed->td_list)) {
			if (ed->state == ED_OPER)
				start_ed_unlink (ohci, ed);

		/* ... reenabling halted EDs only after fault cleanup */
		} else if ((ed->hwINFO & cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE))
					== cpu_to_hc32 (ohci, ED_SKIP)) {
			/* SKIP (set by ed_halted) with no DEQUEUE pending:
			 * resume the ed once its faulted tds are all retired
			 */
			td = list_entry (ed->td_list.next, struct td, td_list);
			if (!(td->hwINFO & cpu_to_hc32 (ohci, TD_DONE))) {
				ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP);
				/* ... hc may need waking-up */
				switch (ed->type) {
				case PIPE_CONTROL:
					ohci_writel (ohci, OHCI_CLF,
						&ohci->regs->cmdstatus);
					break;
				case PIPE_BULK:
					ohci_writel (ohci, OHCI_BLF,
						&ohci->regs->cmdstatus);
					break;
				}
			}
		}

		td = td_next;
	}
}
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
new file mode 100644
index 000000000000..814d2be4ee7b
--- /dev/null
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -0,0 +1,289 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 * (C) Copyright 2002 Hewlett-Packard Company
7 *
8 * SA1111 Bus Glue
9 *
10 * Written by Christopher Hoover <ch@hpl.hp.com>
11 * Based on fragments of previous driver by Rusell King et al.
12 *
13 * This file is licenced under the GPL.
14 */
15
16#include <asm/hardware.h>
17#include <asm/mach-types.h>
18#include <asm/arch/assabet.h>
19#include <asm/arch/badge4.h>
20#include <asm/hardware/sa1111.h>
21
22#ifndef CONFIG_SA1111
23#error "This file is SA-1111 bus glue. CONFIG_SA1111 must be defined."
24#endif
25
26extern int usb_disabled(void);
27
28/*-------------------------------------------------------------------------*/
29
/* Power, clock and un-reset the SA-1111 USB host block.
 * Board-specific power sense/control line polarity is selected by
 * machine type before the controller leaves reset.
 */
static void sa1111_start_hc(struct sa1111_dev *dev)
{
	unsigned int usb_rst = 0;

	printk(KERN_DEBUG __FILE__
	       ": starting SA-1111 OHCI USB Controller\n");

#ifdef CONFIG_SA1100_BADGE4
	if (machine_is_badge4()) {
		/* badge4 must switch on the 5V USB supply first */
		badge4_set_5V(BADGE4_5V_USB, 1);
	}
#endif

	/* these boards wire the power sense/control lines active-low */
	if (machine_is_xp860() ||
	    machine_has_neponset() ||
	    machine_is_pfs168() ||
	    machine_is_badge4())
		usb_rst = USB_RESET_PWRSENSELOW | USB_RESET_PWRCTRLLOW;

	/*
	 * Configure the power sense and control lines.  Place the USB
	 * host controller in reset.
	 */
	sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
		      dev->mapbase + SA1111_USB_RESET);

	/*
	 * Now, carefully enable the USB clock, and take
	 * the USB host controller out of reset.
	 */
	sa1111_enable_device(dev);
	udelay(11);
	sa1111_writel(usb_rst, dev->mapbase + SA1111_USB_RESET);
}
64
/* Reverse of sa1111_start_hc(): put the controller back into reset,
 * gate its clock, and (on badge4) cut the 5V USB supply.
 */
static void sa1111_stop_hc(struct sa1111_dev *dev)
{
	unsigned int usb_rst;
	printk(KERN_DEBUG __FILE__
	       ": stopping SA-1111 OHCI USB Controller\n");

	/*
	 * Put the USB host controller into reset.
	 * Read-modify-write keeps the board-specific power line bits.
	 */
	usb_rst = sa1111_readl(dev->mapbase + SA1111_USB_RESET);
	sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
		      dev->mapbase + SA1111_USB_RESET);

	/*
	 * Stop the USB clock.
	 */
	sa1111_disable_device(dev);

#ifdef CONFIG_SA1100_BADGE4
	if (machine_is_badge4()) {
		/* Disable power to the USB bus */
		badge4_set_5V(BADGE4_5V_USB, 0);
	}
#endif
}
90
91
92/*-------------------------------------------------------------------------*/
93
#if 0
/* disabled debugging aid: decode the SA-1111 USB_STATUS register bits */
static void dump_hci_status(struct usb_hcd *hcd, const char *label)
{
	unsigned long status = sa1111_readl(hcd->regs + SA1111_USB_STATUS);

	dbg ("%s USB_STATUS = { %s%s%s%s%s}", label,
	     ((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
	     ((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
	     ((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
	     ((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
	     ((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
}
#endif
107
108/*-------------------------------------------------------------------------*/
109
110/* configure so an HC device and id are always provided */
111/* always called with process context; sleeping is OK */
112
113
/**
 * usb_hcd_sa1111_probe - initialize SA-1111-based HCDs
 * @driver: hc_driver to bind to the new HCD
 * @dev: SA-1111 function providing the OHCI controller
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 *
 * Store this function in the HCD's struct pci_driver as probe().
 *
 * Returns 0 on success, else a negative errno (all claimed
 * resources are released on failure).
 */
int usb_hcd_sa1111_probe (const struct hc_driver *driver,
			  struct sa1111_dev *dev)
{
	struct usb_hcd *hcd;
	int retval;

	hcd = usb_create_hcd (driver, &dev->dev, "sa1111");
	if (!hcd)
		return -ENOMEM;
	hcd->rsrc_start = dev->res.start;
	hcd->rsrc_len = dev->res.end - dev->res.start + 1;

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		dbg("request_mem_region failed");
		retval = -EBUSY;
		goto err1;
	}
	/* registers are already mapped via the sa1111 device */
	hcd->regs = dev->mapbase;

	sa1111_start_hc(dev);
	ohci_hcd_init(hcd_to_ohci(hcd));

	retval = usb_add_hcd(hcd, dev->irq[1], SA_INTERRUPT);
	if (retval == 0)
		return retval;

	/* usb_add_hcd failed: undo hc start and region claim */
	sa1111_stop_hc(dev);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 err1:
	usb_put_hcd(hcd);
	return retval;
}
156
157
158/* may be called without controller electrically present */
159/* may be called with controller, bus, and devices active */
160
/**
 * usb_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
 * @hcd: host controller driver instance to tear down
 * @dev: USB Host Controller being removed
 * Context: !in_interrupt()
 *
 * Reverses the effect of usb_hcd_sa1111_probe(), first invoking
 * the HCD's stop() method.  It is always called from a thread
 * context, normally "rmmod", "apmd", or something similar.
 *
 */
void usb_hcd_sa1111_remove (struct usb_hcd *hcd, struct sa1111_dev *dev)
{
	usb_remove_hcd(hcd);		/* stops the hc, detaches the bus */
	sa1111_stop_hc(dev);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);		/* drops the probe()'s reference */
}
178
179/*-------------------------------------------------------------------------*/
180
181static int __devinit
182ohci_sa1111_start (struct usb_hcd *hcd)
183{
184 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
185 int ret;
186
187 if ((ret = ohci_init(ohci)) < 0)
188 return ret;
189
190 if ((ret = ohci_run (ohci)) < 0) {
191 err ("can't start %s", hcd->self.bus_name);
192 ohci_stop (hcd);
193 return ret;
194 }
195 return 0;
196}
197
198/*-------------------------------------------------------------------------*/
199
/* hc_driver operations table handed to usb_create_hcd() by the probe path */
static const struct hc_driver ohci_sa1111_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"SA-1111 OHCI",
	.hcd_priv_size =	sizeof(struct ohci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			ohci_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,	/* USB 1.1, mmio regs */

	/*
	 * basic lifecycle operations
	 */
	.start =		ohci_sa1111_start,
#ifdef	CONFIG_PM
	/* suspend:		ohci_sa1111_suspend,  -- tbd */
	/* resume:		ohci_sa1111_resume,   -- tbd */
#endif
	.stop =			ohci_stop,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		ohci_urb_enqueue,
	.urb_dequeue =		ohci_urb_dequeue,
	.endpoint_disable =	ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number =	ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	ohci_hub_status_data,
	.hub_control =		ohci_hub_control,
#ifdef	CONFIG_USB_SUSPEND
	.hub_suspend =		ohci_hub_suspend,
	.hub_resume =		ohci_hub_resume,
#endif
};
243
244/*-------------------------------------------------------------------------*/
245
246static int ohci_hcd_sa1111_drv_probe(struct sa1111_dev *dev)
247{
248 int ret;
249
250 if (usb_disabled())
251 return -ENODEV;
252
253 ret = usb_hcd_sa1111_probe(&ohci_sa1111_hc_driver, dev);
254 return ret;
255}
256
/* sa1111_driver remove hook: look up the HCD the core stashed in
 * drvdata and tear it down.  Always succeeds.
 */
static int ohci_hcd_sa1111_drv_remove(struct sa1111_dev *dev)
{
	struct usb_hcd *hcd = sa1111_get_drvdata(dev);

	usb_hcd_sa1111_remove(hcd, dev);
	return 0;
}
264
/* bus glue registration: binds this driver to the SA-1111 USB function */
static struct sa1111_driver ohci_hcd_sa1111_driver = {
	.drv = {
		.name		= "sa1111-ohci",
	},
	.devid		= SA1111_DEVID_USB,	/* match the USB sub-device */
	.probe		= ohci_hcd_sa1111_drv_probe,
	.remove		= ohci_hcd_sa1111_drv_remove,
};
273
274static int __init ohci_hcd_sa1111_init (void)
275{
276 dbg (DRIVER_INFO " (SA-1111)");
277 dbg ("block sizes: ed %d td %d",
278 sizeof (struct ed), sizeof (struct td));
279
280 return sa1111_driver_register(&ohci_hcd_sa1111_driver);
281}
282
/* module unload: unregister from the SA-1111 bus (triggers remove()) */
static void __exit ohci_hcd_sa1111_cleanup (void)
{
	sa1111_driver_unregister(&ohci_hcd_sa1111_driver);
}
287
288module_init (ohci_hcd_sa1111_init);
289module_exit (ohci_hcd_sa1111_cleanup);
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
new file mode 100644
index 000000000000..2ba6e2b0210c
--- /dev/null
+++ b/drivers/usb/host/ohci.h
@@ -0,0 +1,636 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 *
7 * This file is licenced under the GPL.
8 */
9
10/*
11 * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
12 * __leXX (normally) or __beXX (given OHCI_BIG_ENDIAN), depending on the
13 * host controller implementation.
14 */
15typedef __u32 __bitwise __hc32;
16typedef __u16 __bitwise __hc16;
17
/*
 * OHCI Endpoint Descriptor (ED) ... holds TD queue
 * See OHCI spec, section 4.2
 *
 * This is a "Queue Head" for those transfers, which is why
 * both EHCI and UHCI call similar structures a "QH".
 */
struct ed {
	/* first fields are hardware-specified */
	__hc32			hwINFO;		/* endpoint config bitmap */
	/* info bits defined by hcd */
#define ED_DEQUEUE	(1 << 27)
	/* info bits defined by the hardware */
#define ED_ISO		(1 << 15)
#define ED_SKIP		(1 << 14)
#define ED_LOWSPEED	(1 << 13)
#define ED_OUT		(0x01 << 11)
#define ED_IN		(0x02 << 11)
	__hc32			hwTailP;	/* tail of TD list */
	__hc32			hwHeadP;	/* head of TD list (hc r/w) */
#define ED_C		(0x02)			/* toggle carry */
#define ED_H		(0x01)			/* halted */
	__hc32			hwNextED;	/* next ED in list */

	/* rest are purely for the driver's use */
	dma_addr_t		dma;		/* addr of ED */
	struct td		*dummy;		/* next TD to activate */

	/* host's view of schedule */
	struct ed		*ed_next;	/* on schedule or rm_list */
	struct ed		*ed_prev;	/* for non-interrupt EDs */
	struct list_head	td_list;	/* "shadow list" of our TDs */

	/* create --> IDLE --> OPER --> ... --> IDLE --> destroy
	 * usually:  OPER --> UNLINK --> (IDLE | OPER) --> ...
	 */
	u8			state;		/* ED_{IDLE,UNLINK,OPER} */
#define ED_IDLE		0x00		/* NOT linked to HC */
#define ED_UNLINK	0x01		/* being unlinked from hc */
#define ED_OPER		0x02		/* IS linked to hc */

	u8			type;		/* PIPE_{BULK,...} */

	/* periodic scheduling params (for intr and iso) */
	u8			branch;
	u16			interval;
	u16			load;
	u16			last_iso;	/* iso only */

	/* HC may see EDs on rm_list until next frame (frame_no == tick) */
	u16			tick;
} __attribute__ ((aligned(16)));	/* OHCI spec requires 16-byte aligned EDs */
70
71#define ED_MASK ((u32)~0x0f) /* strip hw status in low addr bits */
72
73
/*
 * OHCI Transfer Descriptor (TD) ... one per transfer segment
 * See OHCI spec, sections 4.3.1 (general = control/bulk/interrupt)
 * and 4.3.2 (iso)
 */
struct td {
	/* first fields are hardware-specified */
	__hc32		hwINFO;		/* transfer info bitmask */

	/* hwINFO bits for both general and iso tds: */
#define TD_CC       0xf0000000			/* condition code */
#define TD_CC_GET(td_p) ((td_p >>28) & 0x0f)
//#define TD_CC_SET(td_p, cc) (td_p) = ((td_p) & 0x0fffffff) | (((cc) & 0x0f) << 28)
#define TD_DI       0x00E00000			/* frames before interrupt */
#define TD_DI_SET(X) (((X) & 0x07)<< 21)
	/* these two bits are available for definition/use by HCDs in both
	 * general and iso tds ... others are available for only one type
	 */
#define TD_DONE     0x00020000			/* retired to donelist */
#define TD_ISO      0x00010000			/* copy of ED_ISO */

	/* hwINFO bits for general tds: */
#define TD_EC       0x0C000000			/* error count */
#define TD_T        0x03000000			/* data toggle state */
#define TD_T_DATA0  0x02000000				/* DATA0 */
#define TD_T_DATA1  0x03000000				/* DATA1 */
#define TD_T_TOGGLE 0x00000000				/* uses ED_C */
#define TD_DP       0x00180000			/* direction/pid */
#define TD_DP_SETUP 0x00000000			/* SETUP pid */
#define TD_DP_IN    0x00100000				/* IN pid */
#define TD_DP_OUT   0x00080000				/* OUT pid */
							/* 0x00180000 rsvd */
#define TD_R        0x00040000			/* round: short packets OK? */

	/* (no hwINFO #defines yet for iso tds) */

	__hc32		hwCBP;		/* Current Buffer Pointer (or 0) */
	__hc32		hwNextTD;	/* Next TD Pointer */
	__hc32		hwBE;		/* Memory Buffer End Pointer */

	/* PSW is only for ISO.  Only 1 PSW entry is used, but on
	 * big-endian PPC hardware that's the second entry.
	 */
#define MAXPSW	2
	__hc16		hwPSW [MAXPSW];

	/* rest are purely for the driver's use */
	__u8		index;		/* position within its urb's TDs */
	struct ed	*ed;		/* ed this td is queued on */
	struct td	*td_hash;	/* dma-->td hashtable */
	struct td	*next_dl_td;	/* software (reversed) donelist link */
	struct urb	*urb;		/* urb this td belongs to */

	dma_addr_t	td_dma;		/* addr of this TD */
	dma_addr_t	data_dma;	/* addr of data it points to */

	struct list_head td_list;	/* "shadow list", TDs on same ED */
} __attribute__ ((aligned(32)));	/* c/b/i need 16; only iso needs 32 */
132
133#define TD_MASK ((u32)~0x1f) /* strip hw status in low addr bits */
134
135/*
136 * Hardware transfer status codes -- CC from td->hwINFO or td->hwPSW
137 */
138#define TD_CC_NOERROR 0x00
139#define TD_CC_CRC 0x01
140#define TD_CC_BITSTUFFING 0x02
141#define TD_CC_DATATOGGLEM 0x03
142#define TD_CC_STALL 0x04
143#define TD_DEVNOTRESP 0x05
144#define TD_PIDCHECKFAIL 0x06
145#define TD_UNEXPECTEDPID 0x07
146#define TD_DATAOVERRUN 0x08
147#define TD_DATAUNDERRUN 0x09
148 /* 0x0A, 0x0B reserved for hardware */
149#define TD_BUFFEROVERRUN 0x0C
150#define TD_BUFFERUNDERRUN 0x0D
151 /* 0x0E, 0x0F reserved for HCD */
152#define TD_NOTACCESSED 0x0F
153
154
/* map OHCI TD status codes (CC) to errno values;
 * indexed directly by the 4-bit condition code
 */
static const int cc_to_error [16] = {
	/* No  Error  */               0,
	/* CRC Error  */               -EILSEQ,
	/* Bit Stuff  */               -EPROTO,
	/* Data Togg  */               -EILSEQ,
	/* Stall      */               -EPIPE,
	/* DevNotResp */               -ETIMEDOUT,
	/* PIDCheck   */               -EPROTO,
	/* UnExpPID   */               -EPROTO,
	/* DataOver   */               -EOVERFLOW,
	/* DataUnder  */               -EREMOTEIO,
	/* (for hw)   */               -EIO,
	/* (for hw)   */               -EIO,
	/* BufferOver */               -ECOMM,
	/* BuffUnder  */               -ENOSR,
	/* (for HCD)  */               -EALREADY,
	/* (for HCD)  */               -EALREADY
};
174
175
/*
 * The HCCA (Host Controller Communications Area) is a 256 byte
 * structure defined section 4.4.1 of the OHCI spec. The HC is
 * told the base address of it.  It must be 256-byte aligned.
 */
struct ohci_hcca {
#define NUM_INTS 32
	__hc32	int_table [NUM_INTS];	/* periodic schedule */

	/*
	 * OHCI defines u16 frame_no, followed by u16 zero pad.
	 * Since some processors can't do 16 bit bus accesses,
	 * portable access must be a 32 bits wide.
	 */
	__hc32	frame_no;		/* current frame number */
	__hc32	done_head;		/* info returned for an interrupt */
	u8	reserved_for_hc [116];
	u8	what [4];		/* spec only identifies 252 bytes :) */
} __attribute__ ((aligned(256)));
195
/*
 * This is the structure of the OHCI controller's memory mapped I/O region.
 * You must use readl() and writel() (in <asm/io.h>) to access these fields!!
 * Layout is in section 7 (and appendix B) of the spec.
 */
struct ohci_regs {
	/* control and status registers (section 7.1) */
	__hc32	revision;
	__hc32	control;
	__hc32	cmdstatus;
	__hc32	intrstatus;
	__hc32	intrenable;
	__hc32	intrdisable;

	/* memory pointers (section 7.2) */
	__hc32	hcca;
	__hc32	ed_periodcurrent;
	__hc32	ed_controlhead;
	__hc32	ed_controlcurrent;
	__hc32	ed_bulkhead;
	__hc32	ed_bulkcurrent;
	__hc32	donehead;

	/* frame counters (section 7.3) */
	__hc32	fminterval;
	__hc32	fmremaining;
	__hc32	fmnumber;
	__hc32	periodicstart;
	__hc32	lsthresh;

	/* Root hub ports (section 7.4) */
	struct	ohci_roothub_regs {
		__hc32	a;
		__hc32	b;
		__hc32	status;
#define MAX_ROOT_PORTS	15	/* maximum OHCI root hub ports (RH_A_NDP) */
		__hc32	portstatus [MAX_ROOT_PORTS];
	} roothub;

	/* and optional "legacy support" registers (appendix B) at 0x0100 */

} __attribute__ ((aligned(32)));
238
239
240/* OHCI CONTROL AND STATUS REGISTER MASKS */
241
242/*
243 * HcControl (control) register masks
244 */
245#define OHCI_CTRL_CBSR (3 << 0) /* control/bulk service ratio */
246#define OHCI_CTRL_PLE (1 << 2) /* periodic list enable */
247#define OHCI_CTRL_IE (1 << 3) /* isochronous enable */
248#define OHCI_CTRL_CLE (1 << 4) /* control list enable */
249#define OHCI_CTRL_BLE (1 << 5) /* bulk list enable */
250#define OHCI_CTRL_HCFS (3 << 6) /* host controller functional state */
251#define OHCI_CTRL_IR (1 << 8) /* interrupt routing */
252#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
253#define OHCI_CTRL_RWE (1 << 10) /* remote wakeup enable */
254
255/* pre-shifted values for HCFS */
256# define OHCI_USB_RESET (0 << 6)
257# define OHCI_USB_RESUME (1 << 6)
258# define OHCI_USB_OPER (2 << 6)
259# define OHCI_USB_SUSPEND (3 << 6)
260
261/*
262 * HcCommandStatus (cmdstatus) register masks
263 */
264#define OHCI_HCR (1 << 0) /* host controller reset */
265#define OHCI_CLF (1 << 1) /* control list filled */
266#define OHCI_BLF (1 << 2) /* bulk list filled */
267#define OHCI_OCR (1 << 3) /* ownership change request */
268#define OHCI_SOC (3 << 16) /* scheduling overrun count */
269
270/*
271 * masks used with interrupt registers:
272 * HcInterruptStatus (intrstatus)
273 * HcInterruptEnable (intrenable)
274 * HcInterruptDisable (intrdisable)
275 */
276#define OHCI_INTR_SO (1 << 0) /* scheduling overrun */
277#define OHCI_INTR_WDH (1 << 1) /* writeback of done_head */
278#define OHCI_INTR_SF (1 << 2) /* start frame */
279#define OHCI_INTR_RD (1 << 3) /* resume detect */
280#define OHCI_INTR_UE (1 << 4) /* unrecoverable error */
281#define OHCI_INTR_FNO (1 << 5) /* frame number overflow */
282#define OHCI_INTR_RHSC (1 << 6) /* root hub status change */
283#define OHCI_INTR_OC (1 << 30) /* ownership change */
284#define OHCI_INTR_MIE (1 << 31) /* master interrupt enable */
285
286
287/* OHCI ROOT HUB REGISTER MASKS */
288
289/* roothub.portstatus [i] bits */
290#define RH_PS_CCS 0x00000001 /* current connect status */
291#define RH_PS_PES 0x00000002 /* port enable status*/
292#define RH_PS_PSS 0x00000004 /* port suspend status */
293#define RH_PS_POCI 0x00000008 /* port over current indicator */
294#define RH_PS_PRS 0x00000010 /* port reset status */
295#define RH_PS_PPS 0x00000100 /* port power status */
296#define RH_PS_LSDA 0x00000200 /* low speed device attached */
297#define RH_PS_CSC 0x00010000 /* connect status change */
298#define RH_PS_PESC 0x00020000 /* port enable status change */
299#define RH_PS_PSSC 0x00040000 /* port suspend status change */
300#define RH_PS_OCIC 0x00080000 /* over current indicator change */
301#define RH_PS_PRSC 0x00100000 /* port reset status change */
302
303/* roothub.status bits */
304#define RH_HS_LPS 0x00000001 /* local power status */
305#define RH_HS_OCI 0x00000002 /* over current indicator */
306#define RH_HS_DRWE 0x00008000 /* device remote wakeup enable */
307#define RH_HS_LPSC 0x00010000 /* local power status change */
308#define RH_HS_OCIC 0x00020000 /* over current indicator change */
309#define RH_HS_CRWE 0x80000000 /* clear remote wakeup enable */
310
311/* roothub.b masks */
312#define RH_B_DR 0x0000ffff /* device removable flags */
313#define RH_B_PPCM 0xffff0000 /* port power control mask */
314
315/* roothub.a masks */
316#define RH_A_NDP (0xff << 0) /* number of downstream ports */
317#define RH_A_PSM (1 << 8) /* power switching mode */
318#define RH_A_NPS (1 << 9) /* no power switching */
319#define RH_A_DT (1 << 10) /* device type (mbz) */
320#define RH_A_OCPM (1 << 11) /* over current protection mode */
321#define RH_A_NOCP (1 << 12) /* no over current protection */
322#define RH_A_POTPGT (0xff << 24) /* power on to power good time */
323
324
/* hcd-private per-urb state, stored in urb->hcpriv; an urb is complete
 * once td_cnt catches up with length
 */
typedef struct urb_priv {
	struct ed		*ed;
	u16			length;		// # tds in this request
	u16			td_cnt;		// tds already serviced
	struct list_head	pending;
	struct td		*td [0];	// all TDs in this request

} urb_priv_t;
334
335#define TD_HASH_SIZE 64 /* power'o'two */
336// sizeof (struct td) ~= 64 == 2^6 ...
337#define TD_HASH_FUNC(td_dma) ((td_dma ^ (td_dma >> 6)) % TD_HASH_SIZE)
338
339
340/*
341 * This is the full ohci controller description
342 *
343 * Note how the "proper" USB information is just
344 * a subset of what the full implementation needs. (Linus)
345 */
346
struct ohci_hcd {
	spinlock_t		lock;		/* guards all fields below */

	/*
	 * I/O memory used to communicate with the HC (dma-consistent)
	 */
	struct ohci_regs __iomem *regs;

	/*
	 * main memory used to communicate with the HC (dma-consistent).
	 * hcd adds to schedule for a live hc any time, but removals finish
	 * only at the start of the next frame.
	 */
	struct ohci_hcca	*hcca;
	dma_addr_t		hcca_dma;

	struct ed		*ed_rm_list;		/* to be removed */

	struct ed		*ed_bulktail;		/* last in bulk list */
	struct ed		*ed_controltail;	/* last in ctrl list */
	struct ed		*periodic [NUM_INTS];	/* shadow int_table */

	/*
	 * OTG controllers and transceivers need software interaction;
	 * other external transceivers should be software-transparent
	 */
	struct otg_transceiver	*transceiver;
	unsigned		power_budget;

	/*
	 * memory management for queue data structures
	 */
	struct dma_pool		*td_cache;
	struct dma_pool		*ed_cache;
	struct td		*td_hash [TD_HASH_SIZE];	/* td lookup by dma address (TD_HASH_FUNC) */
	struct list_head	pending;			/* urb_privs with TDs still outstanding */

	/*
	 * driver state
	 */
	int			load [NUM_INTS];	/* periodic schedule load per slot */
	u32			hc_control;	/* copy of hc control reg */
	unsigned long		next_statechange;	/* suspend/resume */
	u32			fminterval;	/* saved register */

	struct work_struct	rh_resume;

	unsigned long		flags;		/* for HC bugs */
#define	OHCI_QUIRK_AMD756	0x01		/* erratum #4 */
#define	OHCI_QUIRK_SUPERIO	0x02		/* natsemi */
#define	OHCI_QUIRK_INITRESET	0x04		/* SiS, OPTi, ... */
#define	OHCI_BIG_ENDIAN		0x08		/* big endian HC */
	// there are also chip quirks/bugs in init logic

};
402
/* convert between an hcd pointer and the corresponding ohci_hcd */
static inline struct ohci_hcd *hcd_to_ohci (struct usb_hcd *hcd)
{
	/* the ohci_hcd lives in usb_hcd's trailing private-data area */
	return (struct ohci_hcd *) (hcd->hcd_priv);
}
/* inverse of hcd_to_ohci(): back up from the private area to the usb_hcd */
static inline struct usb_hcd *ohci_to_hcd (const struct ohci_hcd *ohci)
{
	return container_of ((void *) ohci, struct usb_hcd, hcd_priv);
}
412
413/*-------------------------------------------------------------------------*/
414
415#ifndef DEBUG
416#define STUB_DEBUG_FILES
417#endif /* DEBUG */
418
419#define ohci_dbg(ohci, fmt, args...) \
420 dev_dbg (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
421#define ohci_err(ohci, fmt, args...) \
422 dev_err (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
423#define ohci_info(ohci, fmt, args...) \
424 dev_info (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
425#define ohci_warn(ohci, fmt, args...) \
426 dev_warn (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
427
428#ifdef OHCI_VERBOSE_DEBUG
429# define ohci_vdbg ohci_dbg
430#else
431# define ohci_vdbg(ohci, fmt, args...) do { } while (0)
432#endif
433
434/*-------------------------------------------------------------------------*/
435
436/*
437 * While most USB host controllers implement their registers and
438 * in-memory communication descriptors in little-endian format,
439 * a minority (notably the IBM STB04XXX and the Motorola MPC5200
440 * processors) implement them in big endian format.
441 *
442 * This attempts to support either format at compile time without a
443 * runtime penalty, or both formats with the additional overhead
444 * of checking a flag bit.
445 */
446
#ifdef CONFIG_USB_OHCI_BIG_ENDIAN

/* with both endian configs enabled, decide per-controller via the quirk flag */
#ifdef CONFIG_USB_OHCI_LITTLE_ENDIAN
#define big_endian(ohci)	(ohci->flags & OHCI_BIG_ENDIAN) /* either */
#else
#define big_endian(ohci)	1		/* only big endian */
#endif

/*
 * Big-endian read/write functions are arch-specific.
 * Other arches can be added if/when they're needed.
 */
#if defined(CONFIG_PPC)
#define readl_be(addr)		in_be32((__force unsigned *)addr)
#define writel_be(val, addr)	out_be32((__force unsigned *)addr, val)
#endif

static inline unsigned int ohci_readl (const struct ohci_hcd *ohci,
					__hc32 __iomem * regs)
{
	return big_endian(ohci) ? readl_be (regs) : readl ((__force u32 *)regs);
}

static inline void ohci_writel (const struct ohci_hcd *ohci,
				const unsigned int val, __hc32 __iomem *regs)
{
	big_endian(ohci) ? writel_be (val, regs) :
		writel (val, (__force u32 *)regs);
}

#else	/* !CONFIG_USB_OHCI_BIG_ENDIAN */

#define big_endian(ohci)	0		/* only little endian */

#ifdef CONFIG_ARCH_LH7A404
	/* Marc Singer: at the time this code was written, the LH7A404
	 * had a problem reading the USB host registers.  This
	 * implementation of the ohci_readl function performs the read
	 * twice as a work-around.
	 */
static inline unsigned int
ohci_readl (const struct ohci_hcd *ohci, const __hc32 *regs)
{
	*(volatile __force unsigned int*) regs;	/* dummy read, value discarded */
	return *(volatile __force unsigned int*) regs;
}
#else
	/* Standard version of ohci_readl uses standard, platform
	 * specific implementation. */
static inline unsigned int
ohci_readl (const struct ohci_hcd *ohci, __hc32 __iomem * regs)
{
	return readl(regs);
}
#endif

static inline void ohci_writel (const struct ohci_hcd *ohci,
				const unsigned int val, __hc32 __iomem *regs)
{
	writel (val, regs);
}

#endif	/* !CONFIG_USB_OHCI_BIG_ENDIAN */
510
511/*-------------------------------------------------------------------------*/
512
513/* cpu to ohci */
514static inline __hc16 cpu_to_hc16 (const struct ohci_hcd *ohci, const u16 x)
515{
516 return big_endian(ohci) ? (__force __hc16)cpu_to_be16(x) : (__force __hc16)cpu_to_le16(x);
517}
518
519static inline __hc16 cpu_to_hc16p (const struct ohci_hcd *ohci, const u16 *x)
520{
521 return big_endian(ohci) ? cpu_to_be16p(x) : cpu_to_le16p(x);
522}
523
524static inline __hc32 cpu_to_hc32 (const struct ohci_hcd *ohci, const u32 x)
525{
526 return big_endian(ohci) ? (__force __hc32)cpu_to_be32(x) : (__force __hc32)cpu_to_le32(x);
527}
528
529static inline __hc32 cpu_to_hc32p (const struct ohci_hcd *ohci, const u32 *x)
530{
531 return big_endian(ohci) ? cpu_to_be32p(x) : cpu_to_le32p(x);
532}
533
534/* ohci to cpu */
535static inline u16 hc16_to_cpu (const struct ohci_hcd *ohci, const __hc16 x)
536{
537 return big_endian(ohci) ? be16_to_cpu((__force __be16)x) : le16_to_cpu((__force __le16)x);
538}
539
540static inline u16 hc16_to_cpup (const struct ohci_hcd *ohci, const __hc16 *x)
541{
542 return big_endian(ohci) ? be16_to_cpup((__force __be16 *)x) : le16_to_cpup((__force __le16 *)x);
543}
544
545static inline u32 hc32_to_cpu (const struct ohci_hcd *ohci, const __hc32 x)
546{
547 return big_endian(ohci) ? be32_to_cpu((__force __be32)x) : le32_to_cpu((__force __le32)x);
548}
549
550static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x)
551{
552 return big_endian(ohci) ? be32_to_cpup((__force __be32 *)x) : le32_to_cpup((__force __le32 *)x);
553}
554
555/*-------------------------------------------------------------------------*/
556
557/* HCCA frame number is 16 bits, but is accessed as 32 bits since not all
558 * hardware handles 16 bit reads. That creates a different confusion on
559 * some big-endian SOC implementations. Same thing happens with PSW access.
560 */
561
562#ifdef CONFIG_STB03xxx
563#define OHCI_BE_FRAME_NO_SHIFT 16
564#else
565#define OHCI_BE_FRAME_NO_SHIFT 0
566#endif
567
568static inline u16 ohci_frame_no(const struct ohci_hcd *ohci)
569{
570 u32 tmp;
571 if (big_endian(ohci)) {
572 tmp = be32_to_cpup((__force __be32 *)&ohci->hcca->frame_no);
573 tmp >>= OHCI_BE_FRAME_NO_SHIFT;
574 } else
575 tmp = le32_to_cpup((__force __le32 *)&ohci->hcca->frame_no);
576
577 return (u16)tmp;
578}
579
580static inline __hc16 *ohci_hwPSWp(const struct ohci_hcd *ohci,
581 const struct td *td, int index)
582{
583 return (__hc16 *)(big_endian(ohci) ?
584 &td->hwPSW[index ^ 1] : &td->hwPSW[index]);
585}
586
587static inline u16 ohci_hwPSW(const struct ohci_hcd *ohci,
588 const struct td *td, int index)
589{
590 return hc16_to_cpup(ohci, ohci_hwPSWp(ohci, td, index));
591}
592
593/*-------------------------------------------------------------------------*/
594
/* mark the controller halted; no further root-hub or schedule activity */
static inline void disable (struct ohci_hcd *ohci)
{
	ohci_to_hcd(ohci)->state = HC_STATE_HALT;
}
599
600#define FI 0x2edf /* 12000 bits per frame (-1) */
601#define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7))
602#define FIT (1 << 31)
603#define LSTHRESH 0x628 /* lowspeed bit threshold */
604
/* reprogram HcFmInterval and HcPeriodicStart; flips the FIT bit so the
 * HC latches the new frame interval value.
 */
static void periodic_reinit (struct ohci_hcd *ohci)
{
	u32	fi = ohci->fminterval & 0x03fff;
	u32	fit = ohci_readl(ohci, &ohci->regs->fminterval) & FIT;

	ohci_writel (ohci, (fit ^ FIT) | ohci->fminterval,
						&ohci->regs->fminterval);
	/* periodic start = 90% of the frame interval */
	ohci_writel (ohci, ((9 * fi) / 10) & 0x3fff,
						&ohci->regs->periodicstart);
}
615
/* AMD-756 (D2 rev) reports corrupt register contents in some cases.
 * The erratum (#4) description is incorrect.  AMD's workaround waits
 * till some bits (mostly reserved) are clear; ok for all revs.
 *
 * An all-ones read means the controller is gone: disable() halts the hcd.
 */
#define read_roothub(hc, register, mask) ({ \
	u32 temp = ohci_readl (hc, &hc->regs->roothub.register); \
	if (temp == -1) \
		disable (hc); \
	else if (hc->flags & OHCI_QUIRK_AMD756) \
		while (temp & mask) \
			temp = ohci_readl (hc, &hc->regs->roothub.register); \
	temp; })

/* roothub.a and portstatus reads go through the AMD756 workaround */
static u32 roothub_a (struct ohci_hcd *hc)
	{ return read_roothub (hc, a, 0xfc0fe000); }
static inline u32 roothub_b (struct ohci_hcd *hc)
	{ return ohci_readl (hc, &hc->regs->roothub.b); }
static inline u32 roothub_status (struct ohci_hcd *hc)
	{ return ohci_readl (hc, &hc->regs->roothub.status); }
static u32 roothub_portstatus (struct ohci_hcd *hc, int i)
	{ return read_roothub (hc, portstatus [i], 0xffe0fce0); }
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
new file mode 100644
index 000000000000..d309e292198e
--- /dev/null
+++ b/drivers/usb/host/sl811-hcd.c
@@ -0,0 +1,1851 @@
1/*
2 * SL811HS HCD (Host Controller Driver) for USB.
3 *
4 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
5 * Copyright (C) 2004 David Brownell
6 *
7 * Periodic scheduling is based on Roman's OHCI code
8 * Copyright (C) 1999 Roman Weissgaerber
9 *
10 * The SL811HS controller handles host side USB (like the SL11H, but with
11 * another register set and SOF generation) as well as peripheral side USB
12 * (like the SL811S). This driver version doesn't implement the Gadget API
13 * for the peripheral role; or OTG (that'd need much external circuitry).
14 *
15 * For documentation, see the SL811HS spec and the "SL811HS Embedded Host"
16 * document (providing significant pieces missing from that spec); plus
17 * the SL811S spec if you want peripheral side info.
18 */
19
20/*
21 * Status: Passed basic stress testing, works with hubs, mice, keyboards,
22 * and usb-storage.
23 *
24 * TODO:
25 * - usb suspend/resume triggered by sl811 (with USB_SUSPEND)
26 * - various issues noted in the code
27 * - performance work; use both register banks; ...
28 * - use urb->iso_frame_desc[] with ISO transfers
29 */
30
31#undef VERBOSE
32#undef PACKET_TRACE
33
34#include <linux/config.h>
35
36#ifdef CONFIG_USB_DEBUG
37# define DEBUG
38#else
39# undef DEBUG
40#endif
41
42#include <linux/module.h>
43#include <linux/moduleparam.h>
44#include <linux/kernel.h>
45#include <linux/delay.h>
46#include <linux/ioport.h>
47#include <linux/sched.h>
48#include <linux/slab.h>
49#include <linux/smp_lock.h>
50#include <linux/errno.h>
51#include <linux/init.h>
52#include <linux/timer.h>
53#include <linux/list.h>
54#include <linux/interrupt.h>
55#include <linux/usb.h>
56#include <linux/usb_sl811.h>
57
58#include <asm/io.h>
59#include <asm/irq.h>
60#include <asm/system.h>
61#include <asm/byteorder.h>
62
63#include "../core/hcd.h"
64#include "sl811.h"
65
66
67MODULE_DESCRIPTION("SL811HS USB Host Controller Driver");
68MODULE_LICENSE("GPL");
69
70#define DRIVER_VERSION "15 Dec 2004"
71
72
73#ifndef DEBUG
74# define STUB_DEBUG_FILE
75#endif
76
77/* for now, use only one transfer register bank */
78#undef USE_B
79
80/* this doesn't understand urb->iso_frame_desc[], but if you had a driver
81 * that just queued one ISO frame per URB then iso transfers "should" work
82 * using the normal urb status fields.
83 */
84#define DISABLE_ISO
85
86// #define QUIRK2
87#define QUIRK3
88
89static const char hcd_name[] = "sl811-hcd";
90
91/*-------------------------------------------------------------------------*/
92
/* switch VBUS for the single downstream port on or off and (re)initialize
 * the chip; with power off the root hub is inactive and the hcd is halted.
 */
static void port_power(struct sl811 *sl811, int is_on)
{
	struct usb_hcd	*hcd = sl811_to_hcd(sl811);

	/* hub is inactive unless the port is powered */
	if (is_on) {
		if (sl811->port1 & (1 << USB_PORT_FEAT_POWER))
			return;

		sl811->port1 = (1 << USB_PORT_FEAT_POWER);
		sl811->irq_enable = SL11H_INTMASK_INSRMV;
		hcd->self.controller->power.power_state = PMSG_ON;
	} else {
		sl811->port1 = 0;
		sl811->irq_enable = 0;
		hcd->state = HC_STATE_HALT;
		hcd->self.controller->power.power_state = PMSG_SUSPEND;
	}
	sl811->ctrl1 = 0;
	/* mask and ack all chip irqs before touching the board hooks */
	sl811_write(sl811, SL11H_IRQ_ENABLE, 0);
	sl811_write(sl811, SL11H_IRQ_STATUS, ~0);

	if (sl811->board && sl811->board->port_power) {
		/* switch VBUS, at 500mA unless hub power budget gets set */
		DBG("power %s\n", is_on ? "on" : "off");
		sl811->board->port_power(hcd->self.controller, is_on);
	}

	/* reset as thoroughly as we can */
	if (sl811->board && sl811->board->reset)
		sl811->board->reset(hcd->self.controller);

	/* reprogram control registers and re-enable the chosen irqs */
	sl811_write(sl811, SL11H_IRQ_ENABLE, 0);
	sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
	sl811_write(sl811, SL811HS_CTLREG2, SL811HS_CTL2_INIT);
	sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);

	// if !is_on, put into lowpower mode now
}
132
133/*-------------------------------------------------------------------------*/
134
135/* This is a PIO-only HCD. Queueing appends URBs to the endpoint's queue,
136 * and may start I/O. Endpoint queues are scanned during completion irq
137 * handlers (one per packet: ACK, NAK, faults, etc) and urb cancelation.
138 *
139 * Using an external DMA engine to copy a packet at a time could work,
140 * though setup/teardown costs may be too big to make it worthwhile.
141 */
142
143/* SETUP starts a new control request. Devices are not allowed to
144 * STALL or NAK these; they must cancel any pending control requests.
145 */
146static void setup_packet(
147 struct sl811 *sl811,
148 struct sl811h_ep *ep,
149 struct urb *urb,
150 u8 bank,
151 u8 control
152)
153{
154 u8 addr;
155 u8 len;
156 void __iomem *data_reg;
157
158 addr = SL811HS_PACKET_BUF(bank == 0);
159 len = sizeof(struct usb_ctrlrequest);
160 data_reg = sl811->data_reg;
161 sl811_write_buf(sl811, addr, urb->setup_packet, len);
162
163 /* autoincrementing */
164 sl811_write(sl811, bank + SL11H_BUFADDRREG, addr);
165 writeb(len, data_reg);
166 writeb(SL_SETUP /* | ep->epnum */, data_reg);
167 writeb(usb_pipedevice(urb->pipe), data_reg);
168
169 /* always OUT/data0 */ ;
170 sl811_write(sl811, bank + SL11H_HOSTCTLREG,
171 control | SL11H_HCTLMASK_OUT);
172 ep->length = 0;
173 PACKET("SETUP qh%p\n", ep);
174}
175
176/* STATUS finishes control requests, often after IN or OUT data packets */
177static void status_packet(
178 struct sl811 *sl811,
179 struct sl811h_ep *ep,
180 struct urb *urb,
181 u8 bank,
182 u8 control
183)
184{
185 int do_out;
186 void __iomem *data_reg;
187
188 do_out = urb->transfer_buffer_length && usb_pipein(urb->pipe);
189 data_reg = sl811->data_reg;
190
191 /* autoincrementing */
192 sl811_write(sl811, bank + SL11H_BUFADDRREG, 0);
193 writeb(0, data_reg);
194 writeb((do_out ? SL_OUT : SL_IN) /* | ep->epnum */, data_reg);
195 writeb(usb_pipedevice(urb->pipe), data_reg);
196
197 /* always data1; sometimes IN */
198 control |= SL11H_HCTLMASK_TOGGLE;
199 if (do_out)
200 control |= SL11H_HCTLMASK_OUT;
201 sl811_write(sl811, bank + SL11H_HOSTCTLREG, control);
202 ep->length = 0;
203 PACKET("STATUS%s/%s qh%p\n", ep->nak_count ? "/retry" : "",
204 do_out ? "out" : "in", ep);
205}
206
207/* IN packets can be used with any type of endpoint. here we just
208 * start the transfer, data from the peripheral may arrive later.
209 * urb->iso_frame_desc is currently ignored here...
210 */
211static void in_packet(
212 struct sl811 *sl811,
213 struct sl811h_ep *ep,
214 struct urb *urb,
215 u8 bank,
216 u8 control
217)
218{
219 u8 addr;
220 u8 len;
221 void __iomem *data_reg;
222
223 /* avoid losing data on overflow */
224 len = ep->maxpacket;
225 addr = SL811HS_PACKET_BUF(bank == 0);
226 if (!(control & SL11H_HCTLMASK_ISOCH)
227 && usb_gettoggle(urb->dev, ep->epnum, 0))
228 control |= SL11H_HCTLMASK_TOGGLE;
229 data_reg = sl811->data_reg;
230
231 /* autoincrementing */
232 sl811_write(sl811, bank + SL11H_BUFADDRREG, addr);
233 writeb(len, data_reg);
234 writeb(SL_IN | ep->epnum, data_reg);
235 writeb(usb_pipedevice(urb->pipe), data_reg);
236
237 sl811_write(sl811, bank + SL11H_HOSTCTLREG, control);
238 ep->length = min((int)len,
239 urb->transfer_buffer_length - urb->actual_length);
240 PACKET("IN%s/%d qh%p len%d\n", ep->nak_count ? "/retry" : "",
241 !!usb_gettoggle(urb->dev, ep->epnum, 0), ep, len);
242}
243
244/* OUT packets can be used with any type of endpoint.
245 * urb->iso_frame_desc is currently ignored here...
246 */
247static void out_packet(
248 struct sl811 *sl811,
249 struct sl811h_ep *ep,
250 struct urb *urb,
251 u8 bank,
252 u8 control
253)
254{
255 void *buf;
256 u8 addr;
257 u8 len;
258 void __iomem *data_reg;
259
260 buf = urb->transfer_buffer + urb->actual_length;
261 prefetch(buf);
262
263 len = min((int)ep->maxpacket,
264 urb->transfer_buffer_length - urb->actual_length);
265
266 if (!(control & SL11H_HCTLMASK_ISOCH)
267 && usb_gettoggle(urb->dev, ep->epnum, 1))
268 control |= SL11H_HCTLMASK_TOGGLE;
269 addr = SL811HS_PACKET_BUF(bank == 0);
270 data_reg = sl811->data_reg;
271
272 sl811_write_buf(sl811, addr, buf, len);
273
274 /* autoincrementing */
275 sl811_write(sl811, bank + SL11H_BUFADDRREG, addr);
276 writeb(len, data_reg);
277 writeb(SL_OUT | ep->epnum, data_reg);
278 writeb(usb_pipedevice(urb->pipe), data_reg);
279
280 sl811_write(sl811, bank + SL11H_HOSTCTLREG,
281 control | SL11H_HCTLMASK_OUT);
282 ep->length = len;
283 PACKET("OUT%s/%d qh%p len%d\n", ep->nak_count ? "/retry" : "",
284 !!usb_gettoggle(urb->dev, ep->epnum, 1), ep, len);
285}
286
287/*-------------------------------------------------------------------------*/
288
289/* caller updates on-chip enables later */
290
291static inline void sofirq_on(struct sl811 *sl811)
292{
293 if (sl811->irq_enable & SL11H_INTMASK_SOFINTR)
294 return;
295 VDBG("sof irq on\n");
296 sl811->irq_enable |= SL11H_INTMASK_SOFINTR;
297}
298
299static inline void sofirq_off(struct sl811 *sl811)
300{
301 if (!(sl811->irq_enable & SL11H_INTMASK_SOFINTR))
302 return;
303 VDBG("sof irq off\n");
304 sl811->irq_enable &= ~SL11H_INTMASK_SOFINTR;
305}
306
307/*-------------------------------------------------------------------------*/
308
/* pick the next endpoint for a transaction, and issue it.
 * frames start with periodic transfers (after whatever is pending
 * from the previous frame), and the rest of the time is async
 * transfers, scheduled round-robin.
 *
 * Returns the endpoint whose packet was issued into 'bank', or NULL
 * when nothing could be scheduled this pass.
 */
static struct sl811h_ep	*start(struct sl811 *sl811, u8 bank)
{
	struct sl811h_ep	*ep;
	struct urb		*urb;
	int			fclock;
	u8			control;

	/* use endpoint at schedule head */
	if (sl811->next_periodic) {
		ep = sl811->next_periodic;
		sl811->next_periodic = ep->next;
	} else {
		if (sl811->next_async)
			ep = sl811->next_async;
		else if (!list_empty(&sl811->async))
			ep = container_of(sl811->async.next,
					struct sl811h_ep, schedule);
		else {
			/* could set up the first fullspeed periodic
			 * transfer for the next frame ...
			 */
			return NULL;
		}

#ifdef USE_B
		/* don't issue the same endpoint on both banks at once */
		if ((bank && sl811->active_b == ep) || sl811->active_a == ep)
			return NULL;
#endif

		/* advance the round-robin async cursor */
		if (ep->schedule.next == &sl811->async)
			sl811->next_async = NULL;
		else
			sl811->next_async = container_of(ep->schedule.next,
					struct sl811h_ep, schedule);
	}

	if (unlikely(list_empty(&ep->hep->urb_list))) {
		DBG("empty %p queue?\n", ep);
		return NULL;
	}

	urb = container_of(ep->hep->urb_list.next, struct urb, urb_list);
	control = ep->defctrl;

	/* if this frame doesn't have enough time left to transfer this
	 * packet, wait till the next frame.  too-simple algorithm...
	 */
	fclock = sl811_read(sl811, SL11H_SOFTMRREG) << 6;
	fclock -= 100;		/* setup takes not much time */
	if (urb->dev->speed == USB_SPEED_LOW) {
		if (control & SL11H_HCTLMASK_PREAMBLE) {
			/* also note erratum 1: some hubs won't work */
			fclock -= 800;
		}
		fclock -= ep->maxpacket << 8;

		/* erratum 2: AFTERSOF only works for fullspeed */
		if (fclock < 0) {
			if (ep->period)
				sl811->stat_overrun++;
			sofirq_on(sl811);
			return NULL;
		}
	} else {
		fclock -= 12000 / 19;	/* 19 64byte packets/msec */
		if (fclock < 0) {
			if (ep->period)
				sl811->stat_overrun++;
			control |= SL11H_HCTLMASK_AFTERSOF;

		/* throttle bulk/control irq noise */
		} else if (ep->nak_count)
			control |= SL11H_HCTLMASK_AFTERSOF;
	}


	/* issue the packet matching this endpoint's next transfer stage */
	switch (ep->nextpid) {
	case USB_PID_IN:
		in_packet(sl811, ep, urb, bank, control);
		break;
	case USB_PID_OUT:
		out_packet(sl811, ep, urb, bank, control);
		break;
	case USB_PID_SETUP:
		setup_packet(sl811, ep, urb, bank, control);
		break;
	case USB_PID_ACK:	/* for control status */
		status_packet(sl811, ep, urb, bank, control);
		break;
	default:
		DBG("bad ep%p pid %02x\n", ep, ep->nextpid);
		ep = NULL;
	}
	return ep;
}
409
/* minimum time a bank stays armed before checkdone() may treat its DONE
 * irq as lost: at least 2 jiffies, and at least 2 msec worth of jiffies
 */
#define MIN_JIFFIES	((msecs_to_jiffies(2) > 1) ? msecs_to_jiffies(2) : 2)

/* keep the transfer banks busy: arm bank A (and B, if enabled) with the
 * next schedulable packet, stamping the lost-irq deadline for each.
 */
static inline void start_transfer(struct sl811 *sl811)
{
	/* nothing may be issued while the port is suspended */
	if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND))
		return;
	if (sl811->active_a == NULL) {
		sl811->active_a = start(sl811, SL811_EP_A(SL811_HOST_BUF));
		if (sl811->active_a != NULL)
			sl811->jiffies_a = jiffies + MIN_JIFFIES;
	}
#ifdef USE_B
	if (sl811->active_b == NULL) {
		sl811->active_b = start(sl811, SL811_EP_B(SL811_HOST_BUF));
		if (sl811->active_b != NULL)
			sl811->jiffies_b = jiffies + MIN_JIFFIES;
	}
#endif
}
429
/* complete 'urb' with 'status' and give it back to usbcore (dropping the
 * hcd lock around the callback), then deschedule the endpoint if its
 * queue is now empty.  Called with sl811->lock held.
 */
static void finish_request(
	struct sl811		*sl811,
	struct sl811h_ep	*ep,
	struct urb		*urb,
	struct pt_regs		*regs,
	int			status
) __releases(sl811->lock) __acquires(sl811->lock)
{
	unsigned		i;

	/* control urbs restart at the SETUP stage */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	spin_lock(&urb->lock);
	if (urb->status == -EINPROGRESS)
		urb->status = status;
	spin_unlock(&urb->lock);

	spin_unlock(&sl811->lock);
	usb_hcd_giveback_urb(sl811_to_hcd(sl811), urb, regs);
	spin_lock(&sl811->lock);

	/* leave active endpoints in the schedule */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule? */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		if (ep == sl811->next_async)
			sl811->next_async = NULL;
		return;
	}

	/* periodic deschedule */
	DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
	for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
		struct sl811h_ep	*temp;
		struct sl811h_ep	**prev = &sl811->periodic[i];

		/* unlink ep from this frame slot's singly linked list */
		while (*prev && ((temp = *prev) != ep))
			prev = &temp->next;
		if (*prev)
			*prev = ep->next;
		sl811->load[i] -= ep->load;
	}
	ep->branch = PERIODIC_SIZE;
	sl811->periodic_count--;
	sl811_to_hcd(sl811)->self.bandwidth_allocated
		-= ep->load / ep->period;
	if (ep == sl811->next_periodic)
		sl811->next_periodic = ep->next;

	/* we might turn SOFs back on again for the async schedule */
	if (sl811->periodic_count == 0)
		sofirq_off(sl811);
}
487
/* a transfer on 'bank' completed: decode its packet status, advance the
 * urb (actual_length, toggles, next control stage PID), and complete the
 * urb when it finished or failed "3 strikes".
 */
static void
done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank, struct pt_regs *regs)
{
	u8			status;
	struct urb		*urb;
	int			urbstat = -EINPROGRESS;

	if (unlikely(!ep))
		return;

	status = sl811_read(sl811, bank + SL11H_PKTSTATREG);

	urb = container_of(ep->hep->urb_list.next, struct urb, urb_list);

	/* we can safely ignore NAKs */
	if (status & SL11H_STATMASK_NAK) {
		// PACKET("...NAK_%02x qh%p\n", bank, ep);
		if (!ep->period)
			ep->nak_count++;
		ep->error_count = 0;

	/* ACK advances transfer, toggle, and maybe queue */
	} else if (status & SL11H_STATMASK_ACK) {
		struct usb_device	*udev = urb->dev;
		int			len;
		unsigned char		*buf;

		/* urb->iso_frame_desc is currently ignored here... */

		ep->nak_count = ep->error_count = 0;
		switch (ep->nextpid) {
		case USB_PID_OUT:
			// PACKET("...ACK/out_%02x qh%p\n", bank, ep);
			urb->actual_length += ep->length;
			usb_dotoggle(udev, ep->epnum, 1);
			if (urb->actual_length
					== urb->transfer_buffer_length) {
				if (usb_pipecontrol(urb->pipe))
					ep->nextpid = USB_PID_ACK;

				/* some bulk protocols terminate OUT transfers
				 * by a short packet, using ZLPs not padding.
				 */
				else if (ep->length < ep->maxpacket
						|| !(urb->transfer_flags
							& URB_ZERO_PACKET))
					urbstat = 0;
			}
			break;
		case USB_PID_IN:
			// PACKET("...ACK/in_%02x qh%p\n", bank, ep);
			buf = urb->transfer_buffer + urb->actual_length;
			prefetchw(buf);
			/* received length = maxpacket minus the chip's
			 * remaining transfer-count register value
			 */
			len = ep->maxpacket - sl811_read(sl811,
						bank + SL11H_XFERCNTREG);
			if (len > ep->length) {
				len = ep->length;
				urb->status = -EOVERFLOW;
			}
			urb->actual_length += len;
			sl811_read_buf(sl811, SL811HS_PACKET_BUF(bank == 0),
					buf, len);
			usb_dotoggle(udev, ep->epnum, 0);
			if (urb->actual_length == urb->transfer_buffer_length)
				urbstat = 0;
			else if (len < ep->maxpacket) {
				if (urb->transfer_flags & URB_SHORT_NOT_OK)
					urbstat = -EREMOTEIO;
				else
					urbstat = 0;
			}
			if (usb_pipecontrol(urb->pipe)
					&& (urbstat == -EREMOTEIO
						|| urbstat == 0)) {

				/* NOTE if the status stage STALLs (why?),
				 * this reports the wrong urb status.
				 */
				spin_lock(&urb->lock);
				if (urb->status == -EINPROGRESS)
					urb->status = urbstat;
				spin_unlock(&urb->lock);

				/* keep the urb queued for its status stage */
				urb = NULL;
				ep->nextpid = USB_PID_ACK;
			}
			break;
		case USB_PID_SETUP:
			// PACKET("...ACK/setup_%02x qh%p\n", bank, ep);
			if (urb->transfer_buffer_length == urb->actual_length)
				ep->nextpid = USB_PID_ACK;
			else if (usb_pipeout(urb->pipe)) {
				usb_settoggle(udev, 0, 1, 1);
				ep->nextpid = USB_PID_OUT;
			} else {
				usb_settoggle(udev, 0, 0, 1);
				ep->nextpid = USB_PID_IN;
			}
			break;
		case USB_PID_ACK:
			// PACKET("...ACK/status_%02x qh%p\n", bank, ep);
			urbstat = 0;
			break;
		}

	/* STALL stops all transfers */
	} else if (status & SL11H_STATMASK_STALL) {
		PACKET("...STALL_%02x qh%p\n", bank, ep);
		ep->nak_count = ep->error_count = 0;
		urbstat = -EPIPE;

	/* error? retry, until "3 strikes" */
	} else if (++ep->error_count >= 3) {
		if (status & SL11H_STATMASK_TMOUT)
			urbstat = -ETIMEDOUT;
		else if (status & SL11H_STATMASK_OVF)
			urbstat = -EOVERFLOW;
		else
			urbstat = -EPROTO;
		ep->error_count = 0;
		PACKET("...3STRIKES_%02x %02x qh%p stat %d\n",
				bank, status, ep, urbstat);
	}

	if (urb && (urbstat != -EINPROGRESS || urb->status != -EINPROGRESS))
		finish_request(sl811, ep, urb, regs, urbstat);
}
615
616static inline u8 checkdone(struct sl811 *sl811)
617{
618 u8 ctl;
619 u8 irqstat = 0;
620
621 if (sl811->active_a && time_before_eq(sl811->jiffies_a, jiffies)) {
622 ctl = sl811_read(sl811, SL811_EP_A(SL11H_HOSTCTLREG));
623 if (ctl & SL11H_HCTLMASK_ARM)
624 sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG), 0);
625 DBG("%s DONE_A: ctrl %02x sts %02x\n",
626 (ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost",
627 ctl,
628 sl811_read(sl811, SL811_EP_A(SL11H_PKTSTATREG)));
629 irqstat |= SL11H_INTMASK_DONE_A;
630 }
631#ifdef USE_B
632 if (sl811->active_b && time_before_eq(sl811->jiffies_b, jiffies)) {
633 ctl = sl811_read(sl811, SL811_EP_B(SL11H_HOSTCTLREG));
634 if (ctl & SL11H_HCTLMASK_ARM)
635 sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG), 0);
636 DBG("%s DONE_B: ctrl %02x sts %02x\n",
637 (ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost",
638 ctl,
639 sl811_read(sl811, SL811_EP_B(SL11H_PKTSTATREG)));
640 irqstat |= SL11H_INTMASK_DONE_A;
641 }
642#endif
643 return irqstat;
644}
645
/* top-level interrupt handler: ack and dispatch chip irqs (bank A/B done,
 * SOF, insert/remove, remote wakeup), then re-arm the transfer banks.
 * Re-reads status a bounded number of times since servicing one irq
 * often raises more.
 */
static irqreturn_t sl811h_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct sl811	*sl811 = hcd_to_sl811(hcd);
	u8		irqstat;
	irqreturn_t	ret = IRQ_NONE;
	unsigned	retries = 5;

	spin_lock(&sl811->lock);

retry:
	irqstat = sl811_read(sl811, SL11H_IRQ_STATUS) & ~SL11H_INTMASK_DP;
	if (irqstat) {
		/* ack everything seen; handle only enabled sources below */
		sl811_write(sl811, SL11H_IRQ_STATUS, irqstat);
		irqstat &= sl811->irq_enable;
	}

#ifdef	QUIRK2
	/* this may no longer be necessary ... */
	if (irqstat == 0 && ret == IRQ_NONE) {
		irqstat = checkdone(sl811);
		if (irqstat /* && irq != ~0 */ )
			sl811->stat_lost++;
	}
#endif

	/* USB packets, not necessarily handled in the order they're
	 * issued ... that's fine if they're different endpoints.
	 */
	if (irqstat & SL11H_INTMASK_DONE_A) {
		done(sl811, sl811->active_a, SL811_EP_A(SL811_HOST_BUF), regs);
		sl811->active_a = NULL;
		sl811->stat_a++;
	}
#ifdef USE_B
	if (irqstat & SL11H_INTMASK_DONE_B) {
		done(sl811, sl811->active_b, SL811_EP_B(SL811_HOST_BUF), regs);
		sl811->active_b = NULL;
		sl811->stat_b++;
	}
#endif
	if (irqstat & SL11H_INTMASK_SOFINTR) {
		unsigned index;

		index = sl811->frame++ % (PERIODIC_SIZE - 1);
		sl811->stat_sof++;

		/* be graceful about almost-inevitable periodic schedule
		 * overruns:  continue the previous frame's transfers iff
		 * this one has nothing scheduled.
		 */
		if (sl811->next_periodic) {
			// ERR("overrun to slot %d\n", index);
			sl811->stat_overrun++;
		}
		if (sl811->periodic[index])
			sl811->next_periodic = sl811->periodic[index];
	}

	/* khubd manages debouncing and wakeup */
	if (irqstat & SL11H_INTMASK_INSRMV) {
		sl811->stat_insrmv++;

		/* most stats are reset for each VBUS session */
		sl811->stat_wake = 0;
		sl811->stat_sof = 0;
		sl811->stat_a = 0;
		sl811->stat_b = 0;
		sl811->stat_lost = 0;

		sl811->ctrl1 = 0;
		sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);

		sl811->irq_enable = SL11H_INTMASK_INSRMV;
		sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);

		/* usbcore nukes other pending transactions on disconnect */
		if (sl811->active_a) {
			sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG), 0);
			finish_request(sl811, sl811->active_a,
				container_of(sl811->active_a->hep->urb_list.next,
					struct urb, urb_list),
				NULL, -ESHUTDOWN);
			sl811->active_a = NULL;
		}
#ifdef	USE_B
		if (sl811->active_b) {
			sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG), 0);
			finish_request(sl811, sl811->active_b,
				container_of(sl811->active_b->hep->urb_list.next,
					struct urb, urb_list),
				NULL, -ESHUTDOWN);
			sl811->active_b = NULL;
		}
#endif

		/* port status seems weird until after reset, so
		 * force the reset and make khubd clean up later.
		 */
		sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION)
				| (1 << USB_PORT_FEAT_CONNECTION);

	} else if (irqstat & SL11H_INTMASK_RD) {
		/* remote wakeup is only meaningful while suspended */
		if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) {
			DBG("wakeup\n");
			sl811->port1 |= 1 << USB_PORT_FEAT_C_SUSPEND;
			sl811->stat_wake++;
		} else
			irqstat &= ~SL11H_INTMASK_RD;
	}

	if (irqstat) {
		if (sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))
			start_transfer(sl811);
		ret = IRQ_HANDLED;
		/* servicing may have raised fresh irq status; re-check */
		if (retries--)
			goto retry;
	}

	if (sl811->periodic_count == 0 && list_empty(&sl811->async))
		sofirq_off(sl811);
	sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);

	spin_unlock(&sl811->lock);

	return ret;
}
772
773/*-------------------------------------------------------------------------*/
774
775/* usb 1.1 says max 90% of a frame is available for periodic transfers.
776 * this driver doesn't promise that much since it's got to handle an
777 * IRQ per packet; irq handling latencies also use up that time.
778 */
779#define MAX_PERIODIC_LOAD 500 /* out of 1000 usec */
780
781static int balance(struct sl811 *sl811, u16 period, u16 load)
782{
783 int i, branch = -ENOSPC;
784
785 /* search for the least loaded schedule branch of that period
786 * which has enough bandwidth left unreserved.
787 */
788 for (i = 0; i < period ; i++) {
789 if (branch < 0 || sl811->load[branch] > sl811->load[i]) {
790 int j;
791
792 for (j = i; j < PERIODIC_SIZE; j += period) {
793 if ((sl811->load[j] + load)
794 > MAX_PERIODIC_LOAD)
795 break;
796 }
797 if (j < PERIODIC_SIZE)
798 continue;
799 branch = i;
800 }
801 }
802 return branch;
803}
804
805/*-------------------------------------------------------------------------*/
806
807static int sl811h_urb_enqueue(
808 struct usb_hcd *hcd,
809 struct usb_host_endpoint *hep,
810 struct urb *urb,
811 int mem_flags
812) {
813 struct sl811 *sl811 = hcd_to_sl811(hcd);
814 struct usb_device *udev = urb->dev;
815 unsigned int pipe = urb->pipe;
816 int is_out = !usb_pipein(pipe);
817 int type = usb_pipetype(pipe);
818 int epnum = usb_pipeendpoint(pipe);
819 struct sl811h_ep *ep = NULL;
820 unsigned long flags;
821 int i;
822 int retval = 0;
823
824#ifdef DISABLE_ISO
825 if (type == PIPE_ISOCHRONOUS)
826 return -ENOSPC;
827#endif
828
829 /* avoid all allocations within spinlocks */
830 if (!hep->hcpriv)
831 ep = kcalloc(1, sizeof *ep, mem_flags);
832
833 spin_lock_irqsave(&sl811->lock, flags);
834
835 /* don't submit to a dead or disabled port */
836 if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))
837 || !HC_IS_RUNNING(hcd->state)) {
838 retval = -ENODEV;
839 goto fail;
840 }
841
842 if (hep->hcpriv) {
843 kfree(ep);
844 ep = hep->hcpriv;
845 } else if (!ep) {
846 retval = -ENOMEM;
847 goto fail;
848
849 } else {
850 INIT_LIST_HEAD(&ep->schedule);
851 ep->udev = usb_get_dev(udev);
852 ep->epnum = epnum;
853 ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
854 ep->defctrl = SL11H_HCTLMASK_ARM | SL11H_HCTLMASK_ENABLE;
855 usb_settoggle(udev, epnum, is_out, 0);
856
857 if (type == PIPE_CONTROL)
858 ep->nextpid = USB_PID_SETUP;
859 else if (is_out)
860 ep->nextpid = USB_PID_OUT;
861 else
862 ep->nextpid = USB_PID_IN;
863
864 if (ep->maxpacket > H_MAXPACKET) {
865 /* iso packets up to 240 bytes could work... */
866 DBG("dev %d ep%d maxpacket %d\n",
867 udev->devnum, epnum, ep->maxpacket);
868 retval = -EINVAL;
869 goto fail;
870 }
871
872 if (udev->speed == USB_SPEED_LOW) {
873 /* send preamble for external hub? */
874 if (!(sl811->ctrl1 & SL11H_CTL1MASK_LSPD))
875 ep->defctrl |= SL11H_HCTLMASK_PREAMBLE;
876 }
877 switch (type) {
878 case PIPE_ISOCHRONOUS:
879 case PIPE_INTERRUPT:
880 if (urb->interval > PERIODIC_SIZE)
881 urb->interval = PERIODIC_SIZE;
882 ep->period = urb->interval;
883 ep->branch = PERIODIC_SIZE;
884 if (type == PIPE_ISOCHRONOUS)
885 ep->defctrl |= SL11H_HCTLMASK_ISOCH;
886 ep->load = usb_calc_bus_time(udev->speed, !is_out,
887 (type == PIPE_ISOCHRONOUS),
888 usb_maxpacket(udev, pipe, is_out))
889 / 1000;
890 break;
891 }
892
893 hep->hcpriv = ep;
894 }
895
896 /* maybe put endpoint into schedule */
897 switch (type) {
898 case PIPE_CONTROL:
899 case PIPE_BULK:
900 if (list_empty(&ep->schedule))
901 list_add_tail(&ep->schedule, &sl811->async);
902 break;
903 case PIPE_ISOCHRONOUS:
904 case PIPE_INTERRUPT:
905 urb->interval = ep->period;
906 if (ep->branch < PERIODIC_SIZE)
907 break;
908
909 retval = balance(sl811, ep->period, ep->load);
910 if (retval < 0)
911 goto fail;
912 ep->branch = retval;
913 retval = 0;
914 urb->start_frame = (sl811->frame & (PERIODIC_SIZE - 1))
915 + ep->branch;
916
917 /* sort each schedule branch by period (slow before fast)
918 * to share the faster parts of the tree without needing
919 * dummy/placeholder nodes
920 */
921 DBG("schedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
922 for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
923 struct sl811h_ep **prev = &sl811->periodic[i];
924 struct sl811h_ep *here = *prev;
925
926 while (here && ep != here) {
927 if (ep->period > here->period)
928 break;
929 prev = &here->next;
930 here = *prev;
931 }
932 if (ep != here) {
933 ep->next = here;
934 *prev = ep;
935 }
936 sl811->load[i] += ep->load;
937 }
938 sl811->periodic_count++;
939 hcd->self.bandwidth_allocated += ep->load / ep->period;
940 sofirq_on(sl811);
941 }
942
943 /* in case of unlink-during-submit */
944 spin_lock(&urb->lock);
945 if (urb->status != -EINPROGRESS) {
946 spin_unlock(&urb->lock);
947 finish_request(sl811, ep, urb, NULL, 0);
948 retval = 0;
949 goto fail;
950 }
951 urb->hcpriv = hep;
952 spin_unlock(&urb->lock);
953
954 start_transfer(sl811);
955 sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
956fail:
957 spin_unlock_irqrestore(&sl811->lock, flags);
958 return retval;
959}
960
961static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
962{
963 struct sl811 *sl811 = hcd_to_sl811(hcd);
964 struct usb_host_endpoint *hep = urb->hcpriv;
965 unsigned long flags;
966 struct sl811h_ep *ep;
967 int retval = 0;
968
969 if (!hep)
970 return -EINVAL;
971
972 spin_lock_irqsave(&sl811->lock, flags);
973 ep = hep->hcpriv;
974 if (ep) {
975 /* finish right away if this urb can't be active ...
976 * note that some drivers wrongly expect delays
977 */
978 if (ep->hep->urb_list.next != &urb->urb_list) {
979 /* not front of queue? never active */
980
981 /* for active transfers, we expect an IRQ */
982 } else if (sl811->active_a == ep) {
983 if (time_before_eq(sl811->jiffies_a, jiffies)) {
984 /* happens a lot with lowspeed?? */
985 DBG("giveup on DONE_A: ctrl %02x sts %02x\n",
986 sl811_read(sl811,
987 SL811_EP_A(SL11H_HOSTCTLREG)),
988 sl811_read(sl811,
989 SL811_EP_A(SL11H_PKTSTATREG)));
990 sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG),
991 0);
992 sl811->active_a = NULL;
993 } else
994 urb = NULL;
995#ifdef USE_B
996 } else if (sl811->active_b == ep) {
997 if (time_before_eq(sl811->jiffies_a, jiffies)) {
998 /* happens a lot with lowspeed?? */
999 DBG("giveup on DONE_B: ctrl %02x sts %02x\n",
1000 sl811_read(sl811,
1001 SL811_EP_B(SL11H_HOSTCTLREG)),
1002 sl811_read(sl811,
1003 SL811_EP_B(SL11H_PKTSTATREG)));
1004 sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG),
1005 0);
1006 sl811->active_b = NULL;
1007 } else
1008 urb = NULL;
1009#endif
1010 } else {
1011 /* front of queue for inactive endpoint */
1012 }
1013
1014 if (urb)
1015 finish_request(sl811, ep, urb, NULL, 0);
1016 else
1017 VDBG("dequeue, urb %p active %s; wait4irq\n", urb,
1018 (sl811->active_a == ep) ? "A" : "B");
1019 } else
1020 retval = -EINVAL;
1021 spin_unlock_irqrestore(&sl811->lock, flags);
1022 return retval;
1023}
1024
1025static void
1026sl811h_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1027{
1028 struct sl811h_ep *ep = hep->hcpriv;
1029
1030 if (!ep)
1031 return;
1032
1033 /* assume we'd just wait for the irq */
1034 if (!list_empty(&hep->urb_list))
1035 msleep(3);
1036 if (!list_empty(&hep->urb_list))
1037 WARN("ep %p not empty?\n", ep);
1038
1039 usb_put_dev(ep->udev);
1040 kfree(ep);
1041 hep->hcpriv = NULL;
1042}
1043
1044static int
1045sl811h_get_frame(struct usb_hcd *hcd)
1046{
1047 struct sl811 *sl811 = hcd_to_sl811(hcd);
1048
1049 /* wrong except while periodic transfers are scheduled;
1050 * never matches the on-the-wire frame;
1051 * subject to overruns.
1052 */
1053 return sl811->frame;
1054}
1055
1056
1057/*-------------------------------------------------------------------------*/
1058
/* The virtual root hub timer IRQ checks for hub status.
 * Returns 0 (nothing changed) or 1 with buf's port-1 bit set.
 */
static int
sl811h_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct sl811 *sl811 = hcd_to_sl811(hcd);
#ifdef	QUIRK3
	unsigned long flags;

	/* non-SMP HACK: use root hub timer as i/o watchdog
	 * this seems essential when SOF IRQs aren't in use...
	 */
	local_irq_save(flags);
	if (!timer_pending(&sl811->timer)) {
		/* poll the irq handler by hand; if it finds work the
		 * hardware never signaled, count a "lost" irq
		 */
		if (sl811h_irq( /* ~0, */ hcd, NULL) != IRQ_NONE)
			sl811->stat_lost++;
	}
	local_irq_restore(flags);
#endif

	/* the upper 16 bits of port1 hold the wPortChange bits;
	 * no report unless at least one is set
	 */
	if (!(sl811->port1 & (0xffff << 16)))
		return 0;

	/* tell khubd port 1 changed */
	*buf = (1 << 1);
	return 1;
}
1085
1086static void
1087sl811h_hub_descriptor (
1088 struct sl811 *sl811,
1089 struct usb_hub_descriptor *desc
1090) {
1091 u16 temp = 0;
1092
1093 desc->bDescriptorType = 0x29;
1094 desc->bHubContrCurrent = 0;
1095
1096 desc->bNbrPorts = 1;
1097 desc->bDescLength = 9;
1098
1099 /* per-port power switching (gang of one!), or none */
1100 desc->bPwrOn2PwrGood = 0;
1101 if (sl811->board && sl811->board->port_power) {
1102 desc->bPwrOn2PwrGood = sl811->board->potpg;
1103 if (!desc->bPwrOn2PwrGood)
1104 desc->bPwrOn2PwrGood = 10;
1105 temp = 0x0001;
1106 } else
1107 temp = 0x0002;
1108
1109 /* no overcurrent errors detection/handling */
1110 temp |= 0x0010;
1111
1112 desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
1113
1114 /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
1115 desc->bitmap[0] = 1 << 1;
1116 desc->bitmap[1] = ~0;
1117}
1118
/* Timer callback: ends a timed bus-signaling episode (reset/SE0 or
 * resume/K) that sl811h_hub_control() started, then re-samples the
 * port state and re-enables interrupts accordingly.
 */
static void
sl811h_timer(unsigned long _sl811)
{
	struct sl811 	*sl811 = (void *) _sl811;
	unsigned long	flags;
	u8		irqstat;
	u8		signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
	const u32	mask = (1 << USB_PORT_FEAT_CONNECTION)
				| (1 << USB_PORT_FEAT_ENABLE)
				| (1 << USB_PORT_FEAT_LOWSPEED);

	spin_lock_irqsave(&sl811->lock, flags);

	/* stop special signaling */
	sl811->ctrl1 &= ~SL11H_CTL1MASK_FORCE;
	sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
	udelay(3);

	irqstat = sl811_read(sl811, SL11H_IRQ_STATUS);

	/* which forced-signaling episode just ended? */
	switch (signaling) {
	case SL11H_CTL1MASK_SE0:
		DBG("end reset\n");
		sl811->port1 = (1 << USB_PORT_FEAT_C_RESET)
				| (1 << USB_PORT_FEAT_POWER);
		sl811->ctrl1 = 0;
		/* don't wrongly ack RD */
		if (irqstat & SL11H_INTMASK_INSRMV)
			irqstat &= ~SL11H_INTMASK_RD;
		break;
	case SL11H_CTL1MASK_K:
		DBG("end resume\n");
		sl811->port1 &= ~(1 << USB_PORT_FEAT_SUSPEND);
		break;
	default:
		DBG("odd timer signaling: %02x\n", signaling);
		break;
	}
	/* ack whatever accumulated while signaling was forced */
	sl811_write(sl811, SL11H_IRQ_STATUS, irqstat);

	if (irqstat & SL11H_INTMASK_RD) {
		/* usbcore nukes all pending transactions on disconnect */
		if (sl811->port1 & (1 << USB_PORT_FEAT_CONNECTION))
			sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION)
					| (1 << USB_PORT_FEAT_C_ENABLE);
		sl811->port1 &= ~mask;
		sl811->irq_enable = SL11H_INTMASK_INSRMV;
	} else {
		/* device present; DP set here indicates full speed */
		sl811->port1 |= mask;
		if (irqstat & SL11H_INTMASK_DP)
			sl811->port1 &= ~(1 << USB_PORT_FEAT_LOWSPEED);
		sl811->irq_enable = SL11H_INTMASK_INSRMV | SL11H_INTMASK_RD;
	}

	if (sl811->port1 & (1 << USB_PORT_FEAT_CONNECTION)) {
		u8	ctrl2 = SL811HS_CTL2_INIT;

		sl811->irq_enable |= SL11H_INTMASK_DONE_A;
#ifdef USE_B
		sl811->irq_enable |= SL11H_INTMASK_DONE_B;
#endif
		if (sl811->port1 & (1 << USB_PORT_FEAT_LOWSPEED)) {
			sl811->ctrl1 |= SL11H_CTL1MASK_LSPD;
			ctrl2 |= SL811HS_CTL2MASK_DSWAP;
		}

		/* start SOFs flowing, kickstarting with A registers */
		sl811->ctrl1 |= SL11H_CTL1MASK_SOF_ENA;
		sl811_write(sl811, SL11H_SOFLOWREG, 0xe0);
		sl811_write(sl811, SL811HS_CTLREG2, ctrl2);

		/* autoincrementing */
		sl811_write(sl811, SL811_EP_A(SL11H_BUFLNTHREG), 0);
		writeb(SL_SOF, sl811->data_reg);
		writeb(0, sl811->data_reg);
		sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG),
				SL11H_HCTLMASK_ARM);

		/* khubd provides debounce delay */
	} else {
		sl811->ctrl1 = 0;
	}
	sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);

	/* reenable irqs */
	sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
	spin_unlock_irqrestore(&sl811->lock, flags);
}
1207
/* Handle root-hub control requests for the one (virtual) port.
 * Timed signaling episodes (reset, resume) are completed later by
 * sl811h_timer().  Returns 0 or -EPIPE ("protocol stall") on error.
 */
static int
sl811h_hub_control(
	struct usb_hcd	*hcd,
	u16		typeReq,
	u16		wValue,
	u16		wIndex,
	char		*buf,
	u16		wLength
) {
	struct sl811	*sl811 = hcd_to_sl811(hcd);
	int		retval = 0;
	unsigned long	flags;

	spin_lock_irqsave(&sl811->lock, flags);

	switch (typeReq) {
	case ClearHubFeature:
	case SetHubFeature:
		/* hub-level features are accepted but ignored */
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (wIndex != 1 || wLength != 0)
			goto error;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			/* disable the port: keep only the POWER bit,
			 * quiesce the chip, listen only for (dis)connect
			 */
			sl811->port1 &= (1 << USB_PORT_FEAT_POWER);
			sl811->ctrl1 = 0;
			sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
			sl811->irq_enable = SL11H_INTMASK_INSRMV;
			sl811_write(sl811, SL11H_IRQ_ENABLE,
						sl811->irq_enable);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (!(sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)))
				break;

			/* 20 msec of resume/K signaling, other irqs blocked */
			DBG("start resume...\n");
			sl811->irq_enable = 0;
			sl811_write(sl811, SL11H_IRQ_ENABLE,
						sl811->irq_enable);
			sl811->ctrl1 |= SL11H_CTL1MASK_K;
			sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);

			/* sl811h_timer() finishes the resume */
			mod_timer(&sl811->timer, jiffies
					+ msecs_to_jiffies(20));
			break;
		case USB_PORT_FEAT_POWER:
			port_power(sl811, 0);
			break;
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_C_SUSPEND:
		case USB_PORT_FEAT_C_CONNECTION:
		case USB_PORT_FEAT_C_OVER_CURRENT:
		case USB_PORT_FEAT_C_RESET:
			break;
		default:
			goto error;
		}
		/* clear the feature/change bit in the port status word */
		sl811->port1 &= ~(1 << wValue);
		break;
	case GetHubDescriptor:
		sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf);
		break;
	case GetHubStatus:
		/* no hub-level status to report */
		*(__le32 *) buf = cpu_to_le32(0);
		break;
	case GetPortStatus:
		if (wIndex != 1)
			goto error;
		*(__le32 *) buf = cpu_to_le32(sl811->port1);

#ifndef	VERBOSE
	if (*(u16*)(buf+2))	/* only if wPortChange is interesting */
#endif
		DBG("GetPortStatus %08x\n", sl811->port1);
		break;
	case SetPortFeature:
		if (wIndex != 1 || wLength != 0)
			goto error;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			/* can't suspend during reset, or when disabled */
			if (sl811->port1 & (1 << USB_PORT_FEAT_RESET))
				goto error;
			if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE)))
				goto error;

			/* stop generating SOFs; the bus idles (suspends) */
			DBG("suspend...\n");
			sl811->ctrl1 &= ~SL11H_CTL1MASK_SOF_ENA;
			sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
			break;
		case USB_PORT_FEAT_POWER:
			port_power(sl811, 1);
			break;
		case USB_PORT_FEAT_RESET:
			if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND))
				goto error;
			if (!(sl811->port1 & (1 << USB_PORT_FEAT_POWER)))
				break;

			/* 50 msec of reset/SE0 signaling, irqs blocked */
			sl811->irq_enable = 0;
			sl811_write(sl811, SL11H_IRQ_ENABLE,
						sl811->irq_enable);
			sl811->ctrl1 = SL11H_CTL1MASK_SE0;
			sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
			sl811->port1 |= (1 << USB_PORT_FEAT_RESET);
			/* sl811h_timer() finishes the reset */
			mod_timer(&sl811->timer, jiffies
					+ msecs_to_jiffies(50));
			break;
		default:
			goto error;
		}
		/* set the feature bit in the port status word */
		sl811->port1 |= 1 << wValue;
		break;

	default:
error:
		/* "protocol stall" on error */
		retval = -EPIPE;
	}

	spin_unlock_irqrestore(&sl811->lock, flags);
	return retval;
}
1340
1341#ifdef CONFIG_PM
1342
static int
sl811h_hub_suspend(struct usb_hcd *hcd)
{
	/* no-op placeholder; stopping SOF generation would go here */
	DBG("%s\n", __FUNCTION__);
	return 0;
}
1350
static int
sl811h_hub_resume(struct usb_hcd *hcd)
{
	/* no-op placeholder; restarting SOF generation would go here */
	DBG("%s\n", __FUNCTION__);
	return 0;
}
1358
1359#else
1360
1361#define sl811h_hub_suspend NULL
1362#define sl811h_hub_resume NULL
1363
1364#endif
1365
1366
1367/*-------------------------------------------------------------------------*/
1368
1369#ifdef STUB_DEBUG_FILE
1370
1371static inline void create_debug_file(struct sl811 *sl811) { }
1372static inline void remove_debug_file(struct sl811 *sl811) { }
1373
1374#else
1375
1376#include <linux/proc_fs.h>
1377#include <linux/seq_file.h>
1378
1379static void dump_irq(struct seq_file *s, char *label, u8 mask)
1380{
1381 seq_printf(s, "%s %02x%s%s%s%s%s%s\n", label, mask,
1382 (mask & SL11H_INTMASK_DONE_A) ? " done_a" : "",
1383 (mask & SL11H_INTMASK_DONE_B) ? " done_b" : "",
1384 (mask & SL11H_INTMASK_SOFINTR) ? " sof" : "",
1385 (mask & SL11H_INTMASK_INSRMV) ? " ins/rmv" : "",
1386 (mask & SL11H_INTMASK_RD) ? " rd" : "",
1387 (mask & SL11H_INTMASK_DP) ? " dp" : "");
1388}
1389
/* seq_file show routine: dump driver version, port status, session
 * statistics, chip registers (when not suspended), the async schedule,
 * and the periodic schedule tree.
 */
static int proc_sl811h_show(struct seq_file *s, void *unused)
{
	struct sl811		*sl811 = s->private;
	struct sl811h_ep	*ep;
	unsigned		i;

	seq_printf(s, "%s\n%s version %s\nportstatus[1] = %08x\n",
		sl811_to_hcd(sl811)->product_desc,
		hcd_name, DRIVER_VERSION,
		sl811->port1);

	seq_printf(s, "insert/remove: %ld\n", sl811->stat_insrmv);
	seq_printf(s, "current session:  done_a %ld done_b %ld "
			"wake %ld sof %ld overrun %ld lost %ld\n\n",
		sl811->stat_a, sl811->stat_b,
		sl811->stat_wake, sl811->stat_sof,
		sl811->stat_overrun, sl811->stat_lost);

	spin_lock_irq(&sl811->lock);

	/* don't touch the chip registers while it's suspended */
	if (sl811->ctrl1 & SL11H_CTL1MASK_SUSPEND)
		seq_printf(s, "(suspended)\n\n");
	else {
		u8	t = sl811_read(sl811, SL11H_CTLREG1);

		seq_printf(s, "ctrl1 %02x%s%s%s%s\n", t,
			(t & SL11H_CTL1MASK_SOF_ENA) ? " sofgen" : "",
			({char *s; switch (t & SL11H_CTL1MASK_FORCE) {
			case SL11H_CTL1MASK_NORMAL: s = ""; break;
			case SL11H_CTL1MASK_SE0: s = " se0/reset"; break;
			case SL11H_CTL1MASK_K: s = " k/resume"; break;
			default: s = "j"; break;
			}; s; }),
			(t & SL11H_CTL1MASK_LSPD) ? " lowspeed" : "",
			(t & SL11H_CTL1MASK_SUSPEND) ? " suspend" : "");

		dump_irq(s, "irq_enable",
				sl811_read(sl811, SL11H_IRQ_ENABLE));
		dump_irq(s, "irq_status",
				sl811_read(sl811, SL11H_IRQ_STATUS));
		seq_printf(s, "frame clocks remaining:  %d\n",
				sl811_read(sl811, SL11H_SOFTMRREG) << 6);
	}

	/* the two transfer register sets and who owns them */
	seq_printf(s, "A: qh%p ctl %02x sts %02x\n", sl811->active_a,
		sl811_read(sl811, SL811_EP_A(SL11H_HOSTCTLREG)),
		sl811_read(sl811, SL811_EP_A(SL11H_PKTSTATREG)));
	seq_printf(s, "B: qh%p ctl %02x sts %02x\n", sl811->active_b,
		sl811_read(sl811, SL811_EP_B(SL11H_HOSTCTLREG)),
		sl811_read(sl811, SL811_EP_B(SL11H_PKTSTATREG)));
	seq_printf(s, "\n");
	/* async (control/bulk) schedule, with each endpoint's urbs */
	list_for_each_entry (ep, &sl811->async, schedule) {
		struct urb		*urb;

		seq_printf(s, "%s%sqh%p, ep%d%s, maxpacket %d"
					" nak %d err %d\n",
			(ep == sl811->active_a) ? "(A) " : "",
			(ep == sl811->active_b) ? "(B) " : "",
			ep, ep->epnum,
			({ char *s; switch (ep->nextpid) {
			case USB_PID_IN: s = "in"; break;
			case USB_PID_OUT: s = "out"; break;
			case USB_PID_SETUP: s = "setup"; break;
			case USB_PID_ACK: s = "status"; break;
			default: s = "?"; break;
			}; s;}),
			ep->maxpacket,
			ep->nak_count, ep->error_count);
		list_for_each_entry (urb, &ep->hep->urb_list, urb_list) {
			seq_printf(s, "  urb%p, %d/%d\n", urb,
				urb->actual_length,
				urb->transfer_buffer_length);
		}
	}
	if (!list_empty(&sl811->async))
		seq_printf(s, "\n");

	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);

	/* periodic tree, one line per frame slot with reserved load */
	for (i = 0; i < PERIODIC_SIZE; i++) {
		ep = sl811->periodic[i];
		if (!ep)
			continue;
		seq_printf(s, "%2d [%3d]:\n", i, sl811->load[i]);

		/* DUMB: prints shared entries multiple times */
		do {
			seq_printf(s,
				"   %s%sqh%d/%p (%sdev%d ep%d%s max %d) "
					"err %d\n",
				(ep == sl811->active_a) ? "(A) " : "",
				(ep == sl811->active_b) ? "(B) " : "",
				ep->period, ep,
				(ep->udev->speed == USB_SPEED_FULL)
					? "" : "ls ",
				ep->udev->devnum, ep->epnum,
				(ep->epnum == 0) ? ""
					: ((ep->nextpid == USB_PID_IN)
						? "in"
						: "out"),
				ep->maxpacket, ep->error_count);
			ep = ep->next;
		} while (ep);
	}

	spin_unlock_irq(&sl811->lock);
	seq_printf(s, "\n");

	return 0;
}
1500
1501static int proc_sl811h_open(struct inode *inode, struct file *file)
1502{
1503 return single_open(file, proc_sl811h_show, PDE(inode)->data);
1504}
1505
/* read-only procfs file, backed by the seq_file single_* helpers */
static struct file_operations proc_ops = {
	.open		= proc_sl811h_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1512
/* expect just one sl811 per system, so the debug file has a fixed name */
static const char proc_filename[] = "driver/sl811h";
1515
1516static void create_debug_file(struct sl811 *sl811)
1517{
1518 struct proc_dir_entry *pde;
1519
1520 pde = create_proc_entry(proc_filename, 0, NULL);
1521 if (pde == NULL)
1522 return;
1523
1524 pde->proc_fops = &proc_ops;
1525 pde->data = sl811;
1526 sl811->pde = pde;
1527}
1528
1529static void remove_debug_file(struct sl811 *sl811)
1530{
1531 if (sl811->pde)
1532 remove_proc_entry(proc_filename, NULL);
1533}
1534
1535#endif
1536
1537/*-------------------------------------------------------------------------*/
1538
1539static void
1540sl811h_stop(struct usb_hcd *hcd)
1541{
1542 struct sl811 *sl811 = hcd_to_sl811(hcd);
1543 unsigned long flags;
1544
1545 del_timer_sync(&hcd->rh_timer);
1546
1547 spin_lock_irqsave(&sl811->lock, flags);
1548 port_power(sl811, 0);
1549 spin_unlock_irqrestore(&sl811->lock, flags);
1550}
1551
1552static int
1553sl811h_start(struct usb_hcd *hcd)
1554{
1555 struct sl811 *sl811 = hcd_to_sl811(hcd);
1556 struct usb_device *udev;
1557
1558 /* chip has been reset, VBUS power is off */
1559
1560 udev = usb_alloc_dev(NULL, &hcd->self, 0);
1561 if (!udev)
1562 return -ENOMEM;
1563
1564 udev->speed = USB_SPEED_FULL;
1565 hcd->state = HC_STATE_RUNNING;
1566
1567 if (sl811->board)
1568 hcd->can_wakeup = sl811->board->can_wakeup;
1569
1570 if (usb_hcd_register_root_hub(udev, hcd) != 0) {
1571 usb_put_dev(udev);
1572 sl811h_stop(hcd);
1573 return -ENODEV;
1574 }
1575
1576 if (sl811->board && sl811->board->power)
1577 hub_set_power_budget(udev, sl811->board->power * 2);
1578
1579 return 0;
1580}
1581
1582/*-------------------------------------------------------------------------*/
1583
/* HCD operations table handed to usbcore; no DMA, USB 1.1 only */
static struct hc_driver sl811h_hc_driver = {
	.description =		hcd_name,
	.hcd_priv_size =	sizeof(struct sl811),

	/*
	 * generic hardware linkage
	 */
	.irq =			sl811h_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	/* Basic lifecycle operations */
	.start =		sl811h_start,
	.stop =			sl811h_stop,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		sl811h_urb_enqueue,
	.urb_dequeue =		sl811h_urb_dequeue,
	.endpoint_disable =	sl811h_endpoint_disable,

	/*
	 * periodic schedule support
	 */
	.get_frame_number =	sl811h_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	sl811h_hub_status_data,
	.hub_control =		sl811h_hub_control,
	.hub_suspend =		sl811h_hub_suspend,
	.hub_resume =		sl811h_hub_resume,
};
1618
1619/*-------------------------------------------------------------------------*/
1620
1621static int __init_or_module
1622sl811h_remove(struct device *dev)
1623{
1624 struct usb_hcd *hcd = dev_get_drvdata(dev);
1625 struct sl811 *sl811 = hcd_to_sl811(hcd);
1626 struct platform_device *pdev;
1627 struct resource *res;
1628
1629 pdev = container_of(dev, struct platform_device, dev);
1630
1631 remove_debug_file(sl811);
1632 usb_remove_hcd(hcd);
1633
1634 iounmap(sl811->data_reg);
1635 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1636 release_mem_region(res->start, 1);
1637
1638 iounmap(sl811->addr_reg);
1639 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1640 release_mem_region(res->start, 1);
1641
1642 usb_put_hcd(hcd);
1643 return 0;
1644}
1645
1646#define resource_len(r) (((r)->end - (r)->start) + 1)
1647
1648static int __init
1649sl811h_probe(struct device *dev)
1650{
1651 struct usb_hcd *hcd;
1652 struct sl811 *sl811;
1653 struct platform_device *pdev;
1654 struct resource *addr, *data;
1655 int irq;
1656 void __iomem *addr_reg;
1657 void __iomem *data_reg;
1658 int retval;
1659 u8 tmp;
1660
1661 /* basic sanity checks first. board-specific init logic should
1662 * have initialized these three resources and probably board
1663 * specific platform_data. we don't probe for IRQs, and do only
1664 * minimal sanity checking.
1665 */
1666 pdev = container_of(dev, struct platform_device, dev);
1667 if (pdev->num_resources < 3)
1668 return -ENODEV;
1669
1670 addr = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1671 data = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1672 irq = platform_get_irq(pdev, 0);
1673 if (!addr || !data || irq < 0)
1674 return -ENODEV;
1675
1676 /* refuse to confuse usbcore */
1677 if (dev->dma_mask) {
1678 DBG("no we won't dma\n");
1679 return -EINVAL;
1680 }
1681
1682 if (!request_mem_region(addr->start, 1, hcd_name)) {
1683 retval = -EBUSY;
1684 goto err1;
1685 }
1686 addr_reg = ioremap(addr->start, resource_len(addr));
1687 if (addr_reg == NULL) {
1688 retval = -ENOMEM;
1689 goto err2;
1690 }
1691
1692 if (!request_mem_region(data->start, 1, hcd_name)) {
1693 retval = -EBUSY;
1694 goto err3;
1695 }
1696 data_reg = ioremap(data->start, resource_len(addr));
1697 if (data_reg == NULL) {
1698 retval = -ENOMEM;
1699 goto err4;
1700 }
1701
1702 /* allocate and initialize hcd */
1703 hcd = usb_create_hcd(&sl811h_hc_driver, dev, dev->bus_id);
1704 if (!hcd) {
1705 retval = -ENOMEM;
1706 goto err5;
1707 }
1708 hcd->rsrc_start = addr->start;
1709 sl811 = hcd_to_sl811(hcd);
1710
1711 spin_lock_init(&sl811->lock);
1712 INIT_LIST_HEAD(&sl811->async);
1713 sl811->board = dev->platform_data;
1714 init_timer(&sl811->timer);
1715 sl811->timer.function = sl811h_timer;
1716 sl811->timer.data = (unsigned long) sl811;
1717 sl811->addr_reg = addr_reg;
1718 sl811->data_reg = data_reg;
1719
1720 spin_lock_irq(&sl811->lock);
1721 port_power(sl811, 0);
1722 spin_unlock_irq(&sl811->lock);
1723 msleep(200);
1724
1725 tmp = sl811_read(sl811, SL11H_HWREVREG);
1726 switch (tmp >> 4) {
1727 case 1:
1728 hcd->product_desc = "SL811HS v1.2";
1729 break;
1730 case 2:
1731 hcd->product_desc = "SL811HS v1.5";
1732 break;
1733 default:
1734 /* reject case 0, SL11S is less functional */
1735 DBG("chiprev %02x\n", tmp);
1736 retval = -ENXIO;
1737 goto err6;
1738 }
1739
1740 /* sl811s would need a different handler for this irq */
1741#ifdef CONFIG_ARM
1742 /* Cypress docs say the IRQ is IRQT_HIGH ... */
1743 set_irq_type(irq, IRQT_RISING);
1744#endif
1745 retval = usb_add_hcd(hcd, irq, SA_INTERRUPT);
1746 if (retval != 0)
1747 goto err6;
1748
1749 create_debug_file(sl811);
1750 return retval;
1751
1752 err6:
1753 usb_put_hcd(hcd);
1754 err5:
1755 iounmap(data_reg);
1756 err4:
1757 release_mem_region(data->start, 1);
1758 err3:
1759 iounmap(addr_reg);
1760 err2:
1761 release_mem_region(addr->start, 1);
1762 err1:
1763 DBG("init error, %d\n", retval);
1764 return retval;
1765}
1766
1767#ifdef CONFIG_PM
1768
1769/* for this device there's no useful distinction between the controller
1770 * and its root hub, except that the root hub only gets direct PM calls
1771 * when CONFIG_USB_SUSPEND is enabled.
1772 */
1773
/* Bus-level suspend hook: on a light suspend (to RAM or lighter) just
 * suspend the root hub; on anything deeper, cut VBUS power entirely.
 * NOTE(review): pm_message_t is compared as an integer here -- this
 * matches the 2.6.12-era PM API; confirm against the running kernel.
 */
static int
sl811h_suspend(struct device *dev, pm_message_t state, u32 phase)
{
	struct usb_hcd	*hcd = dev_get_drvdata(dev);
	struct sl811	*sl811 = hcd_to_sl811(hcd);
	int		retval = 0;

	/* act only on the final power-down phase */
	if (phase != SUSPEND_POWER_DOWN)
		return retval;

	if (state <= PM_SUSPEND_MEM)
		retval = sl811h_hub_suspend(hcd);
	else
		port_power(sl811, 0);
	if (retval == 0)
		dev->power.power_state = state;
	return retval;
}
1792
/* Bus-level resume hook: if we powered the port down (or can't do
 * remote wakeup), start over with a fresh power-up; otherwise just
 * resume the root hub.
 */
static int
sl811h_resume(struct device *dev, u32 phase)
{
	struct usb_hcd	*hcd = dev_get_drvdata(dev);
	struct sl811	*sl811 = hcd_to_sl811(hcd);

	/* act only on the final power-on phase */
	if (phase != RESUME_POWER_ON)
		return 0;

	/* with no "check to see if VBUS is still powered" board hook,
	 * let's assume it'd only be powered to enable remote wakeup.
	 */
	if (dev->power.power_state > PM_SUSPEND_MEM
			|| !hcd->can_wakeup) {
		sl811->port1 = 0;
		port_power(sl811, 1);
		return 0;
	}

	dev->power.power_state = PMSG_ON;
	return sl811h_hub_resume(hcd);
}
1815
1816#else
1817
1818#define sl811h_suspend NULL
1819#define sl811h_resume NULL
1820
1821#endif
1822
1823
/* platform-bus glue; boards instantiate a matching platform_device */
static struct device_driver sl811h_driver = {
	.name =		(char *) hcd_name,
	.bus =		&platform_bus_type,

	.probe =	sl811h_probe,
	.remove =	sl811h_remove,

	.suspend =	sl811h_suspend,
	.resume =	sl811h_resume,
};
1834
1835/*-------------------------------------------------------------------------*/
1836
1837static int __init sl811h_init(void)
1838{
1839 if (usb_disabled())
1840 return -ENODEV;
1841
1842 INFO("driver %s, %s\n", hcd_name, DRIVER_VERSION);
1843 return driver_register(&sl811h_driver);
1844}
1845module_init(sl811h_init);
1846
static void __exit sl811h_cleanup(void)
{
	/* undo sl811h_init()'s driver registration */
	driver_unregister(&sl811h_driver);
}
module_exit(sl811h_cleanup);
diff --git a/drivers/usb/host/sl811.h b/drivers/usb/host/sl811.h
new file mode 100644
index 000000000000..7690d98e42a7
--- /dev/null
+++ b/drivers/usb/host/sl811.h
@@ -0,0 +1,266 @@
1/*
2 * SL811HS register declarations and HCD data structures
3 *
4 * Copyright (C) 2004 Psion Teklogix
5 * Copyright (C) 2004 David Brownell
6 * Copyright (C) 2001 Cypress Semiconductor Inc.
7 */
8
9/*
10 * SL811HS has transfer registers, and control registers. In host/master
11 * mode one set of registers is used; in peripheral/slave mode, another.
12 * - SL11H only has some "A" transfer registers from 0x00-0x04
13 * - SL811HS also has "B" registers from 0x08-0x0c
14 * - SL811S (or HS in slave mode) has four A+B sets, at 00, 10, 20, 30
15 */
16
17#define SL811_EP_A(base) ((base) + 0)
18#define SL811_EP_B(base) ((base) + 8)
19
20#define SL811_HOST_BUF 0x00
21#define SL811_PERIPH_EP0 0x00
22#define SL811_PERIPH_EP1 0x10
23#define SL811_PERIPH_EP2 0x20
24#define SL811_PERIPH_EP3 0x30
25
26
27/* TRANSFER REGISTERS: host and peripheral sides are similar
28 * except for the control models (master vs slave).
29 */
30#define SL11H_HOSTCTLREG 0
31# define SL11H_HCTLMASK_ARM 0x01
32# define SL11H_HCTLMASK_ENABLE 0x02
33# define SL11H_HCTLMASK_IN 0x00
34# define SL11H_HCTLMASK_OUT 0x04
35# define SL11H_HCTLMASK_ISOCH 0x10
36# define SL11H_HCTLMASK_AFTERSOF 0x20
37# define SL11H_HCTLMASK_TOGGLE 0x40
38# define SL11H_HCTLMASK_PREAMBLE 0x80
39#define SL11H_BUFADDRREG 1
40#define SL11H_BUFLNTHREG 2
41#define SL11H_PKTSTATREG 3 /* read */
42# define SL11H_STATMASK_ACK 0x01
43# define SL11H_STATMASK_ERROR 0x02
44# define SL11H_STATMASK_TMOUT 0x04
45# define SL11H_STATMASK_SEQ 0x08
46# define SL11H_STATMASK_SETUP 0x10
47# define SL11H_STATMASK_OVF 0x20
48# define SL11H_STATMASK_NAK 0x40
49# define SL11H_STATMASK_STALL 0x80
50#define SL11H_PIDEPREG 3 /* write */
51# define SL_SETUP 0xd0
52# define SL_IN 0x90
53# define SL_OUT 0x10
54# define SL_SOF 0x50
55# define SL_PREAMBLE 0xc0
56# define SL_NAK 0xa0
57# define SL_STALL 0xe0
58# define SL_DATA0 0x30
59# define SL_DATA1 0xb0
60#define SL11H_XFERCNTREG 4 /* read */
61#define SL11H_DEVADDRREG 4 /* write */
62
63
64/* CONTROL REGISTERS: host and peripheral are very different.
65 */
66#define SL11H_CTLREG1 5
67# define SL11H_CTL1MASK_SOF_ENA 0x01
68# define SL11H_CTL1MASK_FORCE 0x18
69# define SL11H_CTL1MASK_NORMAL 0x00
70# define SL11H_CTL1MASK_SE0 0x08 /* reset */
71# define SL11H_CTL1MASK_J 0x10
72# define SL11H_CTL1MASK_K 0x18 /* resume */
73# define SL11H_CTL1MASK_LSPD 0x20
74# define SL11H_CTL1MASK_SUSPEND 0x40
75#define SL11H_IRQ_ENABLE 6
76# define SL11H_INTMASK_DONE_A 0x01
77# define SL11H_INTMASK_DONE_B 0x02
78# define SL11H_INTMASK_SOFINTR 0x10
79# define SL11H_INTMASK_INSRMV 0x20 /* to/from SE0 */
80# define SL11H_INTMASK_RD 0x40
81# define SL11H_INTMASK_DP 0x80 /* only in INTSTATREG */
82#define SL11S_ADDRESS 7
83
84/* 0x08-0x0c are for the B buffer (not in SL11) */
85
86#define SL11H_IRQ_STATUS 0x0D /* write to ack */
87#define SL11H_HWREVREG 0x0E /* read */
88# define SL11H_HWRMASK_HWREV 0xF0
89#define SL11H_SOFLOWREG 0x0E /* write */
90#define SL11H_SOFTMRREG 0x0F /* read */
91
92/* a write to this register enables SL811HS features.
93 * HOST flag presumably overrides the chip input signal?
94 */
95#define SL811HS_CTLREG2 0x0F
96# define SL811HS_CTL2MASK_SOF_MASK 0x3F
97# define SL811HS_CTL2MASK_DSWAP 0x40
98# define SL811HS_CTL2MASK_HOST 0x80
99
100#define SL811HS_CTL2_INIT (SL811HS_CTL2MASK_HOST | 0x2e)
101
102
103/* DATA BUFFERS: registers from 0x10..0xff are for data buffers;
104 * that's 240 bytes, which we'll split evenly between A and B sides.
105 * Only ISO can use more than 64 bytes per packet.
106 * (The SL11S has 0x40..0xff for buffers.)
107 */
108#define H_MAXPACKET 120 /* bytes in A or B fifos */
109
110#define SL11H_DATA_START 0x10
111#define SL811HS_PACKET_BUF(is_a) ((is_a) \
112 ? SL11H_DATA_START \
113 : (SL11H_DATA_START + H_MAXPACKET))
114
115/*-------------------------------------------------------------------------*/
116
117#define LOG2_PERIODIC_SIZE 5 /* arbitrary; this matches OHCI */
118#define PERIODIC_SIZE (1 << LOG2_PERIODIC_SIZE)
119
/* Per-controller state for one SL811HS host controller.
 * Lives in usb_hcd->hcd_priv (see hcd_to_sl811/sl811_to_hcd below).
 */
struct sl811 {
	spinlock_t		lock;		/* guards all fields below */
	void __iomem		*addr_reg;	/* register-index window */
	void __iomem		*data_reg;	/* register-data window */
	struct sl811_platform_data	*board;	/* board hooks (port power etc) */
	struct proc_dir_entry	*pde;

	/* irq/event statistics */
	unsigned long		stat_insrmv;	/* connect/disconnect events */
	unsigned long		stat_wake;	/* remote wakeup events */
	unsigned long		stat_sof;
	unsigned long		stat_a;		/* completions on the A buffer */
	unsigned long		stat_b;		/* completions on the B buffer */
	unsigned long		stat_lost;
	unsigned long		stat_overrun;

	/* sw model */
	struct timer_list	timer;
	struct sl811h_ep	*next_periodic;
	struct sl811h_ep	*next_async;

	/* endpoints currently mapped to the chip's A and B transfer sets */
	struct sl811h_ep	*active_a;
	unsigned long		jiffies_a;
	struct sl811h_ep	*active_b;
	unsigned long		jiffies_b;

	/* shadow copies of chip state, so reads aren't needed in irq paths */
	u32			port1;		/* virtual root-hub port status */
	u8			ctrl1, ctrl2, irq_enable;
	u16			frame;

	/* async schedule: control, bulk */
	struct list_head	async;

	/* periodic schedule: interrupt, iso */
	u16			load[PERIODIC_SIZE];	/* bandwidth per slot */
	struct sl811h_ep	*periodic[PERIODIC_SIZE];
	unsigned		periodic_count;
};
157
/* The sl811 state is allocated inline in usb_hcd's hcd_priv area. */
static inline struct sl811 *hcd_to_sl811(struct usb_hcd *hcd)
{
	return (struct sl811 *) (hcd->hcd_priv);
}
162
/* Inverse of hcd_to_sl811(): recover the enclosing usb_hcd. */
static inline struct usb_hcd *sl811_to_hcd(struct sl811 *sl811)
{
	return container_of((void *) sl811, struct usb_hcd, hcd_priv);
}
167
/* Driver-private per-endpoint state, stored in usb_host_endpoint->hcpriv.
 * An endpoint is on either the periodic tree or the async list, not both.
 */
struct sl811h_ep {
	struct usb_host_endpoint *hep;	/* usbcore's endpoint record */
	struct usb_device	*udev;

	u8			defctrl;	/* template for SL11H_HOSTCTLREG */
	u8			maxpacket;
	u8			epnum;
	u8			nextpid;	/* PID token for next transfer */

	u16			error_count;
	u16			nak_count;
	u16			length;		/* of current packet */

	/* periodic schedule */
	u16			period;		/* in frames */
	u16			branch;		/* slot in the periodic table */
	u16			load;		/* usecs of bus time per frame */
	struct sl811h_ep	*next;

	/* async schedule */
	struct list_head	schedule;
};
190
191/*-------------------------------------------------------------------------*/
192
193/* These register utilities should work for the SL811S register API too
194 * NOTE: caller must hold sl811->lock.
195 */
196
/* Indexed register read: select the register, then fetch its value.
 * NOTE: caller must hold sl811->lock (the index/data pair is not atomic).
 */
static inline u8 sl811_read(struct sl811 *sl811, int reg)
{
	writeb(reg, sl811->addr_reg);
	return readb(sl811->data_reg);
}
202
/* Indexed register write: select the register, then store the value.
 * NOTE: caller must hold sl811->lock.
 */
static inline void sl811_write(struct sl811 *sl811, int reg, u8 val)
{
	writeb(reg, sl811->addr_reg);
	writeb(val, sl811->data_reg);
}
208
209static inline void
210sl811_write_buf(struct sl811 *sl811, int addr, const void *buf, size_t count)
211{
212 const u8 *data;
213 void __iomem *data_reg;
214
215 if (!count)
216 return;
217 writeb(addr, sl811->addr_reg);
218
219 data = buf;
220 data_reg = sl811->data_reg;
221 do {
222 writeb(*data++, data_reg);
223 } while (--count);
224}
225
226static inline void
227sl811_read_buf(struct sl811 *sl811, int addr, void *buf, size_t count)
228{
229 u8 *data;
230 void __iomem *data_reg;
231
232 if (!count)
233 return;
234 writeb(addr, sl811->addr_reg);
235
236 data = buf;
237 data_reg = sl811->data_reg;
238 do {
239 *data++ = readb(data_reg);
240 } while (--count);
241}
242
243/*-------------------------------------------------------------------------*/
244
245#ifdef DEBUG
246#define DBG(stuff...) printk(KERN_DEBUG "sl811: " stuff)
247#else
248#define DBG(stuff...) do{}while(0)
249#endif
250
251#ifdef VERBOSE
252# define VDBG DBG
253#else
254# define VDBG(stuff...) do{}while(0)
255#endif
256
257#ifdef PACKET_TRACE
258# define PACKET VDBG
259#else
260# define PACKET(stuff...) do{}while(0)
261#endif
262
263#define ERR(stuff...) printk(KERN_ERR "sl811: " stuff)
264#define WARN(stuff...) printk(KERN_WARNING "sl811: " stuff)
265#define INFO(stuff...) printk(KERN_INFO "sl811: " stuff)
266
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
new file mode 100644
index 000000000000..24c73c5a3435
--- /dev/null
+++ b/drivers/usb/host/uhci-debug.c
@@ -0,0 +1,587 @@
1/*
2 * UHCI-specific debugging code. Invaluable when something
3 * goes wrong, but don't get in my face.
4 *
5 * Kernel visible pointers are surrounded in []'s and bus
6 * visible pointers are surrounded in ()'s
7 *
8 * (C) Copyright 1999 Linus Torvalds
9 * (C) Copyright 1999-2001 Johannes Erdfelt
10 */
11
12#include <linux/config.h>
13#include <linux/kernel.h>
14#include <linux/debugfs.h>
15#include <linux/smp_lock.h>
16#include <asm/io.h>
17
18#include "uhci-hcd.h"
19
20static struct dentry *uhci_debugfs_root = NULL;
21
22/* Handle REALLY large printk's so we don't overflow buffers */
23static inline void lprintk(char *buf)
24{
25 char *p;
26
27 /* Just write one line at a time */
28 while (buf) {
29 p = strchr(buf, '\n');
30 if (p)
31 *p = 0;
32 printk(KERN_DEBUG "%s\n", buf);
33 buf = p;
34 if (buf)
35 buf++;
36 }
37}
38
/* Render one TD into buf: link pointer, decoded status word, and decoded
 * token (PID, device, endpoint, lengths).  'space' is the indent width.
 * Returns characters written, or 0 if fewer than 160 bytes remain.
 */
static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space)
{
	char *out = buf;
	char *spid;
	u32 status, token;

	/* Try to make sure there's enough memory */
	if (len < 160)
		return 0;

	status = td_status(td);
	out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td, le32_to_cpu(td->link));
	out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ",
		((status >> 27) & 3),
		(status & TD_CTRL_SPD) ? "SPD " : "",
		(status & TD_CTRL_LS) ? "LS " : "",
		(status & TD_CTRL_IOC) ? "IOC " : "",
		(status & TD_CTRL_ACTIVE) ? "Active " : "",
		(status & TD_CTRL_STALLED) ? "Stalled " : "",
		(status & TD_CTRL_DBUFERR) ? "DataBufErr " : "",
		(status & TD_CTRL_BABBLE) ? "Babble " : "",
		(status & TD_CTRL_NAK) ? "NAK " : "",
		(status & TD_CTRL_CRCTIMEO) ? "CRC/Timeo " : "",
		(status & TD_CTRL_BITSTUFF) ? "BitStuff " : "",
		status & 0x7ff);

	/* decode the packet ID into a human-readable name */
	token = td_token(td);
	switch (uhci_packetid(token)) {
	case USB_PID_SETUP:
		spid = "SETUP";
		break;
	case USB_PID_OUT:
		spid = "OUT";
		break;
	case USB_PID_IN:
		spid = "IN";
		break;
	default:
		spid = "?";
		break;
	}

	out += sprintf(out, "MaxLen=%x DT%d EndPt=%x Dev=%x, PID=%x(%s) ",
		token >> 21,
		((token >> 19) & 1),
		(token >> 15) & 15,
		(token >> 8) & 127,
		(token & 0xff),
		spid);
	out += sprintf(out, "(buf=%08x)\n", le32_to_cpu(td->buffer));

	return out - buf;
}
92
/* Render one QH and its TD chain, flagging inconsistencies (element
 * pointing at a QH, reserved bit set, element not matching the first TD).
 * Recurses into queued QHs at the end.  Returns characters written, or 0
 * if fewer than ~480 bytes remain.
 */
static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
{
	char *out = buf;
	struct urb_priv *urbp;
	struct list_head *head, *tmp;
	struct uhci_td *td;
	int i = 0, checked = 0, prevactive = 0;
	__le32 element = qh_element(qh);

	/* Try to make sure there's enough memory */
	if (len < 80 * 6)
		return 0;

	out += sprintf(out, "%*s[%p] link (%08x) element (%08x)\n", space, "",
			qh, le32_to_cpu(qh->link), le32_to_cpu(element));

	if (element & UHCI_PTR_QH)
		out += sprintf(out, "%*s Element points to QH (bug?)\n", space, "");

	if (element & UHCI_PTR_DEPTH)
		out += sprintf(out, "%*s Depth traverse\n", space, "");

	if (element & cpu_to_le32(8))
		out += sprintf(out, "%*s Bit 3 set (bug?)\n", space, "");

	if (!(element & ~(UHCI_PTR_QH | UHCI_PTR_DEPTH)))
		out += sprintf(out, "%*s Element is NULL (bug?)\n", space, "");

	if (!qh->urbp) {
		out += sprintf(out, "%*s urbp == NULL\n", space, "");
		goto out;
	}

	urbp = qh->urbp;

	head = &urbp->td_list;
	tmp = head->next;

	td = list_entry(tmp, struct uhci_td, list);

	if (cpu_to_le32(td->dma_handle) != (element & ~UHCI_PTR_BITS))
		out += sprintf(out, "%*s Element != First TD\n", space, "");

	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		out += sprintf(out, "%*s%d: ", space + 2, "", i++);
		out += uhci_show_td(td, out, len - (out - buf), 0);

		/* at low debug levels, compress long runs of active TDs
		 * into a single "[skipped ...]" line (checked only once) */
		if (i > 10 && !checked && prevactive && tmp != head &&
		    debug <= 2) {
			struct list_head *ntmp = tmp;
			struct uhci_td *ntd = td;
			int active = 1, ni = i;

			checked = 1;

			while (ntmp != head && ntmp->next != head && active) {
				ntd = list_entry(ntmp, struct uhci_td, list);

				ntmp = ntmp->next;

				active = td_status(ntd) & TD_CTRL_ACTIVE;

				ni++;
			}

			if (active && ni > i) {
				out += sprintf(out, "%*s[skipped %d active TD's]\n", space, "", ni - i);
				tmp = ntmp;
				td = ntd;
				i = ni;
			}
		}

		prevactive = td_status(td) & TD_CTRL_ACTIVE;
	}

	if (list_empty(&urbp->queue_list) || urbp->queued)
		goto out;

	out += sprintf(out, "%*sQueued QH's:\n", -space, "--");

	head = &urbp->queue_list;
	tmp = head->next;

	while (tmp != head) {
		struct urb_priv *nurbp = list_entry(tmp, struct urb_priv,
				queue_list);
		tmp = tmp->next;

		out += uhci_show_qh(nurbp->qh, out, len - (out - buf), space);
	}

out:
	return out - buf;
}
192
/* Print the "- Frame N" header at most once per frame.  Relies on the
 * caller's locals: 'shown' (print-once flag), 'out' (output cursor) and
 * 'i' (frame number).  Wrapped in do { } while (0) so the expansion is a
 * single statement and nests safely under if/else (the bare-if original
 * was a dangling-else hazard).
 */
#define show_frame_num()					\
	do {							\
		if (!shown) {					\
			shown = 1;				\
			out += sprintf(out, "- Frame %d\n", i);	\
		}						\
	} while (0)
198
199#ifdef CONFIG_PROC_FS
/* Names of the skeleton QHs, in the same order they sit in uhci->skelqh
 * (interrupt trees by period, then control, bulk, and the terminator).
 */
static const char *qh_names[] = {
	"skel_int128_qh", "skel_int64_qh",
	"skel_int32_qh", "skel_int16_qh",
	"skel_int8_qh", "skel_int4_qh",
	"skel_int2_qh", "skel_int1_qh",
	"skel_ls_control_qh", "skel_fs_control_qh",
	"skel_bulk_qh", "skel_term_qh"
};
208
/* Print the skeleton QH's name at most once.  Relies on the caller's
 * locals: 'shown' (print-once flag), 'out' (output cursor) and 'i'
 * (index into qh_names[]).  Wrapped in do { } while (0) so the expansion
 * is a single statement and nests safely under if/else.
 */
#define show_qh_name()						\
	do {							\
		if (!shown) {					\
			shown = 1;				\
			out += sprintf(out, "- %s\n", qh_names[i]); \
		}						\
	} while (0)
214
/* Render one root-hub port status/control word ('status', from PORTSC)
 * with its bits spelled out.  Returns characters written, or 0 if fewer
 * than 160 bytes remain.
 */
static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
{
	char *out = buf;

	/* Try to make sure there's enough memory */
	if (len < 160)
		return 0;

	out += sprintf(out, " stat%d = %04x %s%s%s%s%s%s%s%s%s%s\n",
		port,
		status,
		(status & USBPORTSC_SUSP) ? " Suspend" : "",
		(status & USBPORTSC_OCC) ? " OverCurrentChange" : "",
		(status & USBPORTSC_OC) ? " OverCurrent" : "",
		(status & USBPORTSC_PR) ? " Reset" : "",
		(status & USBPORTSC_LSDA) ? " LowSpeed" : "",
		(status & USBPORTSC_RD) ? " ResumeDetect" : "",
		(status & USBPORTSC_PEC) ? " EnableChange" : "",
		(status & USBPORTSC_PE) ? " Enabled" : "",
		(status & USBPORTSC_CSC) ? " ConnectChange" : "",
		(status & USBPORTSC_CCS) ? " Connected" : "");

	return out - buf;
}
239
/* Dump the controller's I/O-space register file: USBCMD, USBSTS, USBINTR,
 * frame number, frame-list base, SOF modify, and both port registers.
 * Reads the live registers (PIO), so only safe while the device is bound.
 * Returns characters written, or 0 if fewer than ~480 bytes remain.
 */
static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
{
	char *out = buf;
	unsigned long io_addr = uhci->io_addr;
	unsigned short usbcmd, usbstat, usbint, usbfrnum;
	unsigned int flbaseadd;
	unsigned char sof;
	unsigned short portsc1, portsc2;

	/* Try to make sure there's enough memory */
	if (len < 80 * 6)
		return 0;

	/* register offsets per the UHCI design guide */
	usbcmd    = inw(io_addr + 0);
	usbstat   = inw(io_addr + 2);
	usbint    = inw(io_addr + 4);
	usbfrnum  = inw(io_addr + 6);
	flbaseadd = inl(io_addr + 8);
	sof       = inb(io_addr + 12);
	portsc1   = inw(io_addr + 16);
	portsc2   = inw(io_addr + 18);

	out += sprintf(out, " usbcmd = %04x %s%s%s%s%s%s%s%s\n",
		usbcmd,
		(usbcmd & USBCMD_MAXP) ? "Maxp64 " : "Maxp32 ",
		(usbcmd & USBCMD_CF) ? "CF " : "",
		(usbcmd & USBCMD_SWDBG) ? "SWDBG " : "",
		(usbcmd & USBCMD_FGR) ? "FGR " : "",
		(usbcmd & USBCMD_EGSM) ? "EGSM " : "",
		(usbcmd & USBCMD_GRESET) ? "GRESET " : "",
		(usbcmd & USBCMD_HCRESET) ? "HCRESET " : "",
		(usbcmd & USBCMD_RS) ? "RS " : "");

	out += sprintf(out, " usbstat = %04x %s%s%s%s%s%s\n",
		usbstat,
		(usbstat & USBSTS_HCH) ? "HCHalted " : "",
		(usbstat & USBSTS_HCPE) ? "HostControllerProcessError " : "",
		(usbstat & USBSTS_HSE) ? "HostSystemError " : "",
		(usbstat & USBSTS_RD) ? "ResumeDetect " : "",
		(usbstat & USBSTS_ERROR) ? "USBError " : "",
		(usbstat & USBSTS_USBINT) ? "USBINT " : "");

	out += sprintf(out, " usbint = %04x\n", usbint);
	out += sprintf(out, " usbfrnum = (%d)%03x\n", (usbfrnum >> 10) & 1,
		0xfff & (4*(unsigned int)usbfrnum));
	out += sprintf(out, " flbaseadd = %08x\n", flbaseadd);
	out += sprintf(out, " sof = %02x\n", sof);
	out += uhci_show_sc(1, portsc1, out, len - (out - buf));
	out += uhci_show_sc(2, portsc2, out, len - (out - buf));

	return out - buf;
}
292
/* Render one urb_priv: its urb/qh pointers, pipe decoding, FSBR flags,
 * terminal status, TD count, and queue membership.  Returns characters
 * written, or 0 if fewer than 200 bytes remain.
 */
static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp, char *buf, int len)
{
	struct list_head *tmp;
	char *out = buf;
	int count = 0;

	if (len < 200)
		return 0;

	out += sprintf(out, "urb_priv [%p] ", urbp);
	out += sprintf(out, "urb [%p] ", urbp->urb);
	out += sprintf(out, "qh [%p] ", urbp->qh);
	out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
	out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe), (usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));

	switch (usb_pipetype(urbp->urb->pipe)) {
	case PIPE_ISOCHRONOUS: out += sprintf(out, "ISO "); break;
	case PIPE_INTERRUPT: out += sprintf(out, "INT "); break;
	case PIPE_BULK: out += sprintf(out, "BLK "); break;
	case PIPE_CONTROL: out += sprintf(out, "CTL "); break;
	}

	out += sprintf(out, "%s", (urbp->fsbr ? "FSBR " : ""));
	out += sprintf(out, "%s", (urbp->fsbr_timeout ? "FSBR_TO " : ""));

	/* -EINPROGRESS means still on the hardware schedule; skip it */
	if (urbp->urb->status != -EINPROGRESS)
		out += sprintf(out, "Status=%d ", urbp->urb->status);
	//out += sprintf(out, "Inserttime=%lx ",urbp->inserttime);
	//out += sprintf(out, "FSBRtime=%lx ",urbp->fsbrtime);

	count = 0;
	list_for_each(tmp, &urbp->td_list)
		count++;
	out += sprintf(out, "TDs=%d ",count);

	if (urbp->queued)
		out += sprintf(out, "queued\n");
	else {
		count = 0;
		list_for_each(tmp, &urbp->queue_list)
			count++;
		out += sprintf(out, "queued URBs=%d\n", count);
	}

	return out - buf;
}
339
340static int uhci_show_lists(struct uhci_hcd *uhci, char *buf, int len)
341{
342 char *out = buf;
343 struct list_head *head, *tmp;
344 int count;
345
346 out += sprintf(out, "Main list URBs:");
347 if (list_empty(&uhci->urb_list))
348 out += sprintf(out, " Empty\n");
349 else {
350 out += sprintf(out, "\n");
351 count = 0;
352 head = &uhci->urb_list;
353 tmp = head->next;
354 while (tmp != head) {
355 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
356
357 out += sprintf(out, " %d: ", ++count);
358 out += uhci_show_urbp(uhci, urbp, out, len - (out - buf));
359 tmp = tmp->next;
360 }
361 }
362
363 out += sprintf(out, "Remove list URBs:");
364 if (list_empty(&uhci->urb_remove_list))
365 out += sprintf(out, " Empty\n");
366 else {
367 out += sprintf(out, "\n");
368 count = 0;
369 head = &uhci->urb_remove_list;
370 tmp = head->next;
371 while (tmp != head) {
372 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
373
374 out += sprintf(out, " %d: ", ++count);
375 out += uhci_show_urbp(uhci, urbp, out, len - (out - buf));
376 tmp = tmp->next;
377 }
378 }
379
380 out += sprintf(out, "Complete list URBs:");
381 if (list_empty(&uhci->complete_list))
382 out += sprintf(out, " Empty\n");
383 else {
384 out += sprintf(out, "\n");
385 count = 0;
386 head = &uhci->complete_list;
387 tmp = head->next;
388 while (tmp != head) {
389 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
390
391 out += sprintf(out, " %d: ", ++count);
392 out += uhci_show_urbp(uhci, urbp, out, len - (out - buf));
393 tmp = tmp->next;
394 }
395 }
396
397 return out - buf;
398}
399
/* Render a full snapshot of the controller: register state, the 1024-entry
 * frame list, every skeleton QH with its queue, and (at debug > 2) the
 * software URB lists.  Takes uhci->lock for a consistent view.  Returns
 * characters written into buf (caller sizes buf at MAX_OUTPUT).
 */
static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
{
	unsigned long flags;
	char *out = buf;
	int i, j;
	struct uhci_qh *qh;
	struct uhci_td *td;
	struct list_head *tmp, *head;

	spin_lock_irqsave(&uhci->lock, flags);

	out += sprintf(out, "HC status\n");
	out += uhci_show_status(uhci, out, len - (out - buf));

	out += sprintf(out, "Frame List\n");
	for (i = 0; i < UHCI_NUMFRAMES; ++i) {
		int shown = 0;
		td = uhci->fl->frame_cpu[i];
		if (!td)
			continue;

		/* cross-check the shadow array against the hardware list */
		if (td->dma_handle != (dma_addr_t)uhci->fl->frame[i]) {
			show_frame_num();
			out += sprintf(out, " frame list does not match td->dma_handle!\n");
		}
		show_frame_num();

		head = &td->fl_list;
		tmp = head;
		do {
			td = list_entry(tmp, struct uhci_td, fl_list);
			tmp = tmp->next;
			out += uhci_show_td(td, out, len - (out - buf), 4);
		} while (tmp != head);
	}

	out += sprintf(out, "Skeleton QH's\n");

	for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
		int shown = 0;

		qh = uhci->skelqh[i];

		if (debug > 1) {
			show_qh_name();
			out += uhci_show_qh(qh, out, len - (out - buf), 4);
		}

		/* Last QH is the Terminating QH, it's different */
		if (i == UHCI_NUM_SKELQH - 1) {
			if (qh->link != UHCI_PTR_TERM)
				out += sprintf(out, " bandwidth reclamation on!\n");

			if (qh_element(qh) != cpu_to_le32(uhci->term_td->dma_handle))
				out += sprintf(out, " skel_term_qh element is not set to term_td!\n");

			continue;
		}

		j = (i < 7) ? 7 : i+1; /* Next skeleton */
		if (list_empty(&qh->list)) {
			if (i < UHCI_NUM_SKELQH - 1) {
				/* idle skeletons must still chain to the next one */
				if (qh->link !=
				    (cpu_to_le32(uhci->skelqh[j]->dma_handle) | UHCI_PTR_QH)) {
					show_qh_name();
					out += sprintf(out, " skeleton QH not linked to next skeleton QH!\n");
				}
			}

			continue;
		}

		show_qh_name();

		head = &qh->list;
		tmp = head->next;

		while (tmp != head) {
			qh = list_entry(tmp, struct uhci_qh, list);

			tmp = tmp->next;

			out += uhci_show_qh(qh, out, len - (out - buf), 4);
		}

		/* after the loop, qh is the last QH on this skeleton */
		if (i < UHCI_NUM_SKELQH - 1) {
			if (qh->link !=
			    (cpu_to_le32(uhci->skelqh[j]->dma_handle) | UHCI_PTR_QH))
				out += sprintf(out, " last QH not linked to next skeleton!\n");
		}
	}

	if (debug > 2)
		out += uhci_show_lists(uhci, out, len - (out - buf));

	spin_unlock_irqrestore(&uhci->lock, flags);

	return out - buf;
}
499
/* Upper bound on one rendered schedule dump. */
#define MAX_OUTPUT	(64 * 1024)

/* Per-open() snapshot: the schedule is rendered once in open() and then
 * served from this buffer by read()/lseek().
 */
struct uhci_debug {
	int size;		/* bytes actually rendered into data */
	char *data;		/* kmalloc'd buffer, MAX_OUTPUT bytes */
	struct uhci_hcd *uhci;
};
507
/* open(): allocate a snapshot context and render the whole schedule into
 * it under the BKL.  Subsequent reads are served from the snapshot, so the
 * dump is self-consistent no matter how slowly it is read.
 */
static int uhci_debug_open(struct inode *inode, struct file *file)
{
	struct uhci_hcd *uhci = inode->u.generic_ip;
	struct uhci_debug *up;
	int ret = -ENOMEM;

	lock_kernel();
	up = kmalloc(sizeof(*up), GFP_KERNEL);
	if (!up)
		goto out;

	up->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
	if (!up->data) {
		kfree(up);
		goto out;
	}

	up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT);

	file->private_data = up;

	ret = 0;
out:
	unlock_kernel();
	return ret;
}
534
535static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
536{
537 struct uhci_debug *up;
538 loff_t new = -1;
539
540 lock_kernel();
541 up = file->private_data;
542
543 switch (whence) {
544 case 0:
545 new = off;
546 break;
547 case 1:
548 new = file->f_pos + off;
549 break;
550 }
551 if (new < 0 || new > up->size) {
552 unlock_kernel();
553 return -EINVAL;
554 }
555 unlock_kernel();
556 return (file->f_pos = new);
557}
558
/* read(): copy bytes out of the snapshot rendered at open() time. */
static ssize_t uhci_debug_read(struct file *file, char __user *buf,
			size_t nbytes, loff_t *ppos)
{
	struct uhci_debug *up = file->private_data;
	return simple_read_from_buffer(buf, nbytes, ppos, up->data, up->size);
}
565
566static int uhci_debug_release(struct inode *inode, struct file *file)
567{
568 struct uhci_debug *up = file->private_data;
569
570 kfree(up->data);
571 kfree(up);
572
573 return 0;
574}
575
/* debugfs file: snapshot rendered in open(), then plain buffered reads */
static struct file_operations uhci_debug_operations = {
	.open =		uhci_debug_open,
	.llseek =	uhci_debug_lseek,
	.read =		uhci_debug_read,
	.release =	uhci_debug_release,
};
582
#else	/* CONFIG_PROC_FS -- the matching #ifdef above tests CONFIG_PROC_FS,
	 * not CONFIG_DEBUG_FS as the old comment said */

/* never dereferenced when the debug file support is compiled out */
#define uhci_debug_operations (* (struct file_operations *) NULL)

#endif	/* CONFIG_PROC_FS */
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
new file mode 100644
index 000000000000..324a1a9bbdb2
--- /dev/null
+++ b/drivers/usb/host/uhci-hcd.c
@@ -0,0 +1,919 @@
1/*
2 * Universal Host Controller Interface driver for USB.
3 *
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5 *
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8 * (C) Copyright 1999 Randy Dunlap
9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
17 *
18 * Intel documents this fairly well, and as far as I know there
19 * are no royalties or anything like that, but even so there are
20 * people who decided that they want to do the same thing in a
21 * completely different way.
22 *
23 * WARNING! The USB documentation is downright evil. Most of it
24 * is just crap, written by a committee. You're better off ignoring
25 * most of it, the important stuff is:
26 * - the low-level protocol (fairly simple but lots of small details)
27 * - working around the horridness of the rest
28 */
29
30#include <linux/config.h>
31#ifdef CONFIG_USB_DEBUG
32#define DEBUG
33#else
34#undef DEBUG
35#endif
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/kernel.h>
39#include <linux/init.h>
40#include <linux/delay.h>
41#include <linux/ioport.h>
42#include <linux/sched.h>
43#include <linux/slab.h>
44#include <linux/smp_lock.h>
45#include <linux/errno.h>
46#include <linux/unistd.h>
47#include <linux/interrupt.h>
48#include <linux/spinlock.h>
49#include <linux/debugfs.h>
50#include <linux/pm.h>
51#include <linux/dmapool.h>
52#include <linux/dma-mapping.h>
53#include <linux/usb.h>
54#include <linux/bitops.h>
55
56#include <asm/uaccess.h>
57#include <asm/io.h>
58#include <asm/irq.h>
59#include <asm/system.h>
60
61#include "../core/hcd.h"
62#include "uhci-hcd.h"
63
64/*
65 * Version Information
66 */
67#define DRIVER_VERSION "v2.2"
68#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
69Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
70Alan Stern"
71#define DRIVER_DESC "USB Universal Host Controller Interface driver"
72
73/*
74 * debug = 0, no debugging messages
75 * debug = 1, dump failed URB's except for stalls
76 * debug = 2, dump all failed URB's (including stalls)
77 * show all queues in /debug/uhci/[pci_addr]
78 * debug = 3, show all TD's in URB's when dumping
79 */
/* Debug level (see table above); writable at runtime via sysfs. */
#ifdef DEBUG
static int debug = 1;
#else
static int debug = 0;
#endif
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");
/* scratch buffer for dumping failed URBs from the completion path */
static char *errbuf;
#define ERRBUF_LEN    (32 * 1024)

static kmem_cache_t *uhci_up_cachep;	/* urb_priv */
91
92static void uhci_get_current_frame_number(struct uhci_hcd *uhci);
93static void hc_state_transitions(struct uhci_hcd *uhci);
94
95/* If a transfer is still active after this much time, turn off FSBR */
96#define IDLE_TIMEOUT msecs_to_jiffies(50)
97#define FSBR_DELAY msecs_to_jiffies(50)
98
99/* When we timeout an idle transfer for FSBR, we'll switch it over to */
100/* depth first traversal. We'll do it in groups of this number of TD's */
101/* to make sure it doesn't hog all of the bandwidth */
102#define DEPTH_INTERVAL 5
103
104#include "uhci-hub.c"
105#include "uhci-debug.c"
106#include "uhci-q.c"
107
108static int init_stall_timer(struct usb_hcd *hcd);
109
/* Periodic (100ms) watchdog: scans the schedule, times out idle FSBR
 * transfers, finishes disabling FSBR, polls the suspend/resume state
 * machine, and re-arms itself.  Runs in timer (softirq) context.
 */
static void stall_callback(unsigned long ptr)
{
	struct usb_hcd *hcd = (struct usb_hcd *)ptr;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct urb_priv *up;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	uhci_scan_schedule(uhci, NULL);

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		/* urb lock nests inside uhci->lock */
		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		spin_unlock(&u->lock);
	}

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}

	/* Poll for and perform state transitions */
	hc_state_transitions(uhci);
	if (unlikely(uhci->suspended_ports && uhci->state != UHCI_SUSPENDED))
		uhci_check_ports(uhci);

	/* re-arm for the next 100ms tick */
	init_stall_timer(hcd);
	spin_unlock_irqrestore(&uhci->lock, flags);
}
146
147static int init_stall_timer(struct usb_hcd *hcd)
148{
149 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
150
151 init_timer(&uhci->stall_timer);
152 uhci->stall_timer.function = stall_callback;
153 uhci->stall_timer.data = (unsigned long)hcd;
154 uhci->stall_timer.expires = jiffies + msecs_to_jiffies(100);
155 add_timer(&uhci->stall_timer);
156
157 return 0;
158}
159
/* Interrupt handler: acknowledge the status register, report fatal
 * conditions, latch resume-detect for the state machine, and scan the
 * schedule for completions.  Returns IRQ_NONE for a shared interrupt
 * that wasn't ours.
 */
static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long io_addr = uhci->io_addr;
	unsigned short status;

	/*
	 * Read the interrupt status, and write it back to clear the
	 * interrupt cause. Contrary to the UHCI specification, the
	 * "HC Halted" status bit is persistent: it is RO, not R/WC.
	 */
	status = inw(io_addr + USBSTS);
	if (!(status & ~USBSTS_HCH))	/* shared interrupt, not mine */
		return IRQ_NONE;
	outw(status, io_addr + USBSTS);		/* Clear it */

	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
		if (status & USBSTS_HSE)
			dev_err(uhci_dev(uhci), "host system error, "
					"PCI problems?\n");
		if (status & USBSTS_HCPE)
			dev_err(uhci_dev(uhci), "host controller process "
					"error, something bad happened!\n");
		/* halt is expected when state <= 0 (stopped/suspended) */
		if ((status & USBSTS_HCH) && uhci->state > 0) {
			dev_err(uhci_dev(uhci), "host controller halted, "
					"very bad!\n");
			/* FIXME: Reset the controller, fix the offending TD */
		}
	}

	if (status & USBSTS_RD)
		uhci->resume_detect = 1;

	spin_lock(&uhci->lock);
	uhci_scan_schedule(uhci, regs);
	spin_unlock(&uhci->lock);

	return IRQ_HANDLED;
}
199
200static void reset_hc(struct uhci_hcd *uhci)
201{
202 unsigned long io_addr = uhci->io_addr;
203
204 /* Turn off PIRQ, SMI, and all interrupts. This also turns off
205 * the BIOS's USB Legacy Support.
206 */
207 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
208 outw(0, uhci->io_addr + USBINTR);
209
210 /* Global reset for 50ms */
211 uhci->state = UHCI_RESET;
212 outw(USBCMD_GRESET, io_addr + USBCMD);
213 msleep(50);
214 outw(0, io_addr + USBCMD);
215
216 /* Another 10ms delay */
217 msleep(10);
218 uhci->resume_detect = 0;
219 uhci->is_stopped = UHCI_IS_STOPPED;
220}
221
/* Put the controller into global suspend (EGSM) and mark it stopped.
 * Scans the schedule one last time so pending completions are reaped.
 */
static void suspend_hc(struct uhci_hcd *uhci)
{
	unsigned long io_addr = uhci->io_addr;

	dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
	uhci->state = UHCI_SUSPENDED;
	uhci->resume_detect = 0;
	outw(USBCMD_EGSM, io_addr + USBCMD);

	/* FIXME: Wait for the controller to actually stop */
	uhci_get_current_frame_number(uhci);
	uhci->is_stopped = UHCI_IS_STOPPED;

	uhci_scan_schedule(uhci, NULL);
}
237
/* Advance the resume state machine one step.  Called repeatedly (from
 * hc_state_transitions) until the controller is fully running:
 * SUSPENDED -> RESUMING_1 (drive global resume >= 20ms)
 *           -> RESUMING_2 (wait for the EOP to finish)
 *           -> RUNNING_GRACE (run >= 1s before suspending again)
 *           -> RUNNING.
 */
static void wakeup_hc(struct uhci_hcd *uhci)
{
	unsigned long io_addr = uhci->io_addr;

	switch (uhci->state) {
	case UHCI_SUSPENDED:		/* Start the resume */
		dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

		/* Global resume for >= 20ms */
		outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
		uhci->state = UHCI_RESUMING_1;
		uhci->state_end = jiffies + msecs_to_jiffies(20);
		uhci->is_stopped = 0;
		break;

	case UHCI_RESUMING_1:		/* End global resume */
		uhci->state = UHCI_RESUMING_2;
		outw(0, io_addr + USBCMD);
		/* Falls through */

	case UHCI_RESUMING_2:		/* Wait for EOP to be sent */
		if (inw(io_addr + USBCMD) & USBCMD_FGR)
			break;

		/* Run for at least 1 second, and
		 * mark it configured with a 64-byte max packet */
		uhci->state = UHCI_RUNNING_GRACE;
		uhci->state_end = jiffies + HZ;
		outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
				io_addr + USBCMD);
		break;

	case UHCI_RUNNING_GRACE:	/* Now allowed to suspend */
		uhci->state = UHCI_RUNNING;
		break;

	default:
		break;
	}
}
278
279static int ports_active(struct uhci_hcd *uhci)
280{
281 unsigned long io_addr = uhci->io_addr;
282 int connection = 0;
283 int i;
284
285 for (i = 0; i < uhci->rh_numports; i++)
286 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
287
288 return connection;
289}
290
291static int suspend_allowed(struct uhci_hcd *uhci)
292{
293 unsigned long io_addr = uhci->io_addr;
294 int i;
295
296 if (to_pci_dev(uhci_dev(uhci))->vendor != PCI_VENDOR_ID_INTEL)
297 return 1;
298
299 /* Some of Intel's USB controllers have a bug that causes false
300 * resume indications if any port has an over current condition.
301 * To prevent problems, we will not allow a global suspend if
302 * any ports are OC.
303 *
304 * Some motherboards using Intel's chipsets (but not using all
305 * the USB ports) appear to hardwire the over current inputs active
306 * to disable the USB ports.
307 */
308
309 /* check for over current condition on any port */
310 for (i = 0; i < uhci->rh_numports; i++) {
311 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
312 return 0;
313 }
314
315 return 1;
316}
317
/*
 * Advance uhci->state based on port activity and elapsed time:
 * auto-suspend after one second with nothing connected, leave suspend
 * when a device requests resume, and step the staged resume sequence
 * via wakeup_hc() once each stage's deadline (state_end) passes.
 */
static void hc_state_transitions(struct uhci_hcd *uhci)
{
	switch (uhci->state) {
		case UHCI_RUNNING:

			/* global suspend if nothing connected for 1 second */
			if (!ports_active(uhci) && suspend_allowed(uhci)) {
				uhci->state = UHCI_SUSPENDING_GRACE;
				uhci->state_end = jiffies + HZ;
			}
			break;

		case UHCI_SUSPENDING_GRACE:
			/* a new connection during the grace period cancels it */
			if (ports_active(uhci))
				uhci->state = UHCI_RUNNING;
			else if (time_after_eq(jiffies, uhci->state_end))
				suspend_hc(uhci);
			break;

		case UHCI_SUSPENDED:

			/* wakeup if requested by a device */
			if (uhci->resume_detect)
				wakeup_hc(uhci);
			break;

		case UHCI_RESUMING_1:
		case UHCI_RESUMING_2:
		case UHCI_RUNNING_GRACE:
			if (time_after_eq(jiffies, uhci->state_end))
				wakeup_hc(uhci);
			break;

		default:
			break;
	}
}
355
/*
 * Store the current frame number in uhci->frame_number if the controller
 * is running.  While the controller is stopped the previously cached
 * value is left untouched.
 */
static void uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
	if (!uhci->is_stopped)
		uhci->frame_number = inw(uhci->io_addr + USBFRNUM);
}
365
/*
 * Reset the controller and start it running.
 *
 * Returns 0 on success, or -ETIMEDOUT if the controller never comes
 * out of reset.  On success the schedule is running with interrupts
 * enabled and the state machine in its 1-second RUNNING_GRACE period.
 */
static int start_hc(struct uhci_hcd *uhci)
{
	unsigned long io_addr = uhci->io_addr;
	int timeout = 10;	/* up to ~10ms for HCRESET to self-clear */

	/*
	 * Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(USBCMD_HCRESET, io_addr + USBCMD);
	while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
		if (--timeout < 0) {
			dev_err(uhci_dev(uhci), "USBCMD_HCRESET timed out!\n");
			return -ETIMEDOUT;
		}
		msleep(1);
	}

	/* Mark controller as running before we enable interrupts */
	uhci_to_hcd(uhci)->state = HC_STATE_RUNNING;

	/* Turn on PIRQ and all interrupts */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
			USBLEGSUP_DEFAULT);
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
		io_addr + USBINTR);

	/* Start at frame 0 */
	outw(0, io_addr + USBFRNUM);
	outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);

	/* Run and mark it configured with a 64-byte max packet */
	uhci->state = UHCI_RUNNING_GRACE;
	uhci->state_end = jiffies + HZ;
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
	uhci->is_stopped = 0;

	return 0;
}
407
408/*
409 * De-allocate all resources
410 */
411static void release_uhci(struct uhci_hcd *uhci)
412{
413 int i;
414
415 for (i = 0; i < UHCI_NUM_SKELQH; i++)
416 if (uhci->skelqh[i]) {
417 uhci_free_qh(uhci, uhci->skelqh[i]);
418 uhci->skelqh[i] = NULL;
419 }
420
421 if (uhci->term_td) {
422 uhci_free_td(uhci, uhci->term_td);
423 uhci->term_td = NULL;
424 }
425
426 if (uhci->qh_pool) {
427 dma_pool_destroy(uhci->qh_pool);
428 uhci->qh_pool = NULL;
429 }
430
431 if (uhci->td_pool) {
432 dma_pool_destroy(uhci->td_pool);
433 uhci->td_pool = NULL;
434 }
435
436 if (uhci->fl) {
437 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
438 uhci->fl, uhci->fl->dma_handle);
439 uhci->fl = NULL;
440 }
441
442 if (uhci->dentry) {
443 debugfs_remove(uhci->dentry);
444 uhci->dentry = NULL;
445 }
446}
447
/*
 * hc_driver "reset" entry point: record the controller's I/O base
 * address and take the hardware away from the BIOS.  Always returns 0.
 */
static int uhci_reset(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci->io_addr = (unsigned long) hcd->rsrc_start;

	/* Kick BIOS off this hardware and reset, so we won't get
	 * interrupts from any previous setup.
	 */
	reset_hc(uhci);
	return 0;
}
460
461/*
462 * Allocate a frame list, and then setup the skeleton
463 *
464 * The hardware doesn't really know any difference
465 * in the queues, but the order does matter for the
466 * protocols higher up. The order is:
467 *
468 * - any isochronous events handled before any
469 * of the queues. We don't do that here, because
470 * we'll create the actual TD entries on demand.
471 * - The first queue is the interrupt queue.
472 * - The second queue is the control queue, split into low- and full-speed
473 * - The third queue is bulk queue.
474 * - The fourth queue is the bandwidth reclamation queue, which loops back
475 * to the full-speed control queue.
476 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i, port;
	unsigned io_size;
	dma_addr_t dma_handle;
	struct usb_device *udev;
	struct dentry *dentry;

	io_size = (unsigned) hcd->rsrc_len;

	/* Per-controller debugfs file, named after the bus */
	dentry = debugfs_create_file(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root, uhci, &uhci_debug_operations);
	if (!dentry) {
		dev_err(uhci_dev(uhci), "couldn't create uhci debugfs entry\n");
		retval = -ENOMEM;
		goto err_create_debug_entry;
	}
	uhci->dentry = dentry;

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	spin_lock_init(&uhci->lock);
	INIT_LIST_HEAD(&uhci->qh_remove_list);

	INIT_LIST_HEAD(&uhci->td_remove_list);

	INIT_LIST_HEAD(&uhci->urb_remove_list);

	INIT_LIST_HEAD(&uhci->urb_list);

	INIT_LIST_HEAD(&uhci->complete_list);

	init_waitqueue_head(&uhci->waitqh);

	/* DMA-coherent frame list; its bus address goes into USBFLBASEADD */
	uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
			&dma_handle, 0);
	if (!uhci->fl) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"consistent memory for frame list\n");
		goto err_alloc_fl;
	}

	memset((void *)uhci->fl, 0, sizeof(*uhci->fl));

	uhci->fl->dma_handle = dma_handle;

	/* 16-byte alignment: the controller requires aligned TDs/QHs */
	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
		goto err_create_td_pool;
	}

	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
		goto err_create_qh_pool;
	}

	/* Initialize the root hub */

	/* UHCI specs says devices must have 2 ports, but goes on to say */
	/* they may have more but give no way to determine how many they */
	/* have. However, according to the UHCI spec, Bit 7 is always set */
	/* to 1. So we try to use this to our advantage */
	for (port = 0; port < (io_size - 0x10) / 2; port++) {
		unsigned int portstatus;

		portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
		if (!(portstatus & 0x0080))
			break;
	}
	if (debug)
		dev_info(uhci_dev(uhci), "detected %d ports\n", port);

	/* This is experimental so anything less than 2 or greater than 8 is */
	/* something weird and we'll ignore it */
	if (port < 2 || port > UHCI_RH_MAXCHILD) {
		dev_info(uhci_dev(uhci), "port count misdetected? "
				"forcing to 2 ports\n");
		port = 2;
	}

	uhci->rh_numports = port;

	udev = usb_alloc_dev(NULL, &hcd->self, 0);
	if (!udev) {
		dev_err(uhci_dev(uhci), "unable to allocate root hub\n");
		goto err_alloc_root_hub;
	}

	uhci->term_td = uhci_alloc_td(uhci, udev);
	if (!uhci->term_td) {
		dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
		if (!uhci->skelqh[i]) {
			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link all higher int queues to int1,
	 * then link int1 to control and control to bulk
	 */
	uhci->skel_int128_qh->link =
			uhci->skel_int64_qh->link =
			uhci->skel_int32_qh->link =
			uhci->skel_int16_qh->link =
			uhci->skel_int8_qh->link =
			uhci->skel_int4_qh->link =
			uhci->skel_int2_qh->link =
			cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;

	uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
		(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);

	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to the proper
	 * interrupt queue.
	 *
	 * The interrupt queues will be interleaved as evenly as possible.
	 * There's not much to be done about period-1 interrupts; they have
	 * to occur in every frame.  But we can schedule period-2 interrupts
	 * in odd-numbered frames, period-4 interrupts in frames congruent
	 * to 2 (mod 4), and so on.  This way each frame only has two
	 * interrupt QHs, which will help spread out bandwidth utilization.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		int irq;

		/*
		 * ffs (Find First bit Set) does exactly what we need:
		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[6],
		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
		 * ffs > 6 => not on any high-period queue, so use
		 * skel_int1_qh = skelqh[7].
		 * Add UHCI_NUMFRAMES to insure at least one bit is set.
		 */
		irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
		if (irq < 0)
			irq = 7;

		/* Only place we don't use the frame list routines */
		uhci->fl->frame[i] = UHCI_PTR_QH |
				cpu_to_le32(uhci->skelqh[irq]->dma_handle);
	}

	/*
	 * Some architectures require a full mb() to enforce completion of
	 * the memory writes above before the I/O transfers in start_hc().
	 */
	mb();
	if ((retval = start_hc(uhci)) != 0)
		goto err_alloc_skelqh;

	init_stall_timer(hcd);

	udev->speed = USB_SPEED_FULL;

	if (usb_hcd_register_root_hub(udev, hcd) != 0) {
		dev_err(uhci_dev(uhci), "unable to start root hub\n");
		retval = -ENOMEM;
		goto err_start_root_hub;
	}

	return 0;

/*
 * error exits:
 * Each label undoes everything allocated before its matching failure
 * point, in reverse order of allocation.
 */
err_start_root_hub:
	reset_hc(uhci);

	del_timer_sync(&uhci->stall_timer);

err_alloc_skelqh:
	/* Also reached when start_hc() fails, after all QHs were allocated */
	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	uhci_free_td(uhci, uhci->term_td);
	uhci->term_td = NULL;

err_alloc_term_td:
	usb_put_dev(udev);

err_alloc_root_hub:
	dma_pool_destroy(uhci->qh_pool);
	uhci->qh_pool = NULL;

err_create_qh_pool:
	dma_pool_destroy(uhci->td_pool);
	uhci->td_pool = NULL;

err_create_td_pool:
	dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
			uhci->fl, uhci->fl->dma_handle);
	uhci->fl = NULL;

err_alloc_fl:
	debugfs_remove(uhci->dentry);
	uhci->dentry = NULL;

err_create_debug_entry:
	return retval;
}
703
/*
 * hc_driver "stop" entry point: halt the hardware, flush whatever is
 * still on the schedule, then free all driver resources.
 */
static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	del_timer_sync(&uhci->stall_timer);	/* no more timer callbacks */
	reset_hc(uhci);

	/* One final scan to finish off anything still queued */
	spin_lock_irq(&uhci->lock);
	uhci_scan_schedule(uhci, NULL);
	spin_unlock_irq(&uhci->lock);

	release_uhci(uhci);
}
717
718#ifdef CONFIG_PM
/*
 * PM suspend callback ("state" is the target PM state, unused here).
 * Always returns 0.
 */
static int uhci_suspend(struct usb_hcd *hcd, u32 state)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	spin_lock_irq(&uhci->lock);

	/* Don't try to suspend broken motherboards, reset instead */
	if (suspend_allowed(uhci))
		suspend_hc(uhci);
	else {
		/* reset_hc() sleeps, so the lock is dropped around it */
		spin_unlock_irq(&uhci->lock);
		reset_hc(uhci);
		spin_lock_irq(&uhci->lock);
		uhci_scan_schedule(uhci, NULL);
	}

	spin_unlock_irq(&uhci->lock);
	return 0;
}
738
/*
 * PM resume callback.
 *
 * If the controller was left in global suspend, re-program the
 * registers that some systems lose across a PM cycle and request a
 * wakeup via resume_detect.  Otherwise (it was reset at suspend time)
 * perform a full reset + restart.  Returns 0 or the start_hc() error.
 */
static int uhci_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc;

	pci_set_master(to_pci_dev(uhci_dev(uhci)));

	spin_lock_irq(&uhci->lock);

	if (uhci->state == UHCI_SUSPENDED) {

		/*
		 * Some systems don't maintain the UHCI register values
		 * during a PM suspend/resume cycle, so reinitialize
		 * the Frame Number, Framelist Base Address, Interrupt
		 * Enable, and Legacy Support registers.
		 */
		pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
				0);
		outw(uhci->frame_number, uhci->io_addr + USBFRNUM);
		outl(uhci->fl->dma_handle, uhci->io_addr + USBFLBASEADD);
		outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC |
			USBINTR_SP, uhci->io_addr + USBINTR);
		uhci->resume_detect = 1;	/* make the state machine wake up */
		pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
				USBLEGSUP_DEFAULT);
	} else {
		/* reset_hc()/start_hc() sleep (msleep), so drop the lock */
		spin_unlock_irq(&uhci->lock);
		reset_hc(uhci);
		if ((rc = start_hc(uhci)) != 0)
			return rc;
		spin_lock_irq(&uhci->lock);
	}
	hcd->state = HC_STATE_RUNNING;

	spin_unlock_irq(&uhci->lock);
	return 0;
}
777#endif
778
/* Wait until all the URBs for a particular device/endpoint are gone.
 * Sleeps interruptibly on uhci->waitqh until ep->urb_list is empty;
 * NOTE(review): the wakeup presumably comes from the URB-completion
 * path elsewhere in this driver — confirm when changing that code. */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	wait_event_interruptible(uhci->waitqh, list_empty(&ep->urb_list));
}
787
788static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
789{
790 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
791 int frame_number;
792 unsigned long flags;
793
794 /* Minimize latency by avoiding the spinlock */
795 local_irq_save(flags);
796 rmb();
797 frame_number = (uhci->is_stopped ? uhci->frame_number :
798 inw(uhci->io_addr + USBFRNUM));
799 local_irq_restore(flags);
800 return frame_number;
801}
802
static const char hcd_name[] = "uhci_hcd";

/*
 * Operations table handed to usbcore.  "reset" runs before "start";
 * the suspend/resume pair exists only in CONFIG_PM builds.
 */
static const struct hc_driver uhci_driver = {
	.description =		hcd_name,
	.product_desc =		"UHCI Host Controller",
	.hcd_priv_size =	sizeof(struct uhci_hcd),

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
#endif
	.stop =			uhci_stop,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.endpoint_disable =	uhci_hcd_endpoint_disable,
	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};
832
/* Match every PCI device in the Serial-USB class with the UHCI
 * programming interface (0x00); driver_data carries our hc_driver. */
static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller */
	PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
	.driver_data =	(unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
841
/* PCI glue: probe/remove (and PM hooks) are the generic usb_hcd_pci_*
 * helpers, which find uhci_driver through uhci_pci_ids.driver_data. */
static struct pci_driver uhci_pci_driver = {
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,

#ifdef	CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};
854
/*
 * Module load: allocate the debug buffer (only when "debug" is set)
 * and the debugfs root, create the urb_priv slab cache, and register
 * the PCI driver.  On any failure the goto chain unwinds whatever was
 * set up so far, in reverse order.
 */
static int __init uhci_hcd_init(void)
{
	int retval = -ENOMEM;

	printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");

	if (usb_disabled())
		return -ENODEV;

	if (debug) {
		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
		if (!errbuf)
			goto errbuf_failed;
	}

	uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
	if (!uhci_debugfs_root)
		goto debug_failed;

	uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
		sizeof(struct urb_priv), 0, 0, NULL, NULL);
	if (!uhci_up_cachep)
		goto up_failed;

	retval = pci_register_driver(&uhci_pci_driver);
	if (retval)
		goto init_failed;

	return 0;

init_failed:
	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_priv's were freed!");

up_failed:
	debugfs_remove(uhci_debugfs_root);

debug_failed:
	if (errbuf)
		kfree(errbuf);

errbuf_failed:

	return retval;
}
900
901static void __exit uhci_hcd_cleanup(void)
902{
903 pci_unregister_driver(&uhci_pci_driver);
904
905 if (kmem_cache_destroy(uhci_up_cachep))
906 warn("not all urb_priv's were freed!");
907
908 debugfs_remove(uhci_debugfs_root);
909
910 if (errbuf)
911 kfree(errbuf);
912}
913
/* Standard module entry/exit points and metadata */
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
new file mode 100644
index 000000000000..02255d69e1fe
--- /dev/null
+++ b/drivers/usb/host/uhci-hcd.h
@@ -0,0 +1,454 @@
1#ifndef __LINUX_UHCI_HCD_H
2#define __LINUX_UHCI_HCD_H
3
4#include <linux/list.h>
5#include <linux/usb.h>
6
/* Token PID for a pipe: IN for input pipes, OUT otherwise */
#define usb_packetid(pipe)	(usb_pipein(pipe) ? USB_PID_IN : USB_PID_OUT)
/* Mask covering the device-address + endpoint fields of a pipe value */
#define PIPE_DEVEP_MASK		0x0007ff00
9
10/*
11 * Universal Host Controller Interface data structures and defines
12 */
13
/* Command register */
#define USBCMD		0
#define   USBCMD_RS		0x0001	/* Run/Stop */
#define   USBCMD_HCRESET	0x0002	/* Host reset */
#define   USBCMD_GRESET		0x0004	/* Global reset */
#define   USBCMD_EGSM		0x0008	/* Global Suspend Mode */
#define   USBCMD_FGR		0x0010	/* Force Global Resume */
#define   USBCMD_SWDBG		0x0020	/* SW Debug mode */
#define   USBCMD_CF		0x0040	/* Config Flag (sw only) */
#define   USBCMD_MAXP		0x0080	/* Max Packet (0 = 32, 1 = 64) */

/* Status register */
#define USBSTS		2
#define   USBSTS_USBINT		0x0001	/* Interrupt due to IOC */
#define   USBSTS_ERROR		0x0002	/* Interrupt due to error */
#define   USBSTS_RD		0x0004	/* Resume Detect */
#define   USBSTS_HSE		0x0008	/* Host System Error - basically PCI problems */
#define   USBSTS_HCPE		0x0010	/* Host Controller Process Error - the scripts were buggy */
#define   USBSTS_HCH		0x0020	/* HC Halted */

/* Interrupt enable register */
#define USBINTR		4
#define   USBINTR_TIMEOUT	0x0001	/* Timeout/CRC error enable */
#define   USBINTR_RESUME	0x0002	/* Resume interrupt enable */
#define   USBINTR_IOC		0x0004	/* Interrupt On Complete enable */
#define   USBINTR_SP		0x0008	/* Short packet interrupt enable */

/* Other I/O-space register offsets */
#define USBFRNUM	6	/* frame number (low 11 bits) */
#define USBFLBASEADD	8	/* frame list base address (32-bit) */
#define USBSOF		12	/* start-of-frame timing modify */

/* USB port status and control registers */
#define USBPORTSC1	16
#define USBPORTSC2	18
#define   USBPORTSC_CCS		0x0001	/* Current Connect Status ("device present") */
#define   USBPORTSC_CSC		0x0002	/* Connect Status Change */
#define   USBPORTSC_PE		0x0004	/* Port Enable */
#define   USBPORTSC_PEC		0x0008	/* Port Enable Change */
#define   USBPORTSC_DPLUS	0x0010	/* D+ high (line status) */
#define   USBPORTSC_DMINUS	0x0020	/* D- high (line status) */
#define   USBPORTSC_RD		0x0040	/* Resume Detect */
#define   USBPORTSC_RES1	0x0080	/* reserved, always 1 */
#define   USBPORTSC_LSDA	0x0100	/* Low Speed Device Attached */
#define   USBPORTSC_PR		0x0200	/* Port Reset */
/* OC and OCC from Intel 430TX and later (not UHCI 1.1d spec) */
#define   USBPORTSC_OC		0x0400	/* Over Current condition */
#define   USBPORTSC_OCC		0x0800	/* Over Current Change R/WC */
#define   USBPORTSC_SUSP	0x1000	/* Suspend */
#define   USBPORTSC_RES2	0x2000	/* reserved, write zeroes */
#define   USBPORTSC_RES3	0x4000	/* reserved, write zeroes */
#define   USBPORTSC_RES4	0x8000	/* reserved, write zeroes */

/* Legacy support register (in PCI config space, not I/O space) */
#define USBLEGSUP		0xc0
#define   USBLEGSUP_DEFAULT	0x2000	/* only PIRQ enable set */

#define UHCI_NULL_DATA_SIZE	0x7FF	/* for UHCI controller TD */

/* Control bits in the low nibble of a hardware link pointer */
#define UHCI_PTR_BITS		cpu_to_le32(0x000F)
#define UHCI_PTR_TERM		cpu_to_le32(0x0001)
#define UHCI_PTR_QH		cpu_to_le32(0x0002)
#define UHCI_PTR_DEPTH		cpu_to_le32(0x0004)
#define UHCI_PTR_BREADTH	cpu_to_le32(0x0000)

#define UHCI_NUMFRAMES		1024	/* in the frame list [array] */
#define UHCI_MAX_SOF_NUMBER	2047	/* in an SOF packet */
#define CAN_SCHEDULE_FRAMES	1000	/* how far future frames can be scheduled */
81
/*
 * The hardware frame list: one little-endian link pointer per frame,
 * read by the controller.  dma_handle is this structure's bus address,
 * which start_hc() programs into USBFLBASEADD.
 */
struct uhci_frame_list {
	__le32 frame[UHCI_NUMFRAMES];		/* read by the controller */

	void *frame_cpu[UHCI_NUMFRAMES];	/* per-frame CPU-side pointers */

	dma_addr_t dma_handle;			/* bus address of this struct */
};
89
90struct urb_priv;
91
92/*
93 * One role of a QH is to hold a queue of TDs for some endpoint. Each QH is
94 * used with one URB, and qh->element (updated by the HC) is either:
95 * - the next unprocessed TD for the URB, or
96 * - UHCI_PTR_TERM (when there's no more traffic for this endpoint), or
97 * - the QH for the next URB queued to the same endpoint.
98 *
99 * The other role of a QH is to serve as a "skeleton" framelist entry, so we
100 * can easily splice a QH for some endpoint into the schedule at the right
101 * place. Then qh->element is UHCI_PTR_TERM.
102 *
103 * In the frame list, qh->link maintains a list of QHs seen by the HC:
104 * skel1 --> ep1-qh --> ep2-qh --> ... --> skel2 --> ...
105 */
struct uhci_qh {
	/* Hardware fields — must come first; the controller reads them */
	__le32 link;			/* Next queue */
	__le32 element;			/* Queue element pointer */

	/* Software fields */
	dma_addr_t dma_handle;		/* bus address of this QH */

	struct usb_device *dev;		/* owning device */
	struct urb_priv *urbp;		/* URB this QH belongs to, if any */

	struct list_head list;		/* P: uhci->frame_list_lock */
	struct list_head remove_list;	/* P: uhci->remove_list_lock */
} __attribute__((aligned(16)));		/* hardware requires 16-byte alignment */
120
121/*
122 * We need a special accessor for the element pointer because it is
123 * subject to asynchronous updates by the controller
124 */
125static __le32 inline qh_element(struct uhci_qh *qh) {
126 __le32 element = qh->element;
127
128 barrier();
129 return element;
130}
131
/*
 * for TD <status>:
 */
#define TD_CTRL_SPD		(1 << 29)	/* Short Packet Detect */
#define TD_CTRL_C_ERR_MASK	(3 << 27)	/* Error Counter bits */
#define TD_CTRL_C_ERR_SHIFT	27
#define TD_CTRL_LS		(1 << 26)	/* Low Speed Device */
#define TD_CTRL_IOS		(1 << 25)	/* Isochronous Select */
#define TD_CTRL_IOC		(1 << 24)	/* Interrupt on Complete */
#define TD_CTRL_ACTIVE		(1 << 23)	/* TD Active */
#define TD_CTRL_STALLED		(1 << 22)	/* TD Stalled */
#define TD_CTRL_DBUFERR		(1 << 21)	/* Data Buffer Error */
#define TD_CTRL_BABBLE		(1 << 20)	/* Babble Detected */
#define TD_CTRL_NAK		(1 << 19)	/* NAK Received */
#define TD_CTRL_CRCTIMEO	(1 << 18)	/* CRC/Time Out Error */
#define TD_CTRL_BITSTUFF	(1 << 17)	/* Bit Stuff Error */
#define TD_CTRL_ACTLEN_MASK	0x7FF	/* actual length, encoded as n - 1 */

/* All transfer-error bits (NAK and Active are deliberately excluded).
 * Fixed: this previously referenced the undefined name TD_CTRL_CRCTIME;
 * the macro defined above is TD_CTRL_CRCTIMEO. */
#define TD_CTRL_ANY_ERROR	(TD_CTRL_STALLED | TD_CTRL_DBUFERR | \
				 TD_CTRL_BABBLE | TD_CTRL_CRCTIMEO | TD_CTRL_BITSTUFF)

#define uhci_maxerr(err)		((err) << TD_CTRL_C_ERR_SHIFT)
#define uhci_status_bits(ctrl_sts)	((ctrl_sts) & 0xF60000)
#define uhci_actual_length(ctrl_sts)	(((ctrl_sts) + 1) & TD_CTRL_ACTLEN_MASK) /* 1-based */
156
/*
 * for TD <info>: (a.k.a. Token)
 *
 * Layout: [31:21] expected length - 1, [19] data toggle,
 *         [18:15] endpoint, [14:8] device address, [7:0] PID.
 */
#define td_token(td)		le32_to_cpu((td)->token)
#define TD_TOKEN_DEVADDR_SHIFT	8
#define TD_TOKEN_TOGGLE_SHIFT	19
#define TD_TOKEN_TOGGLE		(1 << 19)
#define TD_TOKEN_EXPLEN_SHIFT	21
#define TD_TOKEN_EXPLEN_MASK	0x7FF		/* expected length, encoded as n - 1 */
#define TD_TOKEN_PID_MASK	0xFF

#define uhci_explen(len)	((len) << TD_TOKEN_EXPLEN_SHIFT)

#define uhci_expected_length(token) ((((token) >> 21) + 1) & TD_TOKEN_EXPLEN_MASK)
#define uhci_toggle(token)	(((token) >> TD_TOKEN_TOGGLE_SHIFT) & 1)
#define uhci_endpoint(token)	(((token) >> 15) & 0xf)
#define uhci_devaddr(token)	(((token) >> TD_TOKEN_DEVADDR_SHIFT) & 0x7f)
#define uhci_devep(token)	(((token) >> TD_TOKEN_DEVADDR_SHIFT) & 0x7ff)
#define uhci_packetid(token)	((token) & TD_TOKEN_PID_MASK)
#define uhci_packetout(token)	(uhci_packetid(token) != USB_PID_IN)
#define uhci_packetin(token)	(uhci_packetid(token) == USB_PID_IN)
186 * But let's just go with the documentation, at least for 32-bit machines.
187 * On 64-bit machines we probably want to take advantage of the fact that
188 * hw doesn't really care about the size of the sw-only area.
189 *
190 * Alas, not anymore, we have more than 4 words for software, woops.
191 * Everything still works tho, surprise! -jerdfelt
192 *
193 * td->link points to either another TD (not necessarily for the same urb or
194 * even the same endpoint), or nothing (PTR_TERM), or a QH (for queued urbs)
195 */
struct uhci_td {
	/* Hardware fields — read/written by the controller; see the
	 * accessor td_status() below for asynchronous-update handling */
	__le32 link;
	__le32 status;
	__le32 token;
	__le32 buffer;

	/* Software fields */
	dma_addr_t dma_handle;		/* bus address of this TD */

	struct usb_device *dev;
	struct urb *urb;

	struct list_head list;		/* P: urb->lock */
	struct list_head remove_list;	/* P: uhci->td_remove_list_lock */

	int frame;			/* for iso: what frame? */
	struct list_head fl_list;	/* P: uhci->frame_list_lock */
} __attribute__((aligned(16)));		/* hardware requires 16-byte alignment */
215
216/*
217 * We need a special accessor for the control/status word because it is
218 * subject to asynchronous updates by the controller
219 */
220static u32 inline td_status(struct uhci_td *td) {
221 __le32 status = td->status;
222
223 barrier();
224 return le32_to_cpu(status);
225}
226
227
228/*
229 * The UHCI driver places Interrupt, Control and Bulk into QH's both
 * to group together TD's for one transfer, and also to facilitate queuing
231 * of URB's. To make it easy to insert entries into the schedule, we have
232 * a skeleton of QH's for each predefined Interrupt latency, low-speed
233 * control, full-speed control and terminating QH (see explanation for
234 * the terminating QH below).
235 *
236 * When we want to add a new QH, we add it to the end of the list for the
237 * skeleton QH.
238 *
239 * For instance, the queue can look like this:
240 *
241 * skel int128 QH
242 * dev 1 interrupt QH
243 * dev 5 interrupt QH
244 * skel int64 QH
245 * skel int32 QH
246 * ...
247 * skel int1 QH
248 * skel low-speed control QH
249 * dev 5 control QH
250 * skel full-speed control QH
251 * skel bulk QH
252 * dev 1 bulk QH
253 * dev 2 bulk QH
254 * skel terminating QH
255 *
256 * The terminating QH is used for 2 reasons:
257 * - To place a terminating TD which is used to workaround a PIIX bug
258 * (see Intel errata for explanation)
259 * - To loop back to the full-speed control queue for full-speed bandwidth
260 * reclamation
261 *
262 * Isochronous transfers are stored before the start of the skeleton
263 * schedule and don't use QH's. While the UHCI spec doesn't forbid the
264 * use of QH's for Isochronous, it doesn't use them either. Since we don't
265 * need to use them either, we follow the spec diagrams in hope that it'll
266 * be more compatible with future UHCI implementations.
267 */
268
/* Number of skeleton QHs and the fixed index of each in skelqh[];
 * lower index = longer interrupt period (see the schedule diagram above) */
#define UHCI_NUM_SKELQH	12
#define skel_int128_qh	skelqh[0]
#define skel_int64_qh	skelqh[1]
#define skel_int32_qh	skelqh[2]
#define skel_int16_qh	skelqh[3]
#define skel_int8_qh	skelqh[4]
#define skel_int4_qh	skelqh[5]
#define skel_int2_qh	skelqh[6]
#define skel_int1_qh	skelqh[7]
#define skel_ls_control_qh	skelqh[8]
#define skel_fs_control_qh	skelqh[9]
#define skel_bulk_qh	skelqh[10]
#define skel_term_qh	skelqh[11]
282
283/*
284 * Search tree for determining where <interval> fits in the skelqh[]
285 * skeleton.
286 *
287 * An interrupt request should be placed into the slowest skelqh[]
288 * which meets the interval/period/frequency requirement.
289 * An interrupt request is allowed to be faster than <interval> but not slower.
290 *
291 * For a given <interval>, this function returns the appropriate/matching
292 * skelqh[] index value.
293 */
static inline int __interval_to_skel(int interval)
{
	/* Walk the possible periods from slowest to fastest and pick the
	 * first skeleton whose period does not exceed <interval>; anything
	 * below 2 ms (including 0) lands on the every-frame int1 queue. */
	if (interval >= 128)
		return 0;	/* int128 for 128-255 ms (Max.) */
	if (interval >= 64)
		return 1;	/* int64 for 64-127 ms */
	if (interval >= 32)
		return 2;	/* int32 for 32-63 ms */
	if (interval >= 16)
		return 3;	/* int16 for 16-31 ms */
	if (interval >= 8)
		return 4;	/* int8 for 8-15 ms */
	if (interval >= 4)
		return 5;	/* int4 for 4-7 ms */
	if (interval >= 2)
		return 6;	/* int2 for 2-3 ms */
	return 7;		/* int1 for 0-1 ms */
}
315
316/*
317 * Device states for the host controller.
318 *
319 * To prevent "bouncing" in the presence of electrical noise,
320 * we insist on a 1-second "grace" period, before switching to
321 * the RUNNING or SUSPENDED states, during which the state is
322 * not allowed to change.
323 *
324 * The resume process is divided into substates in order to avoid
 * potentially lengthy delays during the timer handler.
326 *
327 * States in which the host controller is halted must have values <= 0.
328 */
enum uhci_state {
	UHCI_RESET,
	UHCI_RUNNING_GRACE,		/* Before RUNNING */
	UHCI_RUNNING,			/* The normal state */
	UHCI_SUSPENDING_GRACE,		/* Before SUSPENDED */
	UHCI_SUSPENDED = -10,		/* When no devices are attached */
	UHCI_RESUMING_1,		/* ending global-resume signalling */
	UHCI_RESUMING_2			/* waiting for the EOP to go out */
};
338
339/*
340 * This describes the full uhci information.
341 *
342 * Note how the "proper" USB information is just
343 * a subset of what the full implementation needs.
344 */
struct uhci_hcd {

	/* debugfs */
	struct dentry *dentry;

	/* Grabbed from PCI */
	unsigned long io_addr;

	struct dma_pool *qh_pool;
	struct dma_pool *td_pool;

	struct usb_bus *bus;

	struct uhci_td *term_td;	/* Terminating TD, see UHCI bug */
	struct uhci_qh *skelqh[UHCI_NUM_SKELQH];	/* Skeleton QH's */

	spinlock_t lock;
	struct uhci_frame_list *fl;		/* P: uhci->lock */
	int fsbr;				/* Full-speed bandwidth reclamation */
	unsigned long fsbrtimeout;		/* FSBR delay */

	enum uhci_state state;			/* FIXME: needs a spinlock */
	unsigned long state_end;		/* Time of next transition */
	unsigned int frame_number;		/* As of last check */
	unsigned int is_stopped;
#define UHCI_IS_STOPPED		9999		/* Larger than a frame # */

	unsigned int scan_in_progress:1;	/* Schedule scan is running */
	unsigned int need_rescan:1;		/* Redo the schedule scan */
	unsigned int resume_detect:1;		/* Need a Global Resume */

	/* Support for port suspend/resume/reset */
	unsigned long port_c_suspend;		/* Bit-arrays of ports */
	unsigned long suspended_ports;
	unsigned long resuming_ports;
	unsigned long ports_timeout;		/* Time to stop signalling */

	/* Main list of URB's currently controlled by this HC */
	struct list_head urb_list;		/* P: uhci->lock */

	/* List of QH's that are done, but waiting to be unlinked (race) */
	struct list_head qh_remove_list;	/* P: uhci->lock */
	unsigned int qh_remove_age;		/* Age in frames */

	/* List of TD's that are done, but waiting to be freed (race) */
	struct list_head td_remove_list;	/* P: uhci->lock */
	unsigned int td_remove_age;		/* Age in frames */

	/* List of asynchronously unlinked URB's */
	struct list_head urb_remove_list;	/* P: uhci->lock */
	unsigned int urb_remove_age;		/* Age in frames */

	/* List of URB's awaiting completion callback */
	struct list_head complete_list;		/* P: uhci->lock */

	int rh_numports;			/* detected root-hub port count */

	struct timer_list stall_timer;		/* periodic driver timer */

	wait_queue_head_t waitqh;		/* endpoint_disable waiters */
};
406
407/* Convert between a usb_hcd pointer and the corresponding uhci_hcd */
static inline struct uhci_hcd *hcd_to_uhci(struct usb_hcd *hcd)
{
	/* our private state lives in the hcd's private-data area */
	return (struct uhci_hcd *) (hcd->hcd_priv);
}
static inline struct usb_hcd *uhci_to_hcd(struct uhci_hcd *uhci)
{
	/* inverse of hcd_to_uhci: step back to the enclosing usb_hcd */
	return container_of((void *) uhci, struct usb_hcd, hcd_priv);
}

/* The struct device used for DMA mapping and dev_* messages */
#define uhci_dev(u)	(uhci_to_hcd(u)->self.controller)
418
/* Per-URB private state, allocated from the uhci_up_cachep slab */
struct urb_priv {
	struct list_head urb_list;	/* node on uhci->urb_list */

	struct urb *urb;		/* back-pointer to the owning URB */

	struct uhci_qh *qh;		/* QH for this URB */
	struct list_head td_list;	/* P: urb->lock */

	unsigned fsbr : 1;		/* URB turned on FSBR */
	unsigned fsbr_timeout : 1;	/* URB timed out on FSBR */
	unsigned queued : 1;		/* QH was queued (not linked in) */
	unsigned short_control_packet : 1;	/* If we get a short packet during */
						/*  a control transfer, retrigger */
						/*  the status phase */

	unsigned long inserttime;	/* In jiffies */
	unsigned long fsbrtime;		/* In jiffies */

	struct list_head queue_list;	/* P: uhci->frame_list_lock */
};
439
440/*
441 * Locking in uhci.c
442 *
443 * Almost everything relating to the hardware schedule and processing
444 * of URBs is protected by uhci->lock. urb->status is protected by
445 * urb->lock; that's the one exception.
446 *
447 * To prevent deadlocks, never lock uhci->lock while holding urb->lock.
448 * The safe order of locking is:
449 *
450 * #1 uhci->lock
451 * #2 urb->lock
452 */
453
454#endif
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
new file mode 100644
index 000000000000..4c45ba8390f8
--- /dev/null
+++ b/drivers/usb/host/uhci-hub.c
@@ -0,0 +1,299 @@
1/*
2 * Universal Host Controller Interface driver for USB.
3 *
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5 *
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8 * (C) Copyright 1999 Randy Dunlap
9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
13 */
14
/* Canned hub descriptor returned for GetHubDescriptor; bNbrPorts is
 * patched at request time with the real port count. */
static __u8 root_hub_hub_des[] =
{
	0x09,			/* __u8  bLength; */
	0x29,			/* __u8  bDescriptorType; Hub-descriptor */
	0x02,			/* __u8  bNbrPorts; */
	0x0a,			/* __u16 wHubCharacteristics; */
	0x00,			/*   (per-port OC, no power switching) */
	0x01,			/* __u8  bPwrOn2pwrGood; 2ms */
	0x00,			/* __u8  bHubContrCurrent; 0 mA */
	0x00,			/* __u8  DeviceRemovable; *** 7 Ports max *** */
	0xff			/* __u8  PortPwrCtrlMask; *** 7 ports max *** */
};
27
/* Maximum number of root-hub ports this driver will report */
#define	UHCI_RH_MAXCHILD	7

/* must write as zeroes */
#define WZ_BITS		(USBPORTSC_RES2 | USBPORTSC_RES3 | USBPORTSC_RES4)

/* status change bits: nonzero writes will clear (write-1-to-clear) */
#define RWC_BITS	(USBPORTSC_OCC | USBPORTSC_PEC | USBPORTSC_CSC)
35
/*
 * Fill in the root hub's status-change bitmap: bit (port + 1) of *buf is
 * set for every port with a pending change (bit 0, the hub itself, is
 * never set).  Returns nonzero if any port changed.
 */
static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int port;

	*buf = 0;
	for (port = 0; port < uhci->rh_numports; ++port) {
		/* Hardware change bits, or a suspend change tracked in
		 * software only */
		if ((inw(uhci->io_addr + USBPORTSC1 + port * 2) & RWC_BITS) ||
				test_bit(port, &uhci->port_c_suspend))
			*buf |= (1 << (port + 1));
	}
	/* Changes seen while the controller is suspended request a resume */
	if (*buf && uhci->state == UHCI_SUSPENDED)
		uhci->resume_detect = 1;
	return !!*buf;
}
51
/* Set the hub_control reply length and leave the enclosing switch */
#define OK(x)			len = (x); break

/* Read-modify-write a port-status register, clearing bits in (x).
 * Write-1-to-clear change bits are written back only when they are
 * among the bits being cleared; write-as-zero bits are always masked. */
#define CLR_RH_PORTSTAT(x) \
	status = inw(port_addr); \
	status &= ~(RWC_BITS|WZ_BITS); \
	status &= ~(x); \
	status |= RWC_BITS & (x); \
	outw(status, port_addr)

/* Read-modify-write a port-status register, setting bits in (x)
 * without accidentally clearing the write-1-to-clear change bits. */
#define SET_RH_PORTSTAT(x) \
	status = inw(port_addr); \
	status |= (x); \
	status &= ~(RWC_BITS|WZ_BITS); \
	outw(status, port_addr)
66
/* UHCI controllers don't automatically stop resume signalling after 20 msec,
 * so we have to poll and check timeouts in order to take care of it.
 */

/*
 * End suspend/resume signalling on one port: clear SUSP and RD, update
 * the software port-state bitmaps, and record a suspend-status change.
 * Caller must hold uhci->lock (all callers are under it in this file).
 */
static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
		unsigned long port_addr)
{
	int status;

	if (test_bit(port, &uhci->suspended_ports)) {
		CLR_RH_PORTSTAT(USBPORTSC_SUSP | USBPORTSC_RD);
		clear_bit(port, &uhci->suspended_ports);
		clear_bit(port, &uhci->resuming_ports);
		set_bit(port, &uhci->port_c_suspend);

		/* The controller won't actually turn off the RD bit until
		 * it has had a chance to send a low-speed EOP sequence,
		 * which takes 3 bit times (= 2 microseconds).  We'll delay
		 * slightly longer for good luck. */
		udelay(4);
	}
}
88
/*
 * Poll every root-hub port and finish any timed operation that has
 * expired: end a port reset after its timeout, and either start the
 * 20 ms resume-signalling window or finish it once the window closes.
 */
static void uhci_check_ports(struct uhci_hcd *uhci)
{
	unsigned int port;
	unsigned long port_addr;
	int status;

	for (port = 0; port < uhci->rh_numports; ++port) {
		port_addr = uhci->io_addr + USBPORTSC1 + 2 * port;
		status = inw(port_addr);
		if (unlikely(status & USBPORTSC_PR)) {
			/* Reset in progress; end it once the timeout set by
			 * SetPortFeature(RESET) has elapsed */
			if (time_after_eq(jiffies, uhci->ports_timeout)) {
				CLR_RH_PORTSTAT(USBPORTSC_PR);
				udelay(10);

				/* If the port was enabled before, turning
				 * reset on caused a port enable change.
				 * Turning reset off causes a port connect
				 * status change.  Clear these changes. */
				CLR_RH_PORTSTAT(USBPORTSC_CSC | USBPORTSC_PEC);
				SET_RH_PORTSTAT(USBPORTSC_PE);
			}
		}
		if (unlikely(status & USBPORTSC_RD)) {
			if (!test_bit(port, &uhci->resuming_ports)) {

				/* Port received a wakeup request */
				set_bit(port, &uhci->resuming_ports);
				uhci->ports_timeout = jiffies +
						msecs_to_jiffies(20);
			} else if (time_after_eq(jiffies,
						uhci->ports_timeout)) {
				/* 20 ms of resume signalling done */
				uhci_finish_suspend(uhci, port, port_addr);
			}
		}
	}
}
125
/* size of returned buffer is part of USB spec */
/*
 * Root-hub control request dispatcher.  Emulates the hub-class requests
 * (chapter 11 of the USB spec) on top of the two UHCI port registers.
 * Returns 0 on success or -EPIPE (a request "stall") for anything
 * unsupported or out of range.  Note that OK(x) ends the switch.
 */
static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			u16 wIndex, char *buf, u16 wLength)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int status, lstatus, retval = 0, len = 0;
	unsigned int port = wIndex - 1;		/* ports are 1-based on the wire */
	unsigned long port_addr = uhci->io_addr + USBPORTSC1 + 2 * port;
	u16 wPortChange, wPortStatus;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	switch (typeReq) {

	case GetHubStatus:
		*(__le32 *)buf = cpu_to_le32(0);
		OK(4);		/* hub power */
	case GetPortStatus:
		if (port >= uhci->rh_numports)
			goto err;

		uhci_check_ports(uhci);
		status = inw(port_addr);

		/* Intel controllers report the OverCurrent bit active on.
		 * VIA controllers report it active off, so we'll adjust the
		 * bit value.  (It's not standardized in the UHCI spec.)
		 */
		if (to_pci_dev(hcd->self.controller)->vendor ==
				PCI_VENDOR_ID_VIA)
			status ^= USBPORTSC_OC;

		/* UHCI doesn't support C_RESET (always false) */
		wPortChange = lstatus = 0;
		if (status & USBPORTSC_CSC)
			wPortChange |= USB_PORT_STAT_C_CONNECTION;
		if (status & USBPORTSC_PEC)
			wPortChange |= USB_PORT_STAT_C_ENABLE;
		if (status & USBPORTSC_OCC)
			wPortChange |= USB_PORT_STAT_C_OVERCURRENT;

		/* lstatus is only used in the debug message below:
		 * bit 0 = suspend change, bit 1 = suspended,
		 * bit 2 = resuming */
		if (test_bit(port, &uhci->port_c_suspend)) {
			wPortChange |= USB_PORT_STAT_C_SUSPEND;
			lstatus |= 1;
		}
		if (test_bit(port, &uhci->suspended_ports))
			lstatus |= 2;
		if (test_bit(port, &uhci->resuming_ports))
			lstatus |= 4;

		/* UHCI has no power switching (always on) */
		wPortStatus = USB_PORT_STAT_POWER;
		if (status & USBPORTSC_CCS)
			wPortStatus |= USB_PORT_STAT_CONNECTION;
		if (status & USBPORTSC_PE) {
			wPortStatus |= USB_PORT_STAT_ENABLE;
			if (status & (USBPORTSC_SUSP | USBPORTSC_RD))
				wPortStatus |= USB_PORT_STAT_SUSPEND;
		}
		if (status & USBPORTSC_OC)
			wPortStatus |= USB_PORT_STAT_OVERCURRENT;
		if (status & USBPORTSC_PR)
			wPortStatus |= USB_PORT_STAT_RESET;
		if (status & USBPORTSC_LSDA)
			wPortStatus |= USB_PORT_STAT_LOW_SPEED;

		if (wPortChange)
			dev_dbg(uhci_dev(uhci), "port %d portsc %04x,%02x\n",
					wIndex, status, lstatus);

		*(__le16 *)buf = cpu_to_le16(wPortStatus);
		*(__le16 *)(buf + 2) = cpu_to_le16(wPortChange);
		OK(4);
	case SetHubFeature:		/* We don't implement these */
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			OK(0);
		default:
			goto err;
		}
		break;
	case SetPortFeature:
		if (port >= uhci->rh_numports)
			goto err;

		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			set_bit(port, &uhci->suspended_ports);
			SET_RH_PORTSTAT(USBPORTSC_SUSP);
			OK(0);
		case USB_PORT_FEAT_RESET:
			SET_RH_PORTSTAT(USBPORTSC_PR);

			/* Reset terminates Resume signalling */
			uhci_finish_suspend(uhci, port, port_addr);

			/* USB v2.0 7.1.7.5: reset lasts at least 50 ms;
			 * uhci_check_ports() ends it after this timeout */
			uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
			OK(0);
		case USB_PORT_FEAT_POWER:
			/* UHCI has no power switching */
			OK(0);
		default:
			goto err;
		}
		break;
	case ClearPortFeature:
		if (port >= uhci->rh_numports)
			goto err;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			CLR_RH_PORTSTAT(USBPORTSC_PE);

			/* Disable terminates Resume signalling */
			uhci_finish_suspend(uhci, port, port_addr);
			OK(0);
		case USB_PORT_FEAT_C_ENABLE:
			CLR_RH_PORTSTAT(USBPORTSC_PEC);
			OK(0);
		case USB_PORT_FEAT_SUSPEND:
			if (test_bit(port, &uhci->suspended_ports) &&
					!test_and_set_bit(port,
						&uhci->resuming_ports)) {
				SET_RH_PORTSTAT(USBPORTSC_RD);

				/* The controller won't allow RD to be set
				 * if the port is disabled.  When this happens
				 * just skip the Resume signalling.
				 */
				if (!(inw(port_addr) & USBPORTSC_RD))
					uhci_finish_suspend(uhci, port,
							port_addr);
				else
					/* USB v2.0 7.1.7.7: drive resume
					 * signalling for 20 ms */
					uhci->ports_timeout = jiffies +
						msecs_to_jiffies(20);
			}
			OK(0);
		case USB_PORT_FEAT_C_SUSPEND:
			clear_bit(port, &uhci->port_c_suspend);
			OK(0);
		case USB_PORT_FEAT_POWER:
			/* UHCI has no power switching */
			goto err;
		case USB_PORT_FEAT_C_CONNECTION:
			CLR_RH_PORTSTAT(USBPORTSC_CSC);
			OK(0);
		case USB_PORT_FEAT_C_OVER_CURRENT:
			CLR_RH_PORTSTAT(USBPORTSC_OCC);
			OK(0);
		case USB_PORT_FEAT_C_RESET:
			/* this driver won't report these */
			OK(0);
		default:
			goto err;
		}
		break;
	case GetHubDescriptor:
		len = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
		memcpy(buf, root_hub_hub_des, len);
		if (len > 2)
			buf[2] = uhci->rh_numports;	/* patch in real port count */
		OK(len);
	default:
err:
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&uhci->lock, flags);

	return retval;
}
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
new file mode 100644
index 000000000000..2a7c19501f24
--- /dev/null
+++ b/drivers/usb/host/uhci-q.c
@@ -0,0 +1,1539 @@
1/*
2 * Universal Host Controller Interface driver for USB.
3 *
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5 *
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8 * (C) Copyright 1999 Randy Dunlap
9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
17 */
18
/* Forward declarations for routines defined later in this file */
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
static void uhci_free_pending_tds(struct uhci_hcd *uhci);
24
/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem.  The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt.  We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases.  I don't think it's worth the effort
 */
/* Request an interrupt at the next frame by setting IOC on the
 * terminating TD */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}
37
/* Cancel the frame interrupt request made by uhci_set_next_interrupt() */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
42
43static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
44 struct urb_priv *urbp)
45{
46 list_move_tail(&urbp->urb_list, &uhci->complete_list);
47}
48
49static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
50{
51 dma_addr_t dma_handle;
52 struct uhci_td *td;
53
54 td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
55 if (!td)
56 return NULL;
57
58 td->dma_handle = dma_handle;
59
60 td->link = UHCI_PTR_TERM;
61 td->buffer = 0;
62
63 td->frame = -1;
64 td->dev = dev;
65
66 INIT_LIST_HEAD(&td->list);
67 INIT_LIST_HEAD(&td->remove_list);
68 INIT_LIST_HEAD(&td->fl_list);
69
70 usb_get_dev(dev);
71
72 return td;
73}
74
/* Fill in the three hardware-visible TD words (status, token, buffer
 * pointer), converting each to little-endian as the controller expects. */
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
82
/*
 * We insert Isochronous URB's directly into the frame list at the beginning
 */
/* Link @td at the tail of frame @framenum's TD chain.  The wmb() makes
 * sure the TD's own link pointer is visible before the previous entry
 * (or the frame list slot) is pointed at it, since the HC may be
 * walking the list concurrently. */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->fl->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->fl->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->fl->frame[framenum];
		wmb();
		uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->fl->frame_cpu[framenum] = td;
	}
}
111
/*
 * Unlink @td from the frame list chain it was inserted into by
 * uhci_insert_td_frame_list().  Safe to call on a TD that was never
 * inserted.  Patches either the frame list slot (if the TD was the
 * chain head) or the previous TD's link to skip this TD.
 */
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1 && list_empty(&td->fl_list))
		return;

	if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
		/* TD is the head of its frame's chain */
		if (list_empty(&td->fl_list)) {
			uhci->fl->frame[td->frame] = td->link;
			uhci->fl->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->fl->frame_cpu[td->frame] = ntd;
		}
	} else {
		/* Mid-chain: bypass this TD from its predecessor */
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	wmb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;
}
142
/*
 * Inserts a td list into qh.
 */
/* Chain all of the URB's TDs off the QH's element pointer, each link
 * tagged with @breadth (breadth- or depth-first traversal), ending with
 * a terminating link. */
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	__le32 *plink;

	/* Ordering isn't important here yet since the QH hasn't been */
	/* inserted into the schedule yet */
	plink = &qh->element;
	list_for_each_entry(td, &urbp->td_list, list) {
		*plink = cpu_to_le32(td->dma_handle) | breadth;
		plink = &td->link;
	}
	*plink = UHCI_PTR_TERM;
}
161
/*
 * Return a TD to the DMA pool and drop the device reference taken in
 * uhci_alloc_td().  The TD must already be off every list; the warnings
 * flag driver bugs where it is freed while still linked.
 */
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->remove_list))
		dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	if (td->dev)
		usb_put_dev(td->dev);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
176
177static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
178{
179 dma_addr_t dma_handle;
180 struct uhci_qh *qh;
181
182 qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
183 if (!qh)
184 return NULL;
185
186 qh->dma_handle = dma_handle;
187
188 qh->element = UHCI_PTR_TERM;
189 qh->link = UHCI_PTR_TERM;
190
191 qh->dev = dev;
192 qh->urbp = NULL;
193
194 INIT_LIST_HEAD(&qh->list);
195 INIT_LIST_HEAD(&qh->remove_list);
196
197 usb_get_dev(dev);
198
199 return qh;
200}
201
/*
 * Return a QH to the DMA pool and drop the device reference taken in
 * uhci_alloc_qh().  The QH must already be unlinked; the warnings flag
 * driver bugs where it is freed while still on a list.
 */
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (!list_empty(&qh->list))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
	if (!list_empty(&qh->remove_list))
		dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);

	if (qh->dev)
		usb_put_dev(qh->dev);

	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
214
/*
 * Append this urb's qh after the last qh in skelqh->list
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct urb_priv *turbp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/* Point to the next skelqh */
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs?  HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
	if (lqh->urbp) {
		/* Every queued QH behind lqh must also point at the new QH */
		list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
			turbp->qh->link = lqh->link;
	}

	list_add_tail(&urbp->qh->list, &skelqh->list);
}
258
/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 */
/* The QH is not freed here: it is placed on uhci->qh_remove_list and
 * reclaimed after the hardware has moved past it (a frame later). */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 newlink;

	if (!qh)
		return;

	/*
	 * Only go through the hoops if it's actually linked in
	 */
	if (!list_empty(&qh->list)) {

		/* If our queue is nonempty, make the next URB the head */
		if (!list_empty(&qh->urbp->queue_list)) {
			struct urb_priv *nurbp;

			nurbp = list_entry(qh->urbp->queue_list.next,
					struct urb_priv, queue_list);
			nurbp->queued = 0;
			list_add(&nurbp->qh->list, &qh->list);
			newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		} else
			newlink = qh->link;

		/* Fix up the previous QH's queue to link to either
		 * the new head of this queue or the start of the
		 * next endpoint's queue. */
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);
		pqh->link = newlink;
		if (pqh->urbp) {
			struct urb_priv *turbp;

			list_for_each_entry(turbp, &pqh->urbp->queue_list,
					queue_list)
				turbp->qh->link = newlink;
		}
		wmb();

		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}

	list_del_init(&qh->urbp->queue_list);
	qh->urbp = NULL;

	/* If the frame counter has advanced since the last batch of
	 * pending QHs was queued, that batch is safely past and can be
	 * freed now */
	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age) {
		uhci_free_pending_qhs(uhci);
		uhci->qh_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty.  Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);
}
325
326static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
327{
328 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
329 struct uhci_td *td;
330
331 list_for_each_entry(td, &urbp->td_list, list) {
332 if (toggle)
333 td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
334 else
335 td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
336
337 toggle ^= 1;
338 }
339
340 return toggle;
341}
342
/* This function will append one URB's QH to another URB's QH.  This is for */
/* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct uhci_td *lltd;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	/* Find the first URB in the queue (the one whose QH is actually
	 * linked into the schedule) */
	furbp = eurbp;
	if (eurbp->queued) {
		list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
			if (!furbp->queued)
				break;
	}

	/* Last URB in the queue, and its last TD */
	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	/* Control transfers always start with toggle 0 */
	if (!usb_pipecontrol(urb->pipe))
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe),
				uhci_fixup_toggle(urb,
					uhci_toggle(td_token(lltd)) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	wmb();	/* Make sure we flush everything */

	/* Splice the new QH in after the current tail's last TD */
	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;
}
383
/*
 * Remove an URB from the middle (or end) of its endpoint queue, fixing
 * the data toggles of the URBs that follow it and patching the hardware
 * links so the HC skips the departing URB.  Removal of the queue head
 * itself is handled in uhci_remove_qh().
 */
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp, *purbp, *turbp;
	struct uhci_td *pltd;
	unsigned int toggle;

	urbp = urb->hcpriv;

	/* Not queued behind anything: nothing to unsplice */
	if (list_empty(&urbp->queue_list))
		return;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/*
	 * Fix up the toggle for the following URBs in the queue.
	 * Only needed for bulk and interrupt: control and isochronous
	 * endpoints don't propagate toggles between messages.
	 */
	if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
		if (!urbp->queued)
			/* We just set the toggle in uhci_unlink_generic */
			toggle = usb_gettoggle(urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe));
		else {
			/* If we're in the middle of the queue, grab the */
			/* toggle from the TD previous to us */
			purbp = list_entry(urbp->queue_list.prev,
					struct urb_priv, queue_list);
			pltd = list_entry(purbp->td_list.prev,
					struct uhci_td, list);
			toggle = uhci_toggle(td_token(pltd)) ^ 1;
		}

		/* Re-toggle every queued URB after us, stopping at the
		 * linked (non-queued) head of the ring */
		list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
			if (!turbp->queued)
				break;
			toggle = uhci_fixup_toggle(turbp->urb, toggle);
		}

		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), toggle);
	}

	if (urbp->queued) {
		/* We're somewhere in the middle (or end).  The case where
		 * we're at the head is handled in uhci_remove_qh(). */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/* we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	/* urbp->queue_list is handled in uhci_remove_qh() */
}
445
446static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
447{
448 struct urb_priv *urbp;
449
450 urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
451 if (!urbp)
452 return NULL;
453
454 memset((void *)urbp, 0, sizeof(*urbp));
455
456 urbp->inserttime = jiffies;
457 urbp->fsbrtime = jiffies;
458 urbp->urb = urb;
459
460 INIT_LIST_HEAD(&urbp->td_list);
461 INIT_LIST_HEAD(&urbp->queue_list);
462 INIT_LIST_HEAD(&urbp->urb_list);
463
464 list_add_tail(&urbp->urb_list, &uhci->urb_list);
465
466 urb->hcpriv = urbp;
467
468 return urbp;
469}
470
471static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
472{
473 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
474
475 td->urb = urb;
476
477 list_add_tail(&td->list, &urbp->td_list);
478}
479
480static void uhci_remove_td_from_urb(struct uhci_td *td)
481{
482 if (list_empty(&td->list))
483 return;
484
485 list_del_init(&td->list);
486
487 td->urb = NULL;
488}
489
/*
 * Tear down an URB's private state: move all its TDs onto
 * uhci->td_remove_list for deferred freeing (after the HC has moved
 * past them), then free the urb_priv itself.  The urb_priv must
 * already be off uhci->urb_list.
 */
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	if (!list_empty(&urbp->urb_list))
		dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
				"or uhci->remove_list!\n", urb);

	/* If the frame counter has advanced, the previous batch of
	 * pending TDs is safely past and can be freed now */
	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
		uhci_free_pending_tds(uhci);
		uhci->td_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty.  Set the IOC bit */
	/* to force an interrupt so we can remove the TD's*/
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
523
524static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
525{
526 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
527
528 if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
529 urbp->fsbr = 1;
530 if (!uhci->fsbr++ && !uhci->fsbrtimeout)
531 uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
532 }
533}
534
535static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
536{
537 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
538
539 if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
540 urbp->fsbr = 0;
541 if (!--uhci->fsbr)
542 uhci->fsbrtimeout = jiffies + FSBR_DELAY;
543 }
544}
545
546/*
547 * Map status to standard result codes
548 *
549 * <status> is (td_status(td) & 0xF60000), a.k.a.
550 * uhci_status_bits(td_status(td)).
551 * Note: <status> does not include the TD_CTRL_NAK bit.
552 * <dir_out> is True for output TDs and False for input TDs.
553 */
554static int uhci_map_status(int status, int dir_out)
555{
556 if (!status)
557 return 0;
558 if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
559 return -EPROTO;
560 if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
561 if (dir_out)
562 return -EPROTO;
563 else
564 return -EILSEQ;
565 }
566 if (status & TD_CTRL_BABBLE) /* Babble */
567 return -EOVERFLOW;
568 if (status & TD_CTRL_DBUFERR) /* Buffer error */
569 return -ENOSR;
570 if (status & TD_CTRL_STALLED) /* Stalled */
571 return -EPIPE;
572 WARN_ON(status & TD_CTRL_ACTIVE); /* Active */
573 return 0;
574}
575
/*
 * Control transfers
 */
/*
 * Build the TD chain for a control transfer (SETUP TD, zero or more
 * DATA TDs, and a final STATUS TD), hang it off a fresh QH, and link
 * the QH into the schedule (or append it behind @eurb if one is already
 * queued to this endpoint).  Returns -EINPROGRESS on success or
 * -ENOMEM; on error, partially-built TDs remain on urbp->td_list and
 * are presumably cleaned up by the caller via uhci_destroy_urb_priv()
 * — confirm against uhci_urb_enqueue().
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/* SETUP packets are always 8 bytes (explen field is length - 1) */
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(7),
		urb->setup_dma);

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TD's
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;
	
		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
			data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status 
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	/* Interrupt on completion of the status stage */
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
		destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * is in the DEFAULT state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state == USB_STATE_DEFAULT)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
697
/*
 * If control-IN transfer was short, the status packet wasn't sent.
 * This routine changes the element pointer in the QH to point at the
 * status TD.  It's safe to do this even while the QH is live, because
 * the hardware only updates the element pointer following a successful
 * transfer.  The inactive TD for the short packet won't cause an update,
 * so the pointer won't get overwritten.  The next time the controller
 * sees this QH, it will send the status packet.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	/* Remember that we skipped ahead, so result processing knows to
	 * jump straight to the status stage next time */
	urbp->short_control_packet = 1;

	/* The status TD is always the last one on the list */
	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	urbp->qh->element = cpu_to_le32(td->dma_handle);

	return -EINPROGRESS;
}
719
720
721static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
722{
723 struct list_head *tmp, *head;
724 struct urb_priv *urbp = urb->hcpriv;
725 struct uhci_td *td;
726 unsigned int status;
727 int ret = 0;
728
729 if (list_empty(&urbp->td_list))
730 return -EINVAL;
731
732 head = &urbp->td_list;
733
734 if (urbp->short_control_packet) {
735 tmp = head->prev;
736 goto status_stage;
737 }
738
739 tmp = head->next;
740 td = list_entry(tmp, struct uhci_td, list);
741
742 /* The first TD is the SETUP stage, check the status, but skip */
743 /* the count */
744 status = uhci_status_bits(td_status(td));
745 if (status & TD_CTRL_ACTIVE)
746 return -EINPROGRESS;
747
748 if (status)
749 goto td_error;
750
751 urb->actual_length = 0;
752
753 /* The rest of the TD's (but the last) are data */
754 tmp = tmp->next;
755 while (tmp != head && tmp->next != head) {
756 unsigned int ctrlstat;
757
758 td = list_entry(tmp, struct uhci_td, list);
759 tmp = tmp->next;
760
761 ctrlstat = td_status(td);
762 status = uhci_status_bits(ctrlstat);
763 if (status & TD_CTRL_ACTIVE)
764 return -EINPROGRESS;
765
766 urb->actual_length += uhci_actual_length(ctrlstat);
767
768 if (status)
769 goto td_error;
770
771 /* Check to see if we received a short packet */
772 if (uhci_actual_length(ctrlstat) <
773 uhci_expected_length(td_token(td))) {
774 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
775 ret = -EREMOTEIO;
776 goto err;
777 }
778
779 if (uhci_packetid(td_token(td)) == USB_PID_IN)
780 return usb_control_retrigger_status(uhci, urb);
781 else
782 return 0;
783 }
784 }
785
786status_stage:
787 td = list_entry(tmp, struct uhci_td, list);
788
789 /* Control status stage */
790 status = td_status(td);
791
792#ifdef I_HAVE_BUGGY_APC_BACKUPS
793 /* APC BackUPS Pro kludge */
794 /* It tries to send all of the descriptor instead of the amount */
795 /* we requested */
796 if (status & TD_CTRL_IOC && /* IOC is masked out by uhci_status_bits */
797 status & TD_CTRL_ACTIVE &&
798 status & TD_CTRL_NAK)
799 return 0;
800#endif
801
802 status = uhci_status_bits(status);
803 if (status & TD_CTRL_ACTIVE)
804 return -EINPROGRESS;
805
806 if (status)
807 goto td_error;
808
809 return 0;
810
811td_error:
812 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
813
814err:
815 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
816 /* Some debugging code */
817 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
818 __FUNCTION__, status);
819
820 if (errbuf) {
821 /* Print the chain for debugging purposes */
822 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
823
824 lprintk(errbuf);
825 }
826 }
827
828 return ret;
829}
830
831/*
832 * Common submit for bulk and interrupt
833 */
834static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
835{
836 struct uhci_td *td;
837 struct uhci_qh *qh;
838 unsigned long destination, status;
839 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
840 int len = urb->transfer_buffer_length;
841 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
842 dma_addr_t data = urb->transfer_dma;
843
844 if (len < 0)
845 return -EINVAL;
846
847 /* The "pipe" thing contains the destination in bits 8--18 */
848 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
849
850 status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
851 if (urb->dev->speed == USB_SPEED_LOW)
852 status |= TD_CTRL_LS;
853 if (usb_pipein(urb->pipe))
854 status |= TD_CTRL_SPD;
855
856 /*
857 * Build the DATA TD's
858 */
859 do { /* Allow zero length packets */
860 int pktsze = maxsze;
861
862 if (pktsze >= len) {
863 pktsze = len;
864 if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
865 status &= ~TD_CTRL_SPD;
866 }
867
868 td = uhci_alloc_td(uhci, urb->dev);
869 if (!td)
870 return -ENOMEM;
871
872 uhci_add_td_to_urb(urb, td);
873 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
874 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
875 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
876 data);
877
878 data += pktsze;
879 len -= maxsze;
880
881 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
882 usb_pipeout(urb->pipe));
883 } while (len > 0);
884
885 /*
886 * URB_ZERO_PACKET means adding a 0-length packet, if direction
887 * is OUT and the transfer_length was an exact multiple of maxsze,
888 * hence (len = transfer_length - N * maxsze) == 0
889 * however, if transfer_length == 0, the zero packet was already
890 * prepared above.
891 */
892 if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
893 !len && urb->transfer_buffer_length) {
894 td = uhci_alloc_td(uhci, urb->dev);
895 if (!td)
896 return -ENOMEM;
897
898 uhci_add_td_to_urb(urb, td);
899 uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
900 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
901 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
902 data);
903
904 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
905 usb_pipeout(urb->pipe));
906 }
907
908 /* Set the interrupt-on-completion flag on the last packet.
909 * A more-or-less typical 4 KB URB (= size of one memory page)
910 * will require about 3 ms to transfer; that's a little on the
911 * fast side but not enough to justify delaying an interrupt
912 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
913 * flag setting. */
914 td->status |= cpu_to_le32(TD_CTRL_IOC);
915
916 qh = uhci_alloc_qh(uhci, urb->dev);
917 if (!qh)
918 return -ENOMEM;
919
920 urbp->qh = qh;
921 qh->urbp = urbp;
922
923 /* Always breadth first */
924 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
925
926 if (eurb)
927 uhci_append_queued_urb(uhci, eurb, urb);
928 else
929 uhci_insert_qh(uhci, skelqh, urb);
930
931 return -EINPROGRESS;
932}
933
934/*
935 * Common result for bulk and interrupt
936 */
937static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
938{
939 struct urb_priv *urbp = urb->hcpriv;
940 struct uhci_td *td;
941 unsigned int status = 0;
942 int ret = 0;
943
944 urb->actual_length = 0;
945
946 list_for_each_entry(td, &urbp->td_list, list) {
947 unsigned int ctrlstat = td_status(td);
948
949 status = uhci_status_bits(ctrlstat);
950 if (status & TD_CTRL_ACTIVE)
951 return -EINPROGRESS;
952
953 urb->actual_length += uhci_actual_length(ctrlstat);
954
955 if (status)
956 goto td_error;
957
958 if (uhci_actual_length(ctrlstat) <
959 uhci_expected_length(td_token(td))) {
960 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
961 ret = -EREMOTEIO;
962 goto err;
963 } else
964 return 0;
965 }
966 }
967
968 return 0;
969
970td_error:
971 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
972
973err:
974 /*
975 * Enable this chunk of code if you want to see some more debugging.
976 * But be careful, it has the tendancy to starve out khubd and prevent
977 * disconnects from happening successfully if you have a slow debug
978 * log interface (like a serial console.
979 */
980#if 0
981 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
982 /* Some debugging code */
983 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
984 __FUNCTION__, status);
985
986 if (errbuf) {
987 /* Print the chain for debugging purposes */
988 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
989
990 lprintk(errbuf);
991 }
992 }
993#endif
994 return ret;
995}
996
997static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
998{
999 int ret;
1000
1001 /* Can't have low-speed bulk transfers */
1002 if (urb->dev->speed == USB_SPEED_LOW)
1003 return -EINVAL;
1004
1005 ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1006 if (ret == -EINPROGRESS)
1007 uhci_inc_fsbr(uhci, urb);
1008
1009 return ret;
1010}
1011
1012static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1013{
1014 /* USB 1.1 interrupt transfers only involve one packet per interval;
1015 * that's the uhci_submit_common() "breadth first" policy. Drivers
1016 * can submit urbs of any length, but longer ones might need many
1017 * intervals to complete.
1018 */
1019 return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
1020}
1021
1022/*
1023 * Isochronous transfers
1024 */
1025static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1026{
1027 struct urb *last_urb = NULL;
1028 struct urb_priv *up;
1029 int ret = 0;
1030
1031 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1032 struct urb *u = up->urb;
1033
1034 /* look for pending URB's with identical pipe handle */
1035 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1036 (u->status == -EINPROGRESS) && (u != urb)) {
1037 if (!last_urb)
1038 *start = u->start_frame;
1039 last_urb = u;
1040 }
1041 }
1042
1043 if (last_urb) {
1044 *end = (last_urb->start_frame + last_urb->number_of_packets *
1045 last_urb->interval) & (UHCI_NUMFRAMES-1);
1046 ret = 0;
1047 } else
1048 ret = -1; /* no previous urb found */
1049
1050 return ret;
1051}
1052
1053static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1054{
1055 int limits;
1056 unsigned int start = 0, end = 0;
1057
1058 if (urb->number_of_packets > 900) /* 900? Why? */
1059 return -EFBIG;
1060
1061 limits = isochronous_find_limits(uhci, urb, &start, &end);
1062
1063 if (urb->transfer_flags & URB_ISO_ASAP) {
1064 if (limits) {
1065 uhci_get_current_frame_number(uhci);
1066 urb->start_frame = (uhci->frame_number + 10)
1067 & (UHCI_NUMFRAMES - 1);
1068 } else
1069 urb->start_frame = end;
1070 } else {
1071 urb->start_frame &= (UHCI_NUMFRAMES - 1);
1072 /* FIXME: Sanity check */
1073 }
1074
1075 return 0;
1076}
1077
1078/*
1079 * Isochronous transfers
1080 */
1081static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1082{
1083 struct uhci_td *td;
1084 int i, ret, frame;
1085 int status, destination;
1086
1087 status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1088 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1089
1090 ret = isochronous_find_start(uhci, urb);
1091 if (ret)
1092 return ret;
1093
1094 frame = urb->start_frame;
1095 for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
1096 if (!urb->iso_frame_desc[i].length)
1097 continue;
1098
1099 td = uhci_alloc_td(uhci, urb->dev);
1100 if (!td)
1101 return -ENOMEM;
1102
1103 uhci_add_td_to_urb(urb, td);
1104 uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
1105 urb->transfer_dma + urb->iso_frame_desc[i].offset);
1106
1107 if (i + 1 >= urb->number_of_packets)
1108 td->status |= cpu_to_le32(TD_CTRL_IOC);
1109
1110 uhci_insert_td_frame_list(uhci, td, frame);
1111 }
1112
1113 return -EINPROGRESS;
1114}
1115
1116static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1117{
1118 struct uhci_td *td;
1119 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1120 int status;
1121 int i, ret = 0;
1122
1123 urb->actual_length = 0;
1124
1125 i = 0;
1126 list_for_each_entry(td, &urbp->td_list, list) {
1127 int actlength;
1128 unsigned int ctrlstat = td_status(td);
1129
1130 if (ctrlstat & TD_CTRL_ACTIVE)
1131 return -EINPROGRESS;
1132
1133 actlength = uhci_actual_length(ctrlstat);
1134 urb->iso_frame_desc[i].actual_length = actlength;
1135 urb->actual_length += actlength;
1136
1137 status = uhci_map_status(uhci_status_bits(ctrlstat),
1138 usb_pipeout(urb->pipe));
1139 urb->iso_frame_desc[i].status = status;
1140 if (status) {
1141 urb->error_count++;
1142 ret = status;
1143 }
1144
1145 i++;
1146 }
1147
1148 return ret;
1149}
1150
1151static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1152{
1153 struct urb_priv *up;
1154
1155 /* We don't match Isoc transfers since they are special */
1156 if (usb_pipeisoc(urb->pipe))
1157 return NULL;
1158
1159 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1160 struct urb *u = up->urb;
1161
1162 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1163 /* For control, ignore the direction */
1164 if (usb_pipecontrol(urb->pipe) &&
1165 (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1166 return u;
1167 else if (u->pipe == urb->pipe)
1168 return u;
1169 }
1170 }
1171
1172 return NULL;
1173}
1174
/*
 * usb_hcd enqueue entry point: allocate per-URB state and dispatch to
 * the type-specific submit routine.  Returns 0 on success (URB now in
 * progress) or a negative error code.  Runs with uhci->lock held.
 */
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep,
		struct urb *urb, int mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto out;

	/* Another URB pending on the same endpoint, if any; used both
	 * for queueing behind it and for inheriting its bandwidth */
	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		ret = -ENOMEM;
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			/* First URB on this endpoint: must reserve
			 * periodic bandwidth before submitting */
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		uhci_destroy_urb_priv(uhci, urb);
	} else
		ret = 0;	/* submit routines signal success as -EINPROGRESS */

out:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}
1246
1247/*
1248 * Return the result of a transfer
1249 */
1250static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
1251{
1252 int ret = -EINPROGRESS;
1253 struct urb_priv *urbp;
1254
1255 spin_lock(&urb->lock);
1256
1257 urbp = (struct urb_priv *)urb->hcpriv;
1258
1259 if (urb->status != -EINPROGRESS) /* URB already dequeued */
1260 goto out;
1261
1262 switch (usb_pipetype(urb->pipe)) {
1263 case PIPE_CONTROL:
1264 ret = uhci_result_control(uhci, urb);
1265 break;
1266 case PIPE_BULK:
1267 case PIPE_INTERRUPT:
1268 ret = uhci_result_common(uhci, urb);
1269 break;
1270 case PIPE_ISOCHRONOUS:
1271 ret = uhci_result_isochronous(uhci, urb);
1272 break;
1273 }
1274
1275 if (ret == -EINPROGRESS)
1276 goto out;
1277 urb->status = ret;
1278
1279 switch (usb_pipetype(urb->pipe)) {
1280 case PIPE_CONTROL:
1281 case PIPE_BULK:
1282 case PIPE_ISOCHRONOUS:
1283 /* Release bandwidth for Interrupt or Isoc. transfers */
1284 if (urb->bandwidth)
1285 usb_release_bandwidth(urb->dev, urb, 1);
1286 uhci_unlink_generic(uhci, urb);
1287 break;
1288 case PIPE_INTERRUPT:
1289 /* Release bandwidth for Interrupt or Isoc. transfers */
1290 /* Make sure we don't release if we have a queued URB */
1291 if (list_empty(&urbp->queue_list) && urb->bandwidth)
1292 usb_release_bandwidth(urb->dev, urb, 0);
1293 else
1294 /* bandwidth was passed on to queued URB, */
1295 /* so don't let usb_unlink_urb() release it */
1296 urb->bandwidth = 0;
1297 uhci_unlink_generic(uhci, urb);
1298 break;
1299 default:
1300 dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
1301 "for urb %p\n",
1302 __FUNCTION__, usb_pipetype(urb->pipe), urb);
1303 }
1304
1305 /* Move it from uhci->urb_list to uhci->complete_list */
1306 uhci_moveto_complete(uhci, urbp);
1307
1308out:
1309 spin_unlock(&urb->lock);
1310}
1311
/* Detach @urb from the hardware schedule, fixing up the endpoint's data
 * toggle from the last TD that actually completed. */
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head;
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 0;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There are 2 ways the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 *
	 * and a third way the first uncompleted TD is found:
	 *
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 *
	 * FIXME: The toggle fixups won't be 100% reliable until we
	 * change over to using a single queue for each endpoint and
	 * stop the queue before unlinking.
	 */
	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		unsigned int ctrlstat = td_status(td);

		/* Cases 1 and 2: this TD completed, so the next transfer
		 * starts with the toggle following this TD's */
		if (!(ctrlstat & TD_CTRL_ACTIVE) &&
				(uhci_actual_length(ctrlstat) <
				 uhci_expected_length(td_token(td)) ||
				td->list.next == head))
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)) ^ 1);
		/* Case 3: this TD never ran, so its own toggle is next */
		else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)));

		prevactive = ctrlstat & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QH's */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}
1366
/* usb_hcd dequeue entry point: start unlinking @urb.  The URB moves to
 * urb_remove_list and is given back later from the scan path, after the
 * hardware can no longer be touching it. */
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	list_del_init(&urbp->urb_list);

	uhci_unlink_generic(uhci, urb);

	/* If the frame counter has advanced since the current removal
	 * batch was queued, that batch can be completed now */
	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age) {
		uhci_remove_pending_urbps(uhci);
		uhci->urb_remove_age = uhci->frame_number;
	}

	/* If we're the first, set the next interrupt bit */
	if (list_empty(&uhci->urb_remove_list))
		uhci_set_next_interrupt(uhci);
	list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}
1396
1397static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
1398{
1399 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1400 struct list_head *head;
1401 struct uhci_td *td;
1402 int count = 0;
1403
1404 uhci_dec_fsbr(uhci, urb);
1405
1406 urbp->fsbr_timeout = 1;
1407
1408 /*
1409 * Ideally we would want to fix qh->element as well, but it's
1410 * read/write by the HC, so that can introduce a race. It's not
1411 * really worth the hassle
1412 */
1413
1414 head = &urbp->td_list;
1415 list_for_each_entry(td, head, list) {
1416 /*
1417 * Make sure we don't do the last one (since it'll have the
1418 * TERM bit set) as well as we skip every so many TD's to
1419 * make sure it doesn't hog the bandwidth
1420 */
1421 if (td->list.next != head && (count % DEPTH_INTERVAL) ==
1422 (DEPTH_INTERVAL - 1))
1423 td->link |= UHCI_PTR_DEPTH;
1424
1425 count++;
1426 }
1427
1428 return 0;
1429}
1430
1431static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1432{
1433 struct uhci_qh *qh, *tmp;
1434
1435 list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
1436 list_del_init(&qh->remove_list);
1437
1438 uhci_free_qh(uhci, qh);
1439 }
1440}
1441
1442static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1443{
1444 struct uhci_td *td, *tmp;
1445
1446 list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
1447 list_del_init(&td->remove_list);
1448
1449 uhci_free_td(uhci, td);
1450 }
1451}
1452
/* Give one completed URB back to its driver.  uhci->lock must be held
 * on entry; it is dropped around the giveback (hence the sparse
 * annotations) because the completion handler may resubmit URBs or
 * otherwise re-enter the HCD. */
static void
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci_destroy_urb_priv(uhci, urb);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(hcd, urb, regs);
	spin_lock(&uhci->lock);
}
1466
1467static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
1468{
1469 struct urb_priv *urbp, *tmp;
1470
1471 list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
1472 struct urb *urb = urbp->urb;
1473
1474 list_del_init(&urbp->urb_list);
1475 uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
1476 }
1477}
1478
/* Move every URB awaiting removal onto the complete list, so the next
 * uhci_finish_completion() pass gives them back to their drivers. */
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
{

	/* Splice the urb_remove_list onto the end of the complete_list */
	list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
}
1485
/* Process events in the schedule, but only in one thread at a time */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	/* Don't allow re-entrant calls: a second caller just flags a
	 * rescan and lets the running scan loop again */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
 rescan:
	uhci->need_rescan = 0;

	uhci_get_current_frame_number(uhci);

	/* Each pending-removal batch may be processed only once the
	 * frame counter has advanced past the frame it was queued in
	 * (the *_remove_age bookkeeping records that frame) */
	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age)
		uhci_free_pending_qhs(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age)
		uhci_remove_pending_urbps(uhci);

	/* Walk the list of pending URBs to see which ones completed
	 * (must be _safe because uhci_transfer_result() dequeues URBs) */
	list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
		struct urb *urb = urbp->urb;

		/* Checks the status and does all of the magic necessary */
		uhci_transfer_result(uhci, urb);
	}
	uhci_finish_completion(uhci, regs);

	/* If the controller is stopped, we can finish these off right now */
	if (uhci->is_stopped) {
		uhci_free_pending_qhs(uhci);
		uhci_free_pending_tds(uhci);
		uhci_remove_pending_urbps(uhci);
	}

	/* Completion handlers may have queued more work meanwhile */
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (list_empty(&uhci->urb_remove_list) &&
	    list_empty(&uhci->td_remove_list) &&
	    list_empty(&uhci->qh_remove_list))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);

	/* Wake up anyone waiting for an URB to complete */
	wake_up_all(&uhci->waitqh);
}