Diffstat (limited to 'drivers/usb/host')
-rw-r--r--  drivers/usb/host/Kconfig          12
-rw-r--r--  drivers/usb/host/Makefile          1
-rw-r--r--  drivers/usb/host/isp1362-hcd.c  2905
-rw-r--r--  drivers/usb/host/isp1362.h      1079
4 files changed, 3997 insertions, 0 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 41b8d7de2e07..9b43b226817f 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -159,6 +159,18 @@ config USB_ISP1760_HCD
159	  To compile this driver as a module, choose M here: the
160	  module will be called isp1760.
161
162config USB_ISP1362_HCD
163 tristate "ISP1362 HCD support"
164 depends on USB
165 default N
166 ---help---
167 Supports the Philips ISP1362 chip as a host controller
168
169 This driver does not support isochronous transfers.
170
171 To compile this driver as a module, choose M here: the
172 module will be called isp1362-hcd.
173
174config USB_OHCI_HCD
175	tristate "OHCI HCD support"
176	depends on USB && USB_ARCH_HAS_OHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 289d748bb414..f58b2494c44a 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_PCI) += pci-quirks.o
21obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
22obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
23obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
24obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o
25obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
26obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
27obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
new file mode 100644
index 000000000000..5819e10a146c
--- /dev/null
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -0,0 +1,2905 @@
1/*
2 * ISP1362 HCD (Host Controller Driver) for USB.
3 *
4 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
5 *
6 * Derived from the SL811 HCD, rewritten for ISP116x.
7 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
8 *
9 * Portions:
10 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
11 * Copyright (C) 2004 David Brownell
12 */
13
14/*
15 * The ISP1362 chip requires a large delay (300ns and 462ns) between
16 * accesses to the address and data register.
17 * The following timing options exist:
18 *
19 * 1. Configure your memory controller to add such delays if it can (the best)
20 * 2. Implement a platform-specific delay function, possibly
21 * combined with configuring the memory controller; see
22 * include/linux/usb/isp1362.h for more info.
23 * 3. Use ndelay (easiest, poorest).
24 *
25 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
26 * platform specific section of isp1362.h to select the appropriate variant.
27 *
28 * Also note that according to the Philips "ISP1362 Errata" document
29 * Rev 1.00 from 27 May, data corruption may occur when the #WR signal
30 * is reasserted (even with #CS deasserted) within 132ns after a
31 * write cycle to any controller register. If the hardware doesn't
32 * implement the recommended fix (gating the #WR with #CS) software
33 * must ensure that no further write cycle (not necessarily to the chip!)
34 * is issued by the CPU within this interval.
35 *
36 * For PXA25x this can be ensured by using VLIO with the maximum
37 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
38 */
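/*
 * Illustration only, not part of this driver: a minimal sketch of timing
 * option 2 above -- a board-level delay hook built on ndelay().  The hook
 * name and its exact signature are assumptions here; how such a hook is
 * wired up through the platform data is described in
 * include/linux/usb/isp1362.h, and USE_PLATFORM_DELAY must be set in the
 * platform specific section of isp1362.h for the driver to use it.
 *
 *	static void board_isp1362_delay(struct device *dev, int delay_ns)
 *	{
 *		// crude but sufficient to honor the 300/462 ns gaps
 *		ndelay(delay_ns);
 *	}
 */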
39
40#ifdef CONFIG_USB_DEBUG
41# define ISP1362_DEBUG
42#else
43# undef ISP1362_DEBUG
44#endif
45
46/*
47 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
48 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
49 * requests are carried out in separate frames. This will delay any SETUP
50 * packets until the start of the next frame so that this situation is
51 * unlikely to occur (and makes usbtest happy running with a PXA255 target
52 * device).
53 */
54#undef BUGGY_PXA2XX_UDC_USBTEST
55
56#undef PTD_TRACE
57#undef URB_TRACE
58#undef VERBOSE
59#undef REGISTERS
60
61/* This enables a memory test on the ISP1362 chip memory to make sure the
62 * chip access timing is correct.
63 */
64#undef CHIP_BUFFER_TEST
65
66#include <linux/module.h>
67#include <linux/moduleparam.h>
68#include <linux/kernel.h>
69#include <linux/delay.h>
70#include <linux/ioport.h>
71#include <linux/sched.h>
72#include <linux/slab.h>
73#include <linux/smp_lock.h>
74#include <linux/errno.h>
75#include <linux/init.h>
76#include <linux/list.h>
77#include <linux/interrupt.h>
78#include <linux/usb.h>
79#include <linux/usb/isp1362.h>
80#include <linux/platform_device.h>
81#include <linux/pm.h>
82#include <linux/io.h>
83#include <linux/bitops.h>
84
85#include <asm/irq.h>
86#include <asm/system.h>
87#include <asm/byteorder.h>
88#include <asm/unaligned.h>
89
90static int dbg_level;
91#ifdef ISP1362_DEBUG
92module_param(dbg_level, int, 0644);
93#else
94module_param(dbg_level, int, 0);
95#define STUB_DEBUG_FILE
96#endif
97
98#include "../core/hcd.h"
99#include "../core/usb.h"
100#include "isp1362.h"
101
102
103#define DRIVER_VERSION "2005-04-04"
104#define DRIVER_DESC "ISP1362 USB Host Controller Driver"
105
106MODULE_DESCRIPTION(DRIVER_DESC);
107MODULE_LICENSE("GPL");
108
109static const char hcd_name[] = "isp1362-hcd";
110
111static void isp1362_hc_stop(struct usb_hcd *hcd);
112static int isp1362_hc_start(struct usb_hcd *hcd);
113
114/*-------------------------------------------------------------------------*/
115
116/*
117 * When called from the interrupt handler only isp1362_hcd->irqenb is modified,
118 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINTENB upon
119 * completion.
120 * We don't need a 'disable' counterpart, since interrupts will be disabled
121 * only by the interrupt handler.
122 */
123static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
124{
125 if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
126 return;
127 if (mask & ~isp1362_hcd->irqenb)
128 isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
129 isp1362_hcd->irqenb |= mask;
130 if (isp1362_hcd->irq_active)
131 return;
132 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
133}
134
135/*-------------------------------------------------------------------------*/
136
137static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
138 u16 offset)
139{
140 struct isp1362_ep_queue *epq = NULL;
141
142 if (offset < isp1362_hcd->istl_queue[1].buf_start)
143 epq = &isp1362_hcd->istl_queue[0];
144 else if (offset < isp1362_hcd->intl_queue.buf_start)
145 epq = &isp1362_hcd->istl_queue[1];
146 else if (offset < isp1362_hcd->atl_queue.buf_start)
147 epq = &isp1362_hcd->intl_queue;
148 else if (offset < isp1362_hcd->atl_queue.buf_start +
149 isp1362_hcd->atl_queue.buf_size)
150 epq = &isp1362_hcd->atl_queue;
151
152 if (epq)
153 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
154 else
155 pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
156
157 return epq;
158}
159
160static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
161{
162 int offset;
163
164 if (index * epq->blk_size > epq->buf_size) {
165 pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
166 epq->buf_size / epq->blk_size);
167 return -EINVAL;
168 }
169 offset = epq->buf_start + index * epq->blk_size;
170 DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
171
172 return offset;
173}
174
175/*-------------------------------------------------------------------------*/
176
177static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
178 int mps)
179{
180 u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
181
182 xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
183 if (xfer_size < size && xfer_size % mps)
184 xfer_size -= xfer_size % mps;
185
186 return xfer_size;
187}
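/*
 * Worked example for max_transfer_size() above; the concrete numbers are
 * assumptions for illustration, not values read from the chip: with
 * blk_size = 64, buf_avail = 2 and PTD_HEADER_SIZE = 8, a request of
 * size = 200 with mps = 64 is first clamped to 2 * 64 - 8 = 120 bytes of
 * payload space and then, because that is still a partial transfer,
 * rounded down to a multiple of mps, i.e. 64 bytes.
 */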
188
189static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
190 struct isp1362_ep *ep, u16 len)
191{
192 int ptd_offset = -EINVAL;
193 int index;
194 int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
195 int found = -1;
196 int last = -1;
197
198 BUG_ON(len > epq->buf_size);
199
200 if (!epq->buf_avail)
201 return -ENOMEM;
202
203 if (ep->num_ptds)
204 pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
205 epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
206 BUG_ON(ep->num_ptds != 0);
207
208 for (index = 0; index <= epq->buf_count - num_ptds; index++) {
209 if (test_bit(index, &epq->buf_map))
210 continue;
211 found = index;
212 for (last = index + 1; last < index + num_ptds; last++) {
213 if (test_bit(last, &epq->buf_map)) {
214 found = -1;
215 break;
216 }
217 }
218 if (found >= 0)
219 break;
220 }
221 if (found < 0)
222 return -EOVERFLOW;
223
224 DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
225 num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
226 ptd_offset = get_ptd_offset(epq, found);
227 WARN_ON(ptd_offset < 0);
228 ep->ptd_offset = ptd_offset;
229 ep->num_ptds += num_ptds;
230 epq->buf_avail -= num_ptds;
231 BUG_ON(epq->buf_avail > epq->buf_count);
232 ep->ptd_index = found;
233 for (index = found; index < last; index++)
234 __set_bit(index, &epq->buf_map);
235 DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
236 __func__, epq->name, ep->ptd_index, ep->ptd_offset,
237 epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
238
239 return found;
240}
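/*
 * Worked example for the num_ptds calculation in claim_ptd_buffers()
 * above (blk_size = 64 and PTD_HEADER_SIZE = 8 are assumed here for
 * illustration): len = 100 gives ((100 + 8 - 1) / 64) + 1 = 2 blocks,
 * i.e. 128 bytes -- the smallest number of whole blocks that holds the
 * PTD header plus 100 bytes of payload.  The search loop then looks for
 * two adjacent clear bits in buf_map.
 */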
241
242static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
243{
244 int index = ep->ptd_index;
245 int last = ep->ptd_index + ep->num_ptds;
246
247 if (last > epq->buf_count)
248 pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
249 __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
250 ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
251 epq->buf_map, epq->skip_map);
252 BUG_ON(last > epq->buf_count);
253
254 for (; index < last; index++) {
255 __clear_bit(index, &epq->buf_map);
256 __set_bit(index, &epq->skip_map);
257 }
258 epq->buf_avail += ep->num_ptds;
259 epq->ptd_count--;
260
261 BUG_ON(epq->buf_avail > epq->buf_count);
262 BUG_ON(epq->ptd_count > epq->buf_count);
263
264 DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
265 __func__, epq->name,
266 ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
267 DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
268 epq->buf_map, epq->skip_map);
269
270 ep->num_ptds = 0;
271 ep->ptd_offset = -EINVAL;
272 ep->ptd_index = -EINVAL;
273}
274
275/*-------------------------------------------------------------------------*/
276
277/*
278 Set up PTDs.
279*/
280static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
281 struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
282 u16 fno)
283{
284 struct ptd *ptd;
285 int toggle;
286 int dir;
287 u16 len;
288 size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
289
290 DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
291
292 ptd = &ep->ptd;
293
294 ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
295
296 switch (ep->nextpid) {
297 case USB_PID_IN:
298 toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
299 dir = PTD_DIR_IN;
300 if (usb_pipecontrol(urb->pipe)) {
301 len = min_t(size_t, ep->maxpacket, buf_len);
302 } else if (usb_pipeisoc(urb->pipe)) {
303 len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
304 ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
305 } else
306 len = max_transfer_size(epq, buf_len, ep->maxpacket);
307 DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
308 (int)buf_len);
309 break;
310 case USB_PID_OUT:
311 toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
312 dir = PTD_DIR_OUT;
313 if (usb_pipecontrol(urb->pipe))
314 len = min_t(size_t, ep->maxpacket, buf_len);
315 else if (usb_pipeisoc(urb->pipe))
316 len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
317 else
318 len = max_transfer_size(epq, buf_len, ep->maxpacket);
319 if (len == 0)
320 pr_info("%s: Sending ZERO packet: %d\n", __func__,
321 urb->transfer_flags & URB_ZERO_PACKET);
322 DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
323 (int)buf_len);
324 break;
325 case USB_PID_SETUP:
326 toggle = 0;
327 dir = PTD_DIR_SETUP;
328 len = sizeof(struct usb_ctrlrequest);
329 DBG(1, "%s: SETUP len %d\n", __func__, len);
330 ep->data = urb->setup_packet;
331 break;
332 case USB_PID_ACK:
333 toggle = 1;
334 len = 0;
335 dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
336 PTD_DIR_OUT : PTD_DIR_IN;
337 DBG(1, "%s: ACK len %d\n", __func__, len);
338 break;
339 default:
340 toggle = dir = len = 0;
341 pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
342 BUG_ON(1);
343 }
344
345 ep->length = len;
346 if (!len)
347 ep->data = NULL;
348
349 ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
350 ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
351 PTD_EP(ep->epnum);
352 ptd->len = PTD_LEN(len) | PTD_DIR(dir);
353 ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
354
355 if (usb_pipeint(urb->pipe)) {
356 ptd->faddr |= PTD_SF_INT(ep->branch);
357 ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
358 }
359 if (usb_pipeisoc(urb->pipe))
360 ptd->faddr |= PTD_SF_ISO(fno);
361
362 DBG(1, "%s: Finished\n", __func__);
363}
364
365static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
366 struct isp1362_ep_queue *epq)
367{
368 struct ptd *ptd = &ep->ptd;
369 int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
370
371 _BUG_ON(ep->ptd_offset < 0);
372
373 prefetch(ptd);
374 isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
375 if (len)
376 isp1362_write_buffer(isp1362_hcd, ep->data,
377 ep->ptd_offset + PTD_HEADER_SIZE, len);
378
379 dump_ptd(ptd);
380 dump_ptd_out_data(ptd, ep->data);
381}
382
383static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
384 struct isp1362_ep_queue *epq)
385{
386 struct ptd *ptd = &ep->ptd;
387 int act_len;
388
389 WARN_ON(list_empty(&ep->active));
390 BUG_ON(ep->ptd_offset < 0);
391
392 list_del_init(&ep->active);
393 DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
394
395 prefetchw(ptd);
396 isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
397 dump_ptd(ptd);
398 act_len = PTD_GET_COUNT(ptd);
399 if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
400 return;
401 if (act_len > ep->length)
402 pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
403 ep->ptd_offset, act_len, ep->length);
404 BUG_ON(act_len > ep->length);
405 /* Only transfer the amount of data that has actually been overwritten
406 * in the chip buffer. We don't want any data that doesn't belong to the
407 * transfer to leak out of the chip to the caller's transfer buffer!
408 */
409 prefetchw(ep->data);
410 isp1362_read_buffer(isp1362_hcd, ep->data,
411 ep->ptd_offset + PTD_HEADER_SIZE, act_len);
412 dump_ptd_in_data(ptd, ep->data);
413}
414
415/*
416 * INT PTDs will stay in the chip until data is available.
417 * This function will remove a PTD from the chip when the URB is dequeued.
418 * Must be called with the spinlock held and IRQs disabled
419 */
420static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
421
422{
423 int index;
424 struct isp1362_ep_queue *epq;
425
426 DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
427 BUG_ON(ep->ptd_offset < 0);
428
429 epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
430 BUG_ON(!epq);
431
432 /* put ep in remove_list for cleanup */
433 WARN_ON(!list_empty(&ep->remove_list));
434 list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
435 /* let SOF interrupt handle the cleanup */
436 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
437
438 index = ep->ptd_index;
439 if (index < 0)
440 /* ISO queues don't have SKIP registers */
441 return;
442
443 DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
444 index, ep->ptd_offset, epq->skip_map, 1 << index);
445
446 /* prevent further processing of PTD (will be effective after next SOF) */
447 epq->skip_map |= 1 << index;
448 if (epq == &isp1362_hcd->atl_queue) {
449 DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
450 isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
451 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
452 if (~epq->skip_map == 0)
453 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
454 } else if (epq == &isp1362_hcd->intl_queue) {
455 DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
456 isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
457 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
458 if (~epq->skip_map == 0)
459 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
460 }
461}
462
463/*
464 Take done or failed requests out of the schedule. Give back
465 processed URBs.
466*/
467static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
468 struct urb *urb, int status)
469 __releases(isp1362_hcd->lock)
470 __acquires(isp1362_hcd->lock)
471{
472 urb->hcpriv = NULL;
473 ep->error_count = 0;
474
475 if (usb_pipecontrol(urb->pipe))
476 ep->nextpid = USB_PID_SETUP;
477
478 URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
479 ep->num_req, usb_pipedevice(urb->pipe),
480 usb_pipeendpoint(urb->pipe),
481 !usb_pipein(urb->pipe) ? "out" : "in",
482 usb_pipecontrol(urb->pipe) ? "ctrl" :
483 usb_pipeint(urb->pipe) ? "int" :
484 usb_pipebulk(urb->pipe) ? "bulk" :
485 "iso",
486 urb->actual_length, urb->transfer_buffer_length,
487 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
488 "short_ok" : "", urb->status);
489
490
491 usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
492 spin_unlock(&isp1362_hcd->lock);
493 usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
494 spin_lock(&isp1362_hcd->lock);
495
496 /* take idle endpoints out of the schedule right away */
497 if (!list_empty(&ep->hep->urb_list))
498 return;
499
500 /* async deschedule */
501 if (!list_empty(&ep->schedule)) {
502 list_del_init(&ep->schedule);
503 return;
504 }
505
506
507 if (ep->interval) {
508 /* periodic deschedule */
509 DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
510 ep, ep->branch, ep->load,
511 isp1362_hcd->load[ep->branch],
512 isp1362_hcd->load[ep->branch] - ep->load);
513 isp1362_hcd->load[ep->branch] -= ep->load;
514 ep->branch = PERIODIC_SIZE;
515 }
516}
517
518/*
519 * Analyze transfer results, handle partial transfers and errors
520*/
521static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
522{
523 struct urb *urb = get_urb(ep);
524 struct usb_device *udev;
525 struct ptd *ptd;
526 int short_ok;
527 u16 len;
528 int urbstat = -EINPROGRESS;
529 u8 cc;
530
531 DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
532
533 udev = urb->dev;
534 ptd = &ep->ptd;
535 cc = PTD_GET_CC(ptd);
536 if (cc == PTD_NOTACCESSED) {
537 pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
538 ep->num_req, ptd);
539 cc = PTD_DEVNOTRESP;
540 }
541
542 short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
543 len = urb->transfer_buffer_length - urb->actual_length;
544
545 /* Data underrun is special. For allowed underrun
546 we clear the error and continue as normal. For
547 forbidden underrun we finish the DATA stage
548 immediately, while for control transfers
549 we do a STATUS stage.
550 */
551 if (cc == PTD_DATAUNDERRUN) {
552 if (short_ok) {
553 DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
554 __func__, ep->num_req, short_ok ? "" : "not_",
555 PTD_GET_COUNT(ptd), ep->maxpacket, len);
556 cc = PTD_CC_NOERROR;
557 urbstat = 0;
558 } else {
559 DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
560 __func__, ep->num_req,
561 usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
562 short_ok ? "" : "not_",
563 PTD_GET_COUNT(ptd), ep->maxpacket, len);
564 if (usb_pipecontrol(urb->pipe)) {
565 ep->nextpid = USB_PID_ACK;
566 /* save the data underrun error code for later and
567 * proceed with the status stage
568 */
569 urb->actual_length += PTD_GET_COUNT(ptd);
570 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
571
572 if (urb->status == -EINPROGRESS)
573 urb->status = cc_to_error[PTD_DATAUNDERRUN];
574 } else {
575 usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
576 PTD_GET_TOGGLE(ptd));
577 urbstat = cc_to_error[PTD_DATAUNDERRUN];
578 }
579 goto out;
580 }
581 }
582
583 if (cc != PTD_CC_NOERROR) {
584 if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
585 urbstat = cc_to_error[cc];
586 DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
587 __func__, ep->num_req, ep->nextpid, urbstat, cc,
588 ep->error_count);
589 }
590 goto out;
591 }
592
593 switch (ep->nextpid) {
594 case USB_PID_OUT:
595 if (PTD_GET_COUNT(ptd) != ep->length)
596 pr_err("%s: count=%d len=%d\n", __func__,
597 PTD_GET_COUNT(ptd), ep->length);
598 BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
599 urb->actual_length += ep->length;
600 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
601 usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
602 if (urb->actual_length == urb->transfer_buffer_length) {
603 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
604 ep->num_req, len, ep->maxpacket, urbstat);
605 if (usb_pipecontrol(urb->pipe)) {
606 DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
607 ep->num_req,
608 usb_pipein(urb->pipe) ? "IN" : "OUT");
609 ep->nextpid = USB_PID_ACK;
610 } else {
611 if (len % ep->maxpacket ||
612 !(urb->transfer_flags & URB_ZERO_PACKET)) {
613 urbstat = 0;
614 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
615 __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
616 urbstat, len, ep->maxpacket, urb->actual_length);
617 }
618 }
619 }
620 break;
621 case USB_PID_IN:
622 len = PTD_GET_COUNT(ptd);
623 BUG_ON(len > ep->length);
624 urb->actual_length += len;
625 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
626 usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
627 /* if transfer completed or (allowed) data underrun */
628 if ((urb->transfer_buffer_length == urb->actual_length) ||
629 len % ep->maxpacket) {
630 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
631 ep->num_req, len, ep->maxpacket, urbstat);
632 if (usb_pipecontrol(urb->pipe)) {
633 DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
634 ep->num_req,
635 usb_pipein(urb->pipe) ? "IN" : "OUT");
636 ep->nextpid = USB_PID_ACK;
637 } else {
638 urbstat = 0;
639 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
640 __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
641 urbstat, len, ep->maxpacket, urb->actual_length);
642 }
643 }
644 break;
645 case USB_PID_SETUP:
646 if (urb->transfer_buffer_length == urb->actual_length) {
647 ep->nextpid = USB_PID_ACK;
648 } else if (usb_pipeout(urb->pipe)) {
649 usb_settoggle(udev, 0, 1, 1);
650 ep->nextpid = USB_PID_OUT;
651 } else {
652 usb_settoggle(udev, 0, 0, 1);
653 ep->nextpid = USB_PID_IN;
654 }
655 break;
656 case USB_PID_ACK:
657 DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
658 urbstat);
659 WARN_ON(urbstat != -EINPROGRESS);
660 urbstat = 0;
661 ep->nextpid = 0;
662 break;
663 default:
664 BUG_ON(1);
665 }
666
667 out:
668 if (urbstat != -EINPROGRESS) {
669 DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
670 ep, ep->num_req, urb, urbstat);
671 finish_request(isp1362_hcd, ep, urb, urbstat);
672 }
673}
674
675static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
676{
677 struct isp1362_ep *ep;
678 struct isp1362_ep *tmp;
679
680 list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
681 struct isp1362_ep_queue *epq =
682 get_ptd_queue(isp1362_hcd, ep->ptd_offset);
683 int index = ep->ptd_index;
684
685 BUG_ON(epq == NULL);
686 if (index >= 0) {
687 DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
688 BUG_ON(ep->num_ptds == 0);
689 release_ptd_buffers(epq, ep);
690 }
691 if (!list_empty(&ep->hep->urb_list)) {
692 struct urb *urb = get_urb(ep);
693
694 DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
695 ep->num_req, ep);
696 finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
697 }
698 WARN_ON(list_empty(&ep->active));
699 if (!list_empty(&ep->active)) {
700 list_del_init(&ep->active);
701 DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
702 }
703 list_del_init(&ep->remove_list);
704 DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
705 }
706 DBG(1, "%s: Done\n", __func__);
707}
708
709static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
710{
711 if (count > 0) {
712 if (count < isp1362_hcd->atl_queue.ptd_count)
713 isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
714 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
715 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
716 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
717 } else
718 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
719}
720
721static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
722{
723 isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
724 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
725 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
726}
727
728static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
729{
730 isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
731 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
732 HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
733}
734
735static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
736 struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
737{
738 int index = epq->free_ptd;
739
740 prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
741 index = claim_ptd_buffers(epq, ep, ep->length);
742 if (index == -ENOMEM) {
743 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
744 ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
745 return index;
746 } else if (index == -EOVERFLOW) {
747 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
748 __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
749 epq->buf_map, epq->skip_map);
750 return index;
751 } else
752 BUG_ON(index < 0);
753 list_add_tail(&ep->active, &epq->active);
754 DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
755 ep, ep->num_req, ep->length, &epq->active);
756 DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
757 ep->ptd_offset, ep, ep->num_req);
758 isp1362_write_ptd(isp1362_hcd, ep, epq);
759 __clear_bit(ep->ptd_index, &epq->skip_map);
760
761 return 0;
762}
763
764static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
765{
766 int ptd_count = 0;
767 struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
768 struct isp1362_ep *ep;
769 int defer = 0;
770
771 if (atomic_read(&epq->finishing)) {
772 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
773 return;
774 }
775
776 list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
777 struct urb *urb = get_urb(ep);
778 int ret;
779
780 if (!list_empty(&ep->active)) {
781 DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
782 continue;
783 }
784
785 DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
786 ep, ep->num_req);
787
788 ret = submit_req(isp1362_hcd, urb, ep, epq);
789 if (ret == -ENOMEM) {
790 defer = 1;
791 break;
792 } else if (ret == -EOVERFLOW) {
793 defer = 1;
794 continue;
795 }
796#ifdef BUGGY_PXA2XX_UDC_USBTEST
797 defer = ep->nextpid == USB_PID_SETUP;
798#endif
799 ptd_count++;
800 }
801
802 /* Avoid starvation of endpoints */
803 if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
804 DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
805 list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
806 }
807 if (ptd_count || defer)
808 enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
809
810 epq->ptd_count += ptd_count;
811 if (epq->ptd_count > epq->stat_maxptds) {
812 epq->stat_maxptds = epq->ptd_count;
813 DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
814 }
815}
816
817static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
818{
819 int ptd_count = 0;
820 struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
821 struct isp1362_ep *ep;
822
823 if (atomic_read(&epq->finishing)) {
824 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
825 return;
826 }
827
828 list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
829 struct urb *urb = get_urb(ep);
830 int ret;
831
832 if (!list_empty(&ep->active)) {
833 DBG(1, "%s: Skipping active %s ep %p\n", __func__,
834 epq->name, ep);
835 continue;
836 }
837
838 DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
839 epq->name, ep, ep->num_req);
840 ret = submit_req(isp1362_hcd, urb, ep, epq);
841 if (ret == -ENOMEM)
842 break;
843 else if (ret == -EOVERFLOW)
844 continue;
845 ptd_count++;
846 }
847
848 if (ptd_count) {
849 static int last_count;
850
851 if (ptd_count != last_count) {
852 DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
853 last_count = ptd_count;
854 }
855 enable_intl_transfers(isp1362_hcd);
856 }
857
858 epq->ptd_count += ptd_count;
859 if (epq->ptd_count > epq->stat_maxptds)
860 epq->stat_maxptds = epq->ptd_count;
861}
862
863static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
864{
865 u16 ptd_offset = ep->ptd_offset;
866 int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
867
868 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
869 ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
870
871 ptd_offset += num_ptds * epq->blk_size;
872 if (ptd_offset < epq->buf_start + epq->buf_size)
873 return ptd_offset;
874 else
875 return -ENOMEM;
876}
877
878static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
879{
880 int ptd_count = 0;
881 int flip = isp1362_hcd->istl_flip;
882 struct isp1362_ep_queue *epq;
883 int ptd_offset;
884 struct isp1362_ep *ep;
885 struct isp1362_ep *tmp;
886 u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
887
888 fill2:
889 epq = &isp1362_hcd->istl_queue[flip];
890 if (atomic_read(&epq->finishing)) {
891 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
892 return;
893 }
894
895 if (!list_empty(&epq->active))
896 return;
897
898 ptd_offset = epq->buf_start;
899 list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
900 struct urb *urb = get_urb(ep);
901 s16 diff = fno - (u16)urb->start_frame;
902
903 DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
904
905 if (diff > urb->number_of_packets) {
906 /* time frame for this URB has elapsed */
907 finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
908 continue;
909 } else if (diff < -1) {
910 /* URB is not due in this frame or the next one.
911 * Comparing with '-1' instead of '0' accounts for double
912 * buffering in the ISP1362 which enables us to queue the PTD
913 * one frame ahead of time
914 */
915 } else if (diff == -1) {
916 /* submit PTDs that are due in the next frame */
917 prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
918 if (ptd_offset + PTD_HEADER_SIZE + ep->length >
919 epq->buf_start + epq->buf_size) {
920 pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
921 __func__, ep->length);
922 continue;
923 }
924 ep->ptd_offset = ptd_offset;
925 list_add_tail(&ep->active, &epq->active);
926
927 ptd_offset = next_ptd(epq, ep);
928 if (ptd_offset < 0) {
929 pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
930 ep->num_req, epq->name);
931 break;
932 }
933 }
934 }
935 list_for_each_entry(ep, &epq->active, active) {
936 if (epq->active.next == &ep->active)
937 ep->ptd.mps |= PTD_LAST_MSK;
938 isp1362_write_ptd(isp1362_hcd, ep, epq);
939 ptd_count++;
940 }
941
942 if (ptd_count)
943 enable_istl_transfers(isp1362_hcd, flip);
944
945 epq->ptd_count += ptd_count;
946 if (epq->ptd_count > epq->stat_maxptds)
947 epq->stat_maxptds = epq->ptd_count;
948
949 /* check whether the second ISTL buffer may also be filled */
950 if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
951 (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
952 fno++;
953 ptd_count = 0;
954 flip = 1 - flip;
955 goto fill2;
956 }
957}
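/*
 * Illustration of the frame arithmetic in start_iso_transfers() above
 * (the frame numbers are made up): with HCFMNUM = 0x02ff and
 * urb->start_frame = 0x0300, diff = -1, so the PTD is prepared now and
 * queued into the ISTL buffer the chip will process in frame 0x0300 --
 * one frame ahead, as the double buffering comment above explains.
 */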
958
959static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
960 struct isp1362_ep_queue *epq)
961{
962 struct isp1362_ep *ep;
963 struct isp1362_ep *tmp;
964
965 if (list_empty(&epq->active)) {
966 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
967 return;
968 }
969
970 DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
971
972 atomic_inc(&epq->finishing);
973 list_for_each_entry_safe(ep, tmp, &epq->active, active) {
974 int index = ep->ptd_index;
975
976 DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
977 index, ep->ptd_offset);
978
979 BUG_ON(index < 0);
980 if (__test_and_clear_bit(index, &done_map)) {
981 isp1362_read_ptd(isp1362_hcd, ep, epq);
982 epq->free_ptd = index;
983 BUG_ON(ep->num_ptds == 0);
984 release_ptd_buffers(epq, ep);
985
986 DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
987 ep, ep->num_req);
988 if (!list_empty(&ep->remove_list)) {
989 list_del_init(&ep->remove_list);
990 DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
991 }
992 DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
993 ep, ep->num_req);
994 postproc_ep(isp1362_hcd, ep);
995 }
996 if (!done_map)
997 break;
998 }
999 if (done_map)
1000 pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
1001 epq->skip_map);
1002 atomic_dec(&epq->finishing);
1003}
1004
1005static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
1006{
1007 struct isp1362_ep *ep;
1008 struct isp1362_ep *tmp;
1009
1010 if (list_empty(&epq->active)) {
1011 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
1012 return;
1013 }
1014
1015 DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
1016
1017 atomic_inc(&epq->finishing);
1018 list_for_each_entry_safe(ep, tmp, &epq->active, active) {
1019 DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
1020
1021 isp1362_read_ptd(isp1362_hcd, ep, epq);
1022 DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
1023 postproc_ep(isp1362_hcd, ep);
1024 }
1025 WARN_ON(epq->blk_size != 0);
1026 atomic_dec(&epq->finishing);
1027}
1028
1029static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1030{
1031 int handled = 0;
1032 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1033 u16 irqstat;
1034 u16 svc_mask;
1035
1036 spin_lock(&isp1362_hcd->lock);
1037
1038 BUG_ON(isp1362_hcd->irq_active++);
1039
1040 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1041
1042 irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1043 DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1044
1045 /* only handle interrupts that are currently enabled */
1046 irqstat &= isp1362_hcd->irqenb;
1047 isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1048 svc_mask = irqstat;
1049
1050 if (irqstat & HCuPINT_SOF) {
1051 isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1052 isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1053 handled = 1;
1054 svc_mask &= ~HCuPINT_SOF;
1055 DBG(3, "%s: SOF\n", __func__);
1056 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1057 if (!list_empty(&isp1362_hcd->remove_list))
1058 finish_unlinks(isp1362_hcd);
1059 if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1060 if (list_empty(&isp1362_hcd->atl_queue.active)) {
1061 start_atl_transfers(isp1362_hcd);
1062 } else {
1063 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1064 isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1065 isp1362_hcd->atl_queue.skip_map);
1066 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1067 }
1068 }
1069 }
1070
1071 if (irqstat & HCuPINT_ISTL0) {
1072 isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1073 handled = 1;
1074 svc_mask &= ~HCuPINT_ISTL0;
1075 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1076 DBG(1, "%s: ISTL0\n", __func__);
1077 WARN_ON((int)!!isp1362_hcd->istl_flip);
1078 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL0_ACTIVE);
1079 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL0_DONE));
1080 isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1081 }
1082
1083 if (irqstat & HCuPINT_ISTL1) {
1084 isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1085 handled = 1;
1086 svc_mask &= ~HCuPINT_ISTL1;
1087 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1088 DBG(1, "%s: ISTL1\n", __func__);
1089 WARN_ON(!(int)isp1362_hcd->istl_flip);
1090 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL1_ACTIVE);
1091 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL1_DONE));
1092 isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1093 }
1094
1095 if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1096 WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1097 (HCuPINT_ISTL0 | HCuPINT_ISTL1));
1098 finish_iso_transfers(isp1362_hcd,
1099 &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1100 start_iso_transfers(isp1362_hcd);
1101 isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1102 }
1103
1104 if (irqstat & HCuPINT_INTL) {
1105 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1106 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1107 isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1108
1109 DBG(2, "%s: INTL\n", __func__);
1110
1111 svc_mask &= ~HCuPINT_INTL;
1112
1113 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1114 if (~(done_map | skip_map) == 0)
1115 /* All PTDs are finished, disable INTL processing entirely */
1116 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1117
1118 handled = 1;
1119 WARN_ON(!done_map);
1120 if (done_map) {
1121 DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1122 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1123 start_intl_transfers(isp1362_hcd);
1124 }
1125 }
1126
1127 if (irqstat & HCuPINT_ATL) {
1128 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1129 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1130 isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1131
1132 DBG(2, "%s: ATL\n", __func__);
1133
1134 svc_mask &= ~HCuPINT_ATL;
1135
1136 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1137 if (~(done_map | skip_map) == 0)
1138 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1139 if (done_map) {
1140 DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1141 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1142 start_atl_transfers(isp1362_hcd);
1143 }
1144 handled = 1;
1145 }
1146
1147 if (irqstat & HCuPINT_OPR) {
1148 u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1149 isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1150
1151 svc_mask &= ~HCuPINT_OPR;
1152 DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1153 intstat &= isp1362_hcd->intenb;
1154 if (intstat & OHCI_INTR_UE) {
1155 pr_err("Unrecoverable error\n");
1156 /* FIXME: do here reset or cleanup or whatever */
1157 }
1158 if (intstat & OHCI_INTR_RHSC) {
1159 isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1160 isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1161 isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1162 }
1163 if (intstat & OHCI_INTR_RD) {
1164 pr_info("%s: RESUME DETECTED\n", __func__);
1165 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1166 usb_hcd_resume_root_hub(hcd);
1167 }
1168 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1169 irqstat &= ~HCuPINT_OPR;
1170 handled = 1;
1171 }
1172
1173 if (irqstat & HCuPINT_SUSP) {
1174 isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1175 handled = 1;
1176 svc_mask &= ~HCuPINT_SUSP;
1177
1178 pr_info("%s: SUSPEND IRQ\n", __func__);
1179 }
1180
1181 if (irqstat & HCuPINT_CLKRDY) {
1182 isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1183 handled = 1;
1184 isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1185 svc_mask &= ~HCuPINT_CLKRDY;
1186 pr_info("%s: CLKRDY IRQ\n", __func__);
1187 }
1188
1189 if (svc_mask)
1190 pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1191
1192 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1193 isp1362_hcd->irq_active--;
1194 spin_unlock(&isp1362_hcd->lock);
1195
1196 return IRQ_RETVAL(handled);
1197}
1198
1199/*-------------------------------------------------------------------------*/
1200
1201#define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
1202static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1203{
1204 int i, branch = -ENOSPC;
1205
1206 /* search for the least loaded schedule branch of that interval
1207 * which has enough bandwidth left unreserved.
1208 */
1209 for (i = 0; i < interval; i++) {
1210 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1211 int j;
1212
1213 for (j = i; j < PERIODIC_SIZE; j += interval) {
1214 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1215 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1216 load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1217 break;
1218 }
1219 }
1220 if (j < PERIODIC_SIZE)
1221 continue;
1222 branch = i;
1223 }
1224 }
1225 return branch;
1226}
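/*
 * Worked example for balance() above, with made-up loads: an interrupt
 * endpoint with interval = 4 and load = 3 makes the scan consider
 * branches 0..3; a branch i is usable only if load[j] + 3 does not
 * exceed MAX_PERIODIC_LOAD for every slot j = i, i + 4, i + 8, ... below
 * PERIODIC_SIZE, and among the usable branches the one whose slot i
 * currently carries the least load is returned (-ENOSPC if none fits).
 */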
1227
1228/* NB! ALL the code above this point runs with isp1362_hcd->lock
1229 held, irqs off
1230*/
1231
1232/*-------------------------------------------------------------------------*/
1233
1234static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1235 struct urb *urb,
1236 gfp_t mem_flags)
1237{
1238 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1239 struct usb_device *udev = urb->dev;
1240 unsigned int pipe = urb->pipe;
1241 int is_out = !usb_pipein(pipe);
1242 int type = usb_pipetype(pipe);
1243 int epnum = usb_pipeendpoint(pipe);
1244 struct usb_host_endpoint *hep = urb->ep;
1245 struct isp1362_ep *ep = NULL;
1246 unsigned long flags;
1247 int retval = 0;
1248
1249 DBG(3, "%s: urb %p\n", __func__, urb);
1250
1251 if (type == PIPE_ISOCHRONOUS) {
1252 pr_err("Isochronous transfers not supported\n");
1253 return -ENOSPC;
1254 }
1255
1256 URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1257 usb_pipedevice(pipe), epnum,
1258 is_out ? "out" : "in",
1259 usb_pipecontrol(pipe) ? "ctrl" :
1260 usb_pipeint(pipe) ? "int" :
1261 usb_pipebulk(pipe) ? "bulk" :
1262 "iso",
1263 urb->transfer_buffer_length,
1264 (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1265 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1266 "short_ok" : "");
1267
1268 /* avoid all allocations within spinlocks: request or endpoint */
1269 if (!hep->hcpriv) {
1270 ep = kcalloc(1, sizeof *ep, mem_flags);
1271 if (!ep)
1272 return -ENOMEM;
1273 }
1274 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1275
1276 /* don't submit to a dead or disabled port */
1277 if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1278 (1 << USB_PORT_FEAT_ENABLE)) ||
1279 !HC_IS_RUNNING(hcd->state)) {
1280 kfree(ep);
1281 retval = -ENODEV;
1282 goto fail_not_linked;
1283 }
1284
1285 retval = usb_hcd_link_urb_to_ep(hcd, urb);
1286 if (retval) {
1287 kfree(ep);
1288 goto fail_not_linked;
1289 }
1290
1291 if (hep->hcpriv) {
1292 ep = hep->hcpriv;
1293 } else {
1294 INIT_LIST_HEAD(&ep->schedule);
1295 INIT_LIST_HEAD(&ep->active);
1296 INIT_LIST_HEAD(&ep->remove_list);
1297 ep->udev = usb_get_dev(udev);
1298 ep->hep = hep;
1299 ep->epnum = epnum;
1300 ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
1301 ep->ptd_offset = -EINVAL;
1302 ep->ptd_index = -EINVAL;
1303 usb_settoggle(udev, epnum, is_out, 0);
1304
1305 if (type == PIPE_CONTROL)
1306 ep->nextpid = USB_PID_SETUP;
1307 else if (is_out)
1308 ep->nextpid = USB_PID_OUT;
1309 else
1310 ep->nextpid = USB_PID_IN;
1311
1312 switch (type) {
1313 case PIPE_ISOCHRONOUS:
1314 case PIPE_INTERRUPT:
1315 if (urb->interval > PERIODIC_SIZE)
1316 urb->interval = PERIODIC_SIZE;
1317 ep->interval = urb->interval;
1318 ep->branch = PERIODIC_SIZE;
1319 ep->load = usb_calc_bus_time(udev->speed, !is_out,
1320 (type == PIPE_ISOCHRONOUS),
1321 usb_maxpacket(udev, pipe, is_out)) / 1000;
1322 break;
1323 }
1324 hep->hcpriv = ep;
1325 }
1326 ep->num_req = isp1362_hcd->req_serial++;
1327
1328 /* maybe put endpoint into schedule */
1329 switch (type) {
1330 case PIPE_CONTROL:
1331 case PIPE_BULK:
1332 if (list_empty(&ep->schedule)) {
1333 DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1334 __func__, ep, ep->num_req);
1335 list_add_tail(&ep->schedule, &isp1362_hcd->async);
1336 }
1337 break;
1338 case PIPE_ISOCHRONOUS:
1339 case PIPE_INTERRUPT:
1340 urb->interval = ep->interval;
1341
1342 /* urb submitted for already existing EP */
1343 if (ep->branch < PERIODIC_SIZE)
1344 break;
1345
1346 retval = balance(isp1362_hcd, ep->interval, ep->load);
1347 if (retval < 0) {
1348 pr_err("%s: balance returned %d\n", __func__, retval);
1349 goto fail;
1350 }
1351 ep->branch = retval;
1352 retval = 0;
1353 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1354 DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1355 __func__, isp1362_hcd->fmindex, ep->branch,
1356 ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1357 ~(PERIODIC_SIZE - 1)) + ep->branch,
1358 (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1359
1360 if (list_empty(&ep->schedule)) {
1361 if (type == PIPE_ISOCHRONOUS) {
1362 u16 frame = isp1362_hcd->fmindex;
1363
1364 frame += max_t(u16, 8, ep->interval);
1365 frame &= ~(ep->interval - 1);
1366 frame |= ep->branch;
1367 if (frame_before(frame, isp1362_hcd->fmindex))
1368 frame += ep->interval;
1369 urb->start_frame = frame;
1370
1371 DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1372 list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1373 } else {
1374 DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1375 list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1376 }
1377 } else
1378 DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1379
1380 DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1381 ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1382 isp1362_hcd->load[ep->branch] + ep->load);
1383 isp1362_hcd->load[ep->branch] += ep->load;
1384 }
1385
1386 urb->hcpriv = hep;
1387 ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1388
1389 switch (type) {
1390 case PIPE_CONTROL:
1391 case PIPE_BULK:
1392 start_atl_transfers(isp1362_hcd);
1393 break;
1394 case PIPE_INTERRUPT:
1395 start_intl_transfers(isp1362_hcd);
1396 break;
1397 case PIPE_ISOCHRONOUS:
1398 start_iso_transfers(isp1362_hcd);
1399 break;
1400 default:
1401 BUG();
1402 }
1403 fail:
1404 if (retval)
1405 usb_hcd_unlink_urb_from_ep(hcd, urb);
1406
1407
1408 fail_not_linked:
1409 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1410 if (retval)
1411 DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1412 return retval;
1413}
1414
1415static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1416{
1417 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1418 struct usb_host_endpoint *hep;
1419 unsigned long flags;
1420 struct isp1362_ep *ep;
1421 int retval = 0;
1422
1423 DBG(3, "%s: urb %p\n", __func__, urb);
1424
1425 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1426 retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1427 if (retval)
1428 goto done;
1429
1430 hep = urb->hcpriv;
1431
1432 if (!hep) {
1433 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1434 return -EIDRM;
1435 }
1436
1437 ep = hep->hcpriv;
1438 if (ep) {
1439 /* In front of queue? */
1440 if (ep->hep->urb_list.next == &urb->urb_list) {
1441 if (!list_empty(&ep->active)) {
1442 DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1443 urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1444 /* disable processing and queue PTD for removal */
1445 remove_ptd(isp1362_hcd, ep);
1446 urb = NULL;
1447 }
1448 }
1449 if (urb) {
1450 DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1451 ep->num_req);
1452 finish_request(isp1362_hcd, ep, urb, status);
1453 } else
1454 DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1455 } else {
1456 pr_warning("%s: No EP in URB %p\n", __func__, urb);
1457 retval = -EINVAL;
1458 }
1459done:
1460 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1461
1462 DBG(3, "%s: exit\n", __func__);
1463
1464 return retval;
1465}
1466
1467static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1468{
1469 struct isp1362_ep *ep = hep->hcpriv;
1470 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1471 unsigned long flags;
1472
1473 DBG(1, "%s: ep %p\n", __func__, ep);
1474 if (!ep)
1475 return;
1476 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1477 if (!list_empty(&hep->urb_list)) {
1478 if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
1479 DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
1480 ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1481 remove_ptd(isp1362_hcd, ep);
1482 pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
1483 }
1484 }
1485 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1486 /* Wait for interrupt to clear out active list */
1487 while (!list_empty(&ep->active))
1488 msleep(1);
1489
1490 DBG(1, "%s: Freeing EP %p\n", __func__, ep);
1491
1492 usb_put_dev(ep->udev);
1493 kfree(ep);
1494 hep->hcpriv = NULL;
1495}
1496
1497static int isp1362_get_frame(struct usb_hcd *hcd)
1498{
1499 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1500 u32 fmnum;
1501 unsigned long flags;
1502
1503 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1504 fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1505 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1506
1507 return (int)fmnum;
1508}
1509
1510/*-------------------------------------------------------------------------*/
1511
1512/* Adapted from ohci-hub.c */
1513static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1514{
1515 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1516 int ports, i, changed = 0;
1517 unsigned long flags;
1518
1519 if (!HC_IS_RUNNING(hcd->state))
1520 return -ESHUTDOWN;
1521
1522 /* Report no status change now, if we are scheduled to be
1523 called later */
1524 if (timer_pending(&hcd->rh_timer))
1525 return 0;
1526
1527 ports = isp1362_hcd->rhdesca & RH_A_NDP;
1528 BUG_ON(ports > 2);
1529
1530 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1531 /* init status */
1532 if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1533 buf[0] = changed = 1;
1534 else
1535 buf[0] = 0;
1536
1537 for (i = 0; i < ports; i++) {
1538 u32 status = isp1362_hcd->rhport[i];
1539
1540 if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1541 RH_PS_OCIC | RH_PS_PRSC)) {
1542 changed = 1;
1543 buf[0] |= 1 << (i + 1);
1544 continue;
1545 }
1546
1547 if (!(status & RH_PS_CCS))
1548 continue;
1549 }
1550 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1551 return changed;
1552}
1553
1554static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1555 struct usb_hub_descriptor *desc)
1556{
1557 u32 reg = isp1362_hcd->rhdesca;
1558
1559 DBG(3, "%s: enter\n", __func__);
1560
1561 desc->bDescriptorType = 0x29;
1562 desc->bDescLength = 9;
1563 desc->bHubContrCurrent = 0;
1564 desc->bNbrPorts = reg & 0x3;
1565 /* Power switching, device type, overcurrent. */
1566 desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
1567 DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
1568 desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1569 /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
1570 desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1571 desc->bitmap[1] = ~0;
1572
1573 DBG(3, "%s: exit\n", __func__);
1574}
1575
1576/* Adapted from ohci-hub.c */
1577static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1578 u16 wIndex, char *buf, u16 wLength)
1579{
1580 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1581 int retval = 0;
1582 unsigned long flags;
1583 unsigned long t1;
1584 int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1585 u32 tmp = 0;
1586
1587 switch (typeReq) {
1588 case ClearHubFeature:
1589 DBG(0, "ClearHubFeature: ");
1590 switch (wValue) {
1591 case C_HUB_OVER_CURRENT:
1592 _DBG(0, "C_HUB_OVER_CURRENT\n");
1593 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1594 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1595 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1596 case C_HUB_LOCAL_POWER:
1597 _DBG(0, "C_HUB_LOCAL_POWER\n");
1598 break;
1599 default:
1600 goto error;
1601 }
1602 break;
1603 case SetHubFeature:
1604 DBG(0, "SetHubFeature: ");
1605 switch (wValue) {
1606 case C_HUB_OVER_CURRENT:
1607 case C_HUB_LOCAL_POWER:
1608 _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1609 break;
1610 default:
1611 goto error;
1612 }
1613 break;
1614 case GetHubDescriptor:
1615 DBG(0, "GetHubDescriptor\n");
1616 isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1617 break;
1618 case GetHubStatus:
1619 DBG(0, "GetHubStatus\n");
1620 put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1621 break;
1622 case GetPortStatus:
1623#ifndef VERBOSE
1624 DBG(0, "GetPortStatus\n");
1625#endif
1626 if (!wIndex || wIndex > ports)
1627 goto error;
1628 tmp = isp1362_hcd->rhport[--wIndex];
1629 put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1630 break;
1631 case ClearPortFeature:
1632 DBG(0, "ClearPortFeature: ");
1633 if (!wIndex || wIndex > ports)
1634 goto error;
1635 wIndex--;
1636
1637 switch (wValue) {
1638 case USB_PORT_FEAT_ENABLE:
1639 _DBG(0, "USB_PORT_FEAT_ENABLE\n");
1640 tmp = RH_PS_CCS;
1641 break;
1642 case USB_PORT_FEAT_C_ENABLE:
1643 _DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1644 tmp = RH_PS_PESC;
1645 break;
1646 case USB_PORT_FEAT_SUSPEND:
1647 _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1648 tmp = RH_PS_POCI;
1649 break;
1650 case USB_PORT_FEAT_C_SUSPEND:
1651 _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1652 tmp = RH_PS_PSSC;
1653 break;
1654 case USB_PORT_FEAT_POWER:
1655 _DBG(0, "USB_PORT_FEAT_POWER\n");
1656 tmp = RH_PS_LSDA;
1657
1658 break;
1659 case USB_PORT_FEAT_C_CONNECTION:
1660 _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1661 tmp = RH_PS_CSC;
1662 break;
1663 case USB_PORT_FEAT_C_OVER_CURRENT:
1664 _DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1665 tmp = RH_PS_OCIC;
1666 break;
1667 case USB_PORT_FEAT_C_RESET:
1668 _DBG(0, "USB_PORT_FEAT_C_RESET\n");
1669 tmp = RH_PS_PRSC;
1670 break;
1671 default:
1672 goto error;
1673 }
1674
1675 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1676 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1677 isp1362_hcd->rhport[wIndex] =
1678 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1679 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1680 break;
1681 case SetPortFeature:
1682 DBG(0, "SetPortFeature: ");
1683 if (!wIndex || wIndex > ports)
1684 goto error;
1685 wIndex--;
1686 switch (wValue) {
1687 case USB_PORT_FEAT_SUSPEND:
1688 _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1689#ifdef CONFIG_USB_OTG
1690 if (ohci->hcd.self.otg_port == (wIndex + 1) &&
1691 ohci->hcd.self.b_hnp_enable) {
1692 start_hnp(ohci);
1693 break;
1694 }
1695#endif
1696 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1697 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1698 isp1362_hcd->rhport[wIndex] =
1699 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1700 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1701 break;
1702 case USB_PORT_FEAT_POWER:
1703 _DBG(0, "USB_PORT_FEAT_POWER\n");
1704 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1705 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1706 isp1362_hcd->rhport[wIndex] =
1707 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1708 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1709 break;
1710 case USB_PORT_FEAT_RESET:
1711 _DBG(0, "USB_PORT_FEAT_RESET\n");
1712 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1713
1714 t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1715 while (time_before(jiffies, t1)) {
1716 /* spin until any current reset finishes */
1717 for (;;) {
1718 tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1719 if (!(tmp & RH_PS_PRS))
1720 break;
1721 udelay(500);
1722 }
1723 if (!(tmp & RH_PS_CCS))
1724 break;
1725				/* Reset lasts 10 ms, according to the datasheet */
1726 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1727
1728 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1729 msleep(10);
1730 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1731 }
1732
1733 isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1734 HCRHPORT1 + wIndex);
1735 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1736 break;
1737 default:
1738 goto error;
1739 }
1740 break;
1741
1742 default:
1743 error:
1744 /* "protocol stall" on error */
1745 _DBG(0, "PROTOCOL STALL\n");
1746 retval = -EPIPE;
1747 }
1748
1749 return retval;
1750}
1751
1752#ifdef CONFIG_PM
1753static int isp1362_bus_suspend(struct usb_hcd *hcd)
1754{
1755 int status = 0;
1756 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1757 unsigned long flags;
1758
1759 if (time_before(jiffies, isp1362_hcd->next_statechange))
1760 msleep(5);
1761
1762 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1763
1764 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1765 switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1766 case OHCI_USB_RESUME:
1767 DBG(0, "%s: resume/suspend?\n", __func__);
1768 isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1769 isp1362_hcd->hc_control |= OHCI_USB_RESET;
1770 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1771 /* FALL THROUGH */
1772 case OHCI_USB_RESET:
1773 status = -EBUSY;
1774 pr_warning("%s: needs reinit!\n", __func__);
1775 goto done;
1776 case OHCI_USB_SUSPEND:
1777 pr_warning("%s: already suspended?\n", __func__);
1778 goto done;
1779 }
1780 DBG(0, "%s: suspend root hub\n", __func__);
1781
1782 /* First stop any processing */
1783 hcd->state = HC_STATE_QUIESCING;
1784 if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1785 !list_empty(&isp1362_hcd->intl_queue.active) ||
1786	    !list_empty(&isp1362_hcd->intl_queue.active) ||
1786	    !list_empty(&isp1362_hcd->istl_queue[0].active) ||
1787	    !list_empty(&isp1362_hcd->istl_queue[1].active)) {
1788 int limit;
1789
1790 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1791 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1792 isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1793 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1794 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1795
1796 DBG(0, "%s: stopping schedules ...\n", __func__);
1797 limit = 2000;
1798 while (limit > 0) {
1799 udelay(250);
1800 limit -= 250;
1801 if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1802 break;
1803 }
1804 mdelay(7);
1805 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1806 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1807 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1808 }
1809 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1810 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1811 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1812 }
1813 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1814 finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1815 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1816 finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1817 }
1818 DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1819 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1820 isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1821 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1822
1823 /* Suspend hub */
1824 isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1825 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1826 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1827 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1828
1829#if 1
1830 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1831 if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1832 pr_err("%s: controller won't suspend %08x\n", __func__,
1833 isp1362_hcd->hc_control);
1834 status = -EBUSY;
1835 } else
1836#endif
1837 {
1838 /* no resumes until devices finish suspending */
1839 isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1840 }
1841done:
1842 if (status == 0) {
1843 hcd->state = HC_STATE_SUSPENDED;
1844 DBG(0, "%s: HCD suspended: %08x\n", __func__,
1845 isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1846 }
1847 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1848 return status;
1849}
1850
1851static int isp1362_bus_resume(struct usb_hcd *hcd)
1852{
1853 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1854 u32 port;
1855 unsigned long flags;
1856 int status = -EINPROGRESS;
1857
1858 if (time_before(jiffies, isp1362_hcd->next_statechange))
1859 msleep(5);
1860
1861 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1862 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1863 pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1864 if (hcd->state == HC_STATE_RESUMING) {
1865 pr_warning("%s: duplicate resume\n", __func__);
1866 status = 0;
1867 } else
1868 switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1869 case OHCI_USB_SUSPEND:
1870 DBG(0, "%s: resume root hub\n", __func__);
1871 isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1872 isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1873 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1874 break;
1875 case OHCI_USB_RESUME:
1876 /* HCFS changes sometime after INTR_RD */
1877 DBG(0, "%s: remote wakeup\n", __func__);
1878 break;
1879 case OHCI_USB_OPER:
1880 DBG(0, "%s: odd resume\n", __func__);
1881 status = 0;
1882 hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1883 break;
1884 default: /* RESET, we lost power */
1885 DBG(0, "%s: root hub hardware reset\n", __func__);
1886 status = -EBUSY;
1887 }
1888 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1889 if (status == -EBUSY) {
1890 DBG(0, "%s: Restarting HC\n", __func__);
1891 isp1362_hc_stop(hcd);
1892 return isp1362_hc_start(hcd);
1893 }
1894 if (status != -EINPROGRESS)
1895 return status;
1896 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1897 port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1898 while (port--) {
1899 u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1900
1901 /* force global, not selective, resume */
1902 if (!(stat & RH_PS_PSS)) {
1903 DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1904 continue;
1905 }
1906 DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1907 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1908 }
1909 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1910
1911 /* Some controllers (lucent) need extra-long delays */
1912 hcd->state = HC_STATE_RESUMING;
1913 mdelay(20 /* usb 11.5.1.10 */ + 15);
1914
1915 isp1362_hcd->hc_control = OHCI_USB_OPER;
1916 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1917 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1918 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1919 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1920 /* TRSMRCY */
1921 msleep(10);
1922
1923 /* keep it alive for ~5x suspend + resume costs */
1924 isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1925
1926 hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1927 hcd->state = HC_STATE_RUNNING;
1928 return 0;
1929}
1930#else
1931#define isp1362_bus_suspend NULL
1932#define isp1362_bus_resume NULL
1933#endif
1934
1935/*-------------------------------------------------------------------------*/
1936
1937#ifdef STUB_DEBUG_FILE
1938
1939static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
1940{
1941}
1942static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
1943{
1944}
1945
1946#else
1947
1948#include <linux/proc_fs.h>
1949#include <linux/seq_file.h>
1950
1951static void dump_irq(struct seq_file *s, char *label, u16 mask)
1952{
1953 seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1954 mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1955 mask & HCuPINT_SUSP ? " susp" : "",
1956 mask & HCuPINT_OPR ? " opr" : "",
1957 mask & HCuPINT_EOT ? " eot" : "",
1958 mask & HCuPINT_ATL ? " atl" : "",
1959 mask & HCuPINT_SOF ? " sof" : "");
1960}
1961
1962static void dump_int(struct seq_file *s, char *label, u32 mask)
1963{
1964 seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1965 mask & OHCI_INTR_MIE ? " MIE" : "",
1966 mask & OHCI_INTR_RHSC ? " rhsc" : "",
1967 mask & OHCI_INTR_FNO ? " fno" : "",
1968 mask & OHCI_INTR_UE ? " ue" : "",
1969 mask & OHCI_INTR_RD ? " rd" : "",
1970 mask & OHCI_INTR_SF ? " sof" : "",
1971 mask & OHCI_INTR_SO ? " so" : "");
1972}
1973
1974static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1975{
1976 seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1977 mask & OHCI_CTRL_RWC ? " rwc" : "",
1978 mask & OHCI_CTRL_RWE ? " rwe" : "",
1979 ({
1980 char *hcfs;
1981 switch (mask & OHCI_CTRL_HCFS) {
1982 case OHCI_USB_OPER:
1983 hcfs = " oper";
1984 break;
1985 case OHCI_USB_RESET:
1986 hcfs = " reset";
1987 break;
1988 case OHCI_USB_RESUME:
1989 hcfs = " resume";
1990 break;
1991 case OHCI_USB_SUSPEND:
1992 hcfs = " suspend";
1993 break;
1994 default:
1995 hcfs = " ?";
1996 }
1997 hcfs;
1998 }));
1999}
2000
2001static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
2002{
2003 seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
2004 isp1362_read_reg32(isp1362_hcd, HCREVISION));
2005 seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
2006 isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2007 seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
2008 isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
2009 seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
2010 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2011 seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
2012 isp1362_read_reg32(isp1362_hcd, HCINTENB));
2013 seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
2014 isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
2015 seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
2016 isp1362_read_reg32(isp1362_hcd, HCFMREM));
2017 seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
2018 isp1362_read_reg32(isp1362_hcd, HCFMNUM));
2019 seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
2020 isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
2021 seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
2022 isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
2023 seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
2024 isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
2025 seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
2026 isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
2027 seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
2028 isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
2029 seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
2030 isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
2031 seq_printf(s, "\n");
2032 seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
2033 isp1362_read_reg16(isp1362_hcd, HCHWCFG));
2034 seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
2035 isp1362_read_reg16(isp1362_hcd, HCDMACFG));
2036 seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
2037 isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
2038 seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
2039 isp1362_read_reg16(isp1362_hcd, HCuPINT));
2040 seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
2041 isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2042 seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
2043 isp1362_read_reg16(isp1362_hcd, HCCHIPID));
2044 seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
2045 isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
2046 seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
2047 isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
2048 seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
2049 isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
2050#if 0
2051 seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
2052 isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
2053#endif
2054 seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
2055 isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
2056 seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
2057 isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
2058 seq_printf(s, "\n");
2059 seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
2060 isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
2061 seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
2062 isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
2063 seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
2064 isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
2065 seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
2066 isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
2067 seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
2068 isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
2069 seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
2070 isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
2071 seq_printf(s, "\n");
2072 seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
2073 isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
2074 seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
2075 isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
2076#if 0
2077 seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
2078 isp1362_read_reg32(isp1362_hcd, HCATLDONE));
2079#endif
2080 seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
2081 isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
2082 seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
2083 isp1362_read_reg32(isp1362_hcd, HCATLLAST));
2084 seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
2085 isp1362_read_reg16(isp1362_hcd, HCATLCURR));
2086 seq_printf(s, "\n");
2087 seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
2088 isp1362_read_reg16(isp1362_hcd, HCATLDTC));
2089 seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
2090 isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
2091}
2092
2093static int proc_isp1362_show(struct seq_file *s, void *unused)
2094{
2095 struct isp1362_hcd *isp1362_hcd = s->private;
2096 struct isp1362_ep *ep;
2097 int i;
2098
2099 seq_printf(s, "%s\n%s version %s\n",
2100 isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2101
2102 /* collect statistics to help estimate potential win for
2103 * DMA engines that care about alignment (PXA)
2104 */
2105 seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2106 isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2107 isp1362_hcd->stat2, isp1362_hcd->stat1);
2108 seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2109 seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2110 seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2111		   max(isp1362_hcd->istl_queue[0].stat_maxptds,
2112		       isp1362_hcd->istl_queue[1].stat_maxptds));
2113
2114 /* FIXME: don't show the following in suspended state */
2115 spin_lock_irq(&isp1362_hcd->lock);
2116
2117 dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2118 dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2119 dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2120 dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2121 dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2122
2123 for (i = 0; i < NUM_ISP1362_IRQS; i++)
2124 if (isp1362_hcd->irq_stat[i])
2125 seq_printf(s, "%-15s: %d\n",
2126 ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2127
2128 dump_regs(s, isp1362_hcd);
2129 list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2130 struct urb *urb;
2131
2132 seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2133 ({
2134 char *s;
2135 switch (ep->nextpid) {
2136 case USB_PID_IN:
2137 s = "in";
2138 break;
2139 case USB_PID_OUT:
2140 s = "out";
2141 break;
2142 case USB_PID_SETUP:
2143 s = "setup";
2144 break;
2145 case USB_PID_ACK:
2146 s = "status";
2147 break;
2148 default:
2149 s = "?";
2150 break;
2151				}
2152				s;}), ep->maxpacket);
2153 list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2154 seq_printf(s, " urb%p, %d/%d\n", urb,
2155 urb->actual_length,
2156 urb->transfer_buffer_length);
2157 }
2158 }
2159 if (!list_empty(&isp1362_hcd->async))
2160 seq_printf(s, "\n");
2161 dump_ptd_queue(&isp1362_hcd->atl_queue);
2162
2163 seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2164
2165 list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2166 seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2167 isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2168
2169 seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
2170 ep->interval, ep,
2171 (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2172 ep->udev->devnum, ep->epnum,
2173 (ep->epnum == 0) ? "" :
2174 ((ep->nextpid == USB_PID_IN) ?
2175 "in" : "out"), ep->maxpacket);
2176 }
2177 dump_ptd_queue(&isp1362_hcd->intl_queue);
2178
2179 seq_printf(s, "ISO:\n");
2180
2181 list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2182 seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
2183 ep->interval, ep,
2184 (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2185 ep->udev->devnum, ep->epnum,
2186 (ep->epnum == 0) ? "" :
2187 ((ep->nextpid == USB_PID_IN) ?
2188 "in" : "out"), ep->maxpacket);
2189 }
2190
2191 spin_unlock_irq(&isp1362_hcd->lock);
2192 seq_printf(s, "\n");
2193
2194 return 0;
2195}
2196
2197static int proc_isp1362_open(struct inode *inode, struct file *file)
2198{
2199 return single_open(file, proc_isp1362_show, PDE(inode)->data);
2200}
2201
2202static const struct file_operations proc_ops = {
2203 .open = proc_isp1362_open,
2204 .read = seq_read,
2205 .llseek = seq_lseek,
2206 .release = single_release,
2207};
2208
2209/* expect just one isp1362_hcd per system */
2210static const char proc_filename[] = "driver/isp1362";
2211
2212static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2213{
2214 struct proc_dir_entry *pde;
2215
2216 pde = create_proc_entry(proc_filename, 0, NULL);
2217 if (pde == NULL) {
2218 pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
2219 return;
2220 }
2221
2222 pde->proc_fops = &proc_ops;
2223 pde->data = isp1362_hcd;
2224 isp1362_hcd->pde = pde;
2225}
2226
2227static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2228{
2229 if (isp1362_hcd->pde)
2230		remove_proc_entry(proc_filename, NULL);
2231}
2232
2233#endif
2234
2235/*-------------------------------------------------------------------------*/
2236
2237static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2238{
2239 int tmp = 20;
2240 unsigned long flags;
2241
2242 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2243
2244 isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2245 isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2246 while (--tmp) {
2247 mdelay(1);
2248 if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2249 break;
2250 }
2251 if (!tmp)
2252 pr_err("Software reset timeout\n");
2253 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2254}
2255
2256static int isp1362_mem_config(struct usb_hcd *hcd)
2257{
2258 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2259 unsigned long flags;
2260 u32 total;
2261 u16 istl_size = ISP1362_ISTL_BUFSIZE;
2262 u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2263 u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2264 u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2265 u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2266 u16 atl_size;
2267 int i;
2268
2269 WARN_ON(istl_size & 3);
2270 WARN_ON(atl_blksize & 3);
2271 WARN_ON(intl_blksize & 3);
2272 WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2273 WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2274
2275 BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2276 if (atl_buffers > 32)
2277 atl_buffers = 32;
2278 atl_size = atl_buffers * atl_blksize;
2279 total = atl_size + intl_size + istl_size;
2280 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2281 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2282 istl_size / 2, istl_size, 0, istl_size / 2);
2283 dev_info(hcd->self.controller, " INTL: %4d * (%3u+8): %4d @ $%04x\n",
2284 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2285 intl_size, istl_size);
2286 dev_info(hcd->self.controller, " ATL : %4d * (%3u+8): %4d @ $%04x\n",
2287 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2288 atl_size, istl_size + intl_size);
2289 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
2290 ISP1362_BUF_SIZE - total);
2291
2292 if (total > ISP1362_BUF_SIZE) {
2293 dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2294 __func__, total, ISP1362_BUF_SIZE);
2295 return -ENOMEM;
2296 }
2297
2298 total = istl_size + intl_size + atl_size;
2299 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2300
2301 for (i = 0; i < 2; i++) {
2302		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
2303 isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2304 isp1362_hcd->istl_queue[i].blk_size = 4;
2305 INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2306 snprintf(isp1362_hcd->istl_queue[i].name,
2307 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2308 DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2309 isp1362_hcd->istl_queue[i].name,
2310 isp1362_hcd->istl_queue[i].buf_start,
2311 isp1362_hcd->istl_queue[i].buf_size);
2312 }
2313 isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2314
2315 isp1362_hcd->intl_queue.buf_start = istl_size;
2316 isp1362_hcd->intl_queue.buf_size = intl_size;
2317 isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2318 isp1362_hcd->intl_queue.blk_size = intl_blksize;
2319 isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2320 isp1362_hcd->intl_queue.skip_map = ~0;
2321 INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2322
2323 isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2324 isp1362_hcd->intl_queue.buf_size);
2325 isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2326 isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2327 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2328 isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2329 1 << (ISP1362_INTL_BUFFERS - 1));
2330
2331 isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2332 isp1362_hcd->atl_queue.buf_size = atl_size;
2333 isp1362_hcd->atl_queue.buf_count = atl_buffers;
2334 isp1362_hcd->atl_queue.blk_size = atl_blksize;
2335 isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2336 isp1362_hcd->atl_queue.skip_map = ~0;
2337 INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2338
2339 isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2340 isp1362_hcd->atl_queue.buf_size);
2341 isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2342 isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2343 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2344 isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2345 1 << (atl_buffers - 1));
2346
2347 snprintf(isp1362_hcd->atl_queue.name,
2348 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2349 snprintf(isp1362_hcd->intl_queue.name,
2350 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2351 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2352 isp1362_hcd->intl_queue.name,
2353 isp1362_hcd->intl_queue.buf_start,
2354 ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2355 isp1362_hcd->intl_queue.buf_size);
2356 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2357 isp1362_hcd->atl_queue.name,
2358 isp1362_hcd->atl_queue.buf_start,
2359 atl_buffers, isp1362_hcd->atl_queue.blk_size,
2360 isp1362_hcd->atl_queue.buf_size);
2361
2362 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2363
2364 return 0;
2365}
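/*
 * A worked example of the partitioning above, using the defaults from
 * isp1362.h (ISP1362_BUF_SIZE 4096, ISP1362_ISTL_BUFSIZE 512,
 * ISP1362_INTL_BLKSIZE 64, ISP1362_INTL_BUFFERS 16, ISP1362_ATL_BLKSIZE 64,
 * PTD_HEADER_SIZE 8): ISTL takes 2 * 256 = 512 bytes, INTL takes
 * 16 * (64 + 8) = 1152 bytes, leaving 4096 - 1664 = 2432 bytes for ATL
 * blocks of 64 + 8 = 72 bytes, i.e. 33 blocks, capped to 32 (2304 bytes).
 * Total: 512 + 1152 + 2304 = 3968 bytes used, 128 bytes free.
 */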
2366
2367static int isp1362_hc_reset(struct usb_hcd *hcd)
2368{
2369 int ret = 0;
2370 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2371 unsigned long t;
2372 unsigned long timeout = 100;
2373 unsigned long flags;
2374 int clkrdy = 0;
2375
2376 pr_info("%s:\n", __func__);
2377
2378 if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2379 isp1362_hcd->board->reset(hcd->self.controller, 1);
2380 msleep(20);
2381 if (isp1362_hcd->board->clock)
2382 isp1362_hcd->board->clock(hcd->self.controller, 1);
2383 isp1362_hcd->board->reset(hcd->self.controller, 0);
2384 } else
2385 isp1362_sw_reset(isp1362_hcd);
2386
2387 /* chip has been reset. First we need to see a clock */
2388 t = jiffies + msecs_to_jiffies(timeout);
2389 while (!clkrdy && time_before_eq(jiffies, t)) {
2390 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2391 clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2392 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2393 if (!clkrdy)
2394 msleep(4);
2395 }
2396
2397 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2398 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2399 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2400 if (!clkrdy) {
2401 pr_err("Clock not ready after %lums\n", timeout);
2402 ret = -ENODEV;
2403 }
2404 return ret;
2405}
2406
2407static void isp1362_hc_stop(struct usb_hcd *hcd)
2408{
2409 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2410 unsigned long flags;
2411 u32 tmp;
2412
2413 pr_info("%s:\n", __func__);
2414
2415 del_timer_sync(&hcd->rh_timer);
2416
2417 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2418
2419 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2420
2421 /* Switch off power for all ports */
2422 tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2423 tmp &= ~(RH_A_NPS | RH_A_PSM);
2424 isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
2425 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2426
2427 /* Reset the chip */
2428 if (isp1362_hcd->board && isp1362_hcd->board->reset)
2429 isp1362_hcd->board->reset(hcd->self.controller, 1);
2430 else
2431 isp1362_sw_reset(isp1362_hcd);
2432
2433 if (isp1362_hcd->board && isp1362_hcd->board->clock)
2434 isp1362_hcd->board->clock(hcd->self.controller, 0);
2435
2436 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2437}
2438
2439#ifdef CHIP_BUFFER_TEST
2440static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
2441{
2442 int ret = 0;
2443 u16 *ref;
2444 unsigned long flags;
2445
2446 ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
2447 if (ref) {
2448 int offset;
2449 u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
2450
2451 for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
2452 ref[offset] = ~offset;
2453 tst[offset] = offset;
2454 }
2455
2456 for (offset = 0; offset < 4; offset++) {
2457 int j;
2458
2459 for (j = 0; j < 8; j++) {
2460 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2461 isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
2462 isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
2463 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2464
2465 if (memcmp(ref, tst, j)) {
2466 ret = -ENODEV;
2467 pr_err("%s: memory check with %d byte offset %d failed\n",
2468 __func__, j, offset);
2469 dump_data((u8 *)ref + offset, j);
2470 dump_data((u8 *)tst + offset, j);
2471 }
2472 }
2473 }
2474
2475 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2476 isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
2477 isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2478 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2479
2480 if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
2481 ret = -ENODEV;
2482 pr_err("%s: memory check failed\n", __func__);
2483 dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
2484 }
2485
2486 for (offset = 0; offset < 256; offset++) {
2487 int test_size = 0;
2488
2489 yield();
2490
2491 memset(tst, 0, ISP1362_BUF_SIZE);
2492 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2493 isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2494 isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2495 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2496 if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
2497 ISP1362_BUF_SIZE / 2)) {
2498 pr_err("%s: Failed to clear buffer\n", __func__);
2499 dump_data((u8 *)tst, ISP1362_BUF_SIZE);
2500 break;
2501 }
2502 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2503 isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
2504 isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
2505 offset * 2 + PTD_HEADER_SIZE, test_size);
2506 isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2507 PTD_HEADER_SIZE + test_size);
2508 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2509 if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2510 dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
2511 dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
2512 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2513 isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2514 PTD_HEADER_SIZE + test_size);
2515 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2516 if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2517 ret = -ENODEV;
2518 pr_err("%s: memory check with offset %02x failed\n",
2519 __func__, offset);
2520 break;
2521 }
2522 pr_warning("%s: memory check with offset %02x ok after second read\n",
2523 __func__, offset);
2524 }
2525 }
2526 kfree(ref);
2527 }
2528 return ret;
2529}
2530#endif
2531
2532static int isp1362_hc_start(struct usb_hcd *hcd)
2533{
2534 int ret;
2535 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2536 struct isp1362_platform_data *board = isp1362_hcd->board;
2537 u16 hwcfg;
2538 u16 chipid;
2539 unsigned long flags;
2540
2541 pr_info("%s:\n", __func__);
2542
2543 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2544 chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
2545 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2546
2547 if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
2548 pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
2549 return -ENODEV;
2550 }
2551
2552#ifdef CHIP_BUFFER_TEST
2553 ret = isp1362_chip_test(isp1362_hcd);
2554 if (ret)
2555 return -ENODEV;
2556#endif
2557 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2558 /* clear interrupt status and disable all interrupt sources */
2559 isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
2560 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2561
2562 /* HW conf */
2563 hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
2564 if (board->sel15Kres)
2565		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
2566			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
2567 if (board->clknotstop)
2568 hwcfg |= HCHWCFG_CLKNOTSTOP;
2569 if (board->oc_enable)
2570 hwcfg |= HCHWCFG_ANALOG_OC;
2571 if (board->int_act_high)
2572 hwcfg |= HCHWCFG_INT_POL;
2573 if (board->int_edge_triggered)
2574 hwcfg |= HCHWCFG_INT_TRIGGER;
2575 if (board->dreq_act_high)
2576 hwcfg |= HCHWCFG_DREQ_POL;
2577 if (board->dack_act_high)
2578 hwcfg |= HCHWCFG_DACK_POL;
2579 isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
2580 isp1362_show_reg(isp1362_hcd, HCHWCFG);
2581 isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
2582 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2583
2584 ret = isp1362_mem_config(hcd);
2585 if (ret)
2586 return ret;
2587
2588 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2589
2590 /* Root hub conf */
2591 isp1362_hcd->rhdesca = 0;
2592 if (board->no_power_switching)
2593 isp1362_hcd->rhdesca |= RH_A_NPS;
2594 if (board->power_switching_mode)
2595 isp1362_hcd->rhdesca |= RH_A_PSM;
2596 if (board->potpg)
2597 isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
2598 else
2599 isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
2600
2601 isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
2602 isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
2603 isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2604
2605 isp1362_hcd->rhdescb = RH_B_PPCM;
2606 isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
2607 isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
2608
2609 isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
2610 isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
2611 isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
2612
2613 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2614
2615 isp1362_hcd->hc_control = OHCI_USB_OPER;
2616 hcd->state = HC_STATE_RUNNING;
2617
2618 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2619 /* Set up interrupts */
2620 isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
2621 isp1362_hcd->intenb |= OHCI_INTR_RD;
2622 isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
2623 isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
2624 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
2625
2626 /* Go operational */
2627 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
2628 /* enable global power */
2629 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
2630
2631 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2632
2633 return 0;
2634}
2635
2636/*-------------------------------------------------------------------------*/
2637
2638static struct hc_driver isp1362_hc_driver = {
2639 .description = hcd_name,
2640 .product_desc = "ISP1362 Host Controller",
2641 .hcd_priv_size = sizeof(struct isp1362_hcd),
2642
2643 .irq = isp1362_irq,
2644 .flags = HCD_USB11 | HCD_MEMORY,
2645
2646 .reset = isp1362_hc_reset,
2647 .start = isp1362_hc_start,
2648 .stop = isp1362_hc_stop,
2649
2650 .urb_enqueue = isp1362_urb_enqueue,
2651 .urb_dequeue = isp1362_urb_dequeue,
2652 .endpoint_disable = isp1362_endpoint_disable,
2653
2654 .get_frame_number = isp1362_get_frame,
2655
2656 .hub_status_data = isp1362_hub_status_data,
2657 .hub_control = isp1362_hub_control,
2658 .bus_suspend = isp1362_bus_suspend,
2659 .bus_resume = isp1362_bus_resume,
2660};
2661
2662/*-------------------------------------------------------------------------*/
2663
2664#define resource_len(r) (((r)->end - (r)->start) + 1)
2665
2666static int __devexit isp1362_remove(struct platform_device *pdev)
2667{
2668 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2669 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2670 struct resource *res;
2671
2672 remove_debug_file(isp1362_hcd);
2673 DBG(0, "%s: Removing HCD\n", __func__);
2674 usb_remove_hcd(hcd);
2675
2676 DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__,
2677 (u32)isp1362_hcd->data_reg);
2678 iounmap(isp1362_hcd->data_reg);
2679
2680 DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__,
2681 (u32)isp1362_hcd->addr_reg);
2682 iounmap(isp1362_hcd->addr_reg);
2683
2684 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2685	DBG(0, "%s: release mem_region: %08lx\n", __func__, res ? (long unsigned int)res->start : 0UL);
2686 if (res)
2687 release_mem_region(res->start, resource_len(res));
2688
2689 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2690	DBG(0, "%s: release mem_region: %08lx\n", __func__, res ? (long unsigned int)res->start : 0UL);
2691 if (res)
2692 release_mem_region(res->start, resource_len(res));
2693
2694 DBG(0, "%s: put_hcd\n", __func__);
2695 usb_put_hcd(hcd);
2696 DBG(0, "%s: Done\n", __func__);
2697
2698 return 0;
2699}
2700
2701static int __init isp1362_probe(struct platform_device *pdev)
2702{
2703 struct usb_hcd *hcd;
2704 struct isp1362_hcd *isp1362_hcd;
2705 struct resource *addr, *data;
2706 void __iomem *addr_reg;
2707 void __iomem *data_reg;
2708 int irq;
2709 int retval = 0;
2710
2711 /* basic sanity checks first. board-specific init logic should
2712	 * have initialized these three resources and probably board
2713 * specific platform_data. we don't probe for IRQs, and do only
2714 * minimal sanity checking.
2715 */
2716 if (pdev->num_resources < 3) {
2717 retval = -ENODEV;
2718 goto err1;
2719 }
2720
2721 data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2722 addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2723 irq = platform_get_irq(pdev, 0);
2724 if (!addr || !data || irq < 0) {
2725 retval = -ENODEV;
2726 goto err1;
2727 }
2728
2729#ifdef CONFIG_USB_HCD_DMA
2730 if (pdev->dev.dma_mask) {
2731 struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
2732
2733 if (!dma_res) {
2734 retval = -ENODEV;
2735 goto err1;
2736 }
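		/* Note: isp1362_hcd has not been assigned yet at this point
		 * (it is only set up after usb_create_hcd() below), so these
		 * assignments dereference an uninitialized pointer.
		 */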
2737 isp1362_hcd->data_dma = dma_res->start;
2738 isp1362_hcd->max_dma_size = resource_len(dma_res);
2739 }
2740#else
2741 if (pdev->dev.dma_mask) {
2742 DBG(1, "won't do DMA");
2743 retval = -ENODEV;
2744 goto err1;
2745 }
2746#endif
2747
2748 if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
2749 retval = -EBUSY;
2750 goto err1;
2751 }
2752 addr_reg = ioremap(addr->start, resource_len(addr));
2753 if (addr_reg == NULL) {
2754 retval = -ENOMEM;
2755 goto err2;
2756 }
2757
2758 if (!request_mem_region(data->start, resource_len(data), hcd_name)) {
2759 retval = -EBUSY;
2760 goto err3;
2761 }
2762 data_reg = ioremap(data->start, resource_len(data));
2763 if (data_reg == NULL) {
2764 retval = -ENOMEM;
2765 goto err4;
2766 }
2767
2768 /* allocate and initialize hcd */
2769 hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2770 if (!hcd) {
2771 retval = -ENOMEM;
2772 goto err5;
2773 }
2774 hcd->rsrc_start = data->start;
2775 isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2776 isp1362_hcd->data_reg = data_reg;
2777 isp1362_hcd->addr_reg = addr_reg;
2778
2779 isp1362_hcd->next_statechange = jiffies;
2780 spin_lock_init(&isp1362_hcd->lock);
2781 INIT_LIST_HEAD(&isp1362_hcd->async);
2782 INIT_LIST_HEAD(&isp1362_hcd->periodic);
2783 INIT_LIST_HEAD(&isp1362_hcd->isoc);
2784 INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2785 isp1362_hcd->board = pdev->dev.platform_data;
2786#if USE_PLATFORM_DELAY
2787 if (!isp1362_hcd->board->delay) {
2788 dev_err(hcd->self.controller, "No platform delay function given\n");
2789 retval = -ENODEV;
2790 goto err6;
2791 }
2792#endif
2793
2794#ifdef CONFIG_ARM
2795 if (isp1362_hcd->board)
2796 set_irq_type(irq, isp1362_hcd->board->int_act_high ? IRQT_RISING : IRQT_FALLING);
2797#endif
2798
2799 retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED);
2800 if (retval != 0)
2801 goto err6;
2802 pr_info("%s, irq %d\n", hcd->product_desc, irq);
2803
2804 create_debug_file(isp1362_hcd);
2805
2806 return 0;
2807
2808 err6:
2809 DBG(0, "%s: Freeing dev %08x\n", __func__, (u32)isp1362_hcd);
2810 usb_put_hcd(hcd);
2811 err5:
2812 DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__, (u32)data_reg);
2813 iounmap(data_reg);
2814 err4:
2815 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
2816 release_mem_region(data->start, resource_len(data));
2817 err3:
2818 DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__, (u32)addr_reg);
2819 iounmap(addr_reg);
2820 err2:
2821 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
2822 release_mem_region(addr->start, resource_len(addr));
2823 err1:
2824 pr_err("%s: init error, %d\n", __func__, retval);
2825
2826 return retval;
2827}
2828
2829#ifdef CONFIG_PM
2830static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2831{
2832 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2833 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2834 unsigned long flags;
2835 int retval = 0;
2836
2837 DBG(0, "%s: Suspending device\n", __func__);
2838
2839 if (state.event == PM_EVENT_FREEZE) {
2840 DBG(0, "%s: Suspending root hub\n", __func__);
2841 retval = isp1362_bus_suspend(hcd);
2842 } else {
2843 DBG(0, "%s: Suspending RH ports\n", __func__);
2844 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2845 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2846 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2847 }
2848 if (retval == 0)
2849 pdev->dev.power.power_state = state;
2850 return retval;
2851}
2852
2853static int isp1362_resume(struct platform_device *pdev)
2854{
2855 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2856 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2857 unsigned long flags;
2858
2859 DBG(0, "%s: Resuming\n", __func__);
2860
2861 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2862 DBG(0, "%s: Resume RH ports\n", __func__);
2863 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2864 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2865 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2866 return 0;
2867 }
2868
2869 pdev->dev.power.power_state = PMSG_ON;
2870
2871 return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2872}
2873#else
2874#define isp1362_suspend NULL
2875#define isp1362_resume NULL
2876#endif
2877
2878static struct platform_driver isp1362_driver = {
2879 .probe = isp1362_probe,
2880 .remove = __devexit_p(isp1362_remove),
2881
2882 .suspend = isp1362_suspend,
2883 .resume = isp1362_resume,
2884 .driver = {
2885 .name = (char *)hcd_name,
2886 .owner = THIS_MODULE,
2887 },
2888};
2889
2890/*-------------------------------------------------------------------------*/
2891
2892static int __init isp1362_init(void)
2893{
2894 if (usb_disabled())
2895 return -ENODEV;
2896 pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
2897 return platform_driver_register(&isp1362_driver);
2898}
2899module_init(isp1362_init);
2900
2901static void __exit isp1362_cleanup(void)
2902{
2903 platform_driver_unregister(&isp1362_driver);
2904}
2905module_exit(isp1362_cleanup);
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
new file mode 100644
index 000000000000..26e44fc8776f
--- /dev/null
+++ b/drivers/usb/host/isp1362.h
@@ -0,0 +1,1079 @@
1/*
2 * ISP1362 HCD (Host Controller Driver) for USB.
3 *
4 * COPYRIGHT (C) by L. Wassmann <LW@KARO-electronics.de>
5 */
6
7/* ------------------------------------------------------------------------- */
8/*
9 * Platform specific compile time options
10 */
11#if defined(CONFIG_ARCH_KARO)
12#include <asm/arch/hardware.h>
13#include <asm/arch/pxa-regs.h>
14#include <asm/arch/karo.h>
15
16#define USE_32BIT 1
17
18
19/* These options are mutually exclusive */
20#define USE_PLATFORM_DELAY 1
21#define USE_NDELAY 0
22/*
23 * MAX_ROOT_PORTS: Number of downstream ports
24 *
25 * The chip has two USB ports, one of which can be configured as
26 * a USB device port, so the value of this constant is implementation
27 * specific.
28 */
29#define MAX_ROOT_PORTS 2
30#define DUMMY_DELAY_ACCESS do {} while (0)
31
32/* insert platform specific definitions for other machines here (an example sketch follows the #endif that closes this chain) */
33#elif defined(CONFIG_BLACKFIN)
34
35#include <linux/io.h>
36#define USE_32BIT 0
37#define MAX_ROOT_PORTS 2
38#define USE_PLATFORM_DELAY 0
39#define USE_NDELAY 1
40
41#define DUMMY_DELAY_ACCESS \
42 do { \
43 bfin_read16(ASYNC_BANK0_BASE); \
44 bfin_read16(ASYNC_BANK0_BASE); \
45 bfin_read16(ASYNC_BANK0_BASE); \
46 } while (0)
47
48#undef insw
49#undef outsw
50
51#define insw delayed_insw
52#define outsw delayed_outsw
53
54static inline void delayed_outsw(unsigned int addr, void *buf, int len)
55{
56 unsigned short *bp = (unsigned short *)buf;
57 while (len--) {
58 DUMMY_DELAY_ACCESS;
59 outw(*bp++, addr);
60 }
61}
62
63static inline void delayed_insw(unsigned int addr, void *buf, int len)
64{
65 unsigned short *bp = (unsigned short *)buf;
66 while (len--) {
67 DUMMY_DELAY_ACCESS;
68 *bp++ = inw((void *)addr);
69 }
70}
71
72#else
73
74#define MAX_ROOT_PORTS 2
75
76#define USE_32BIT 0
77
78/* These options are mutually exclusive */
79#define USE_PLATFORM_DELAY 0
80#define USE_NDELAY 0
81
82#define DUMMY_DELAY_ACCESS do {} while (0)
83
84#endif
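/*
 * Hypothetical sketch of an additional platform entry for the #if chain
 * above (the CONFIG symbol and the timing choices are examples only):
 *
 *	#elif defined(CONFIG_MACH_EXAMPLE_BOARD)
 *	#define USE_32BIT		0
 *	#define MAX_ROOT_PORTS		2
 *	#define USE_PLATFORM_DELAY	0
 *	#define USE_NDELAY		1
 *	#define DUMMY_DELAY_ACCESS	do {} while (0)
 *
 * At most one of USE_PLATFORM_DELAY and USE_NDELAY should be set to 1;
 * with USE_PLATFORM_DELAY the board must also provide the delay callback
 * in its isp1362_platform_data.
 */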
85
86
87/* ------------------------------------------------------------------------- */
88
89#define USB_RESET_WIDTH 50
90#define MAX_XFER_SIZE 1023
91
92/* Buffer sizes */
93#define ISP1362_BUF_SIZE 4096
94#define ISP1362_ISTL_BUFSIZE 512
95#define ISP1362_INTL_BLKSIZE 64
96#define ISP1362_INTL_BUFFERS 16
97#define ISP1362_ATL_BLKSIZE 64
98
99#define ISP1362_REG_WRITE_OFFSET 0x80
100
101#ifdef ISP1362_DEBUG
102typedef const unsigned int isp1362_reg_t;
103
104#define REG_WIDTH_16 0x000
105#define REG_WIDTH_32 0x100
106#define REG_WIDTH_MASK 0x100
107#define REG_NO_MASK 0x0ff
108
109#define REG_ACCESS_R 0x200
110#define REG_ACCESS_W 0x400
111#define REG_ACCESS_RW 0x600
112#define REG_ACCESS_MASK 0x600
113
114#define ISP1362_REG_NO(r) ((r) & REG_NO_MASK)
115
116#define _BUG_ON(x) BUG_ON(x)
117#define _WARN_ON(x) WARN_ON(x)
118
119#define ISP1362_REG(name, addr, width, rw) \
120static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw))
121
122#define REG_ACCESS_TEST(r) BUG_ON(((r) & ISP1362_REG_WRITE_OFFSET) && !((r) & REG_ACCESS_W))
123#define REG_WIDTH_TEST(r, w) BUG_ON(((r) & REG_WIDTH_MASK) != (w))
124#else
125typedef const unsigned char isp1362_reg_t;
126#define ISP1362_REG_NO(r) (r)
127#define _BUG_ON(x) do {} while (0)
128#define _WARN_ON(x) do {} while (0)
129
130#define ISP1362_REG(name, addr, width, rw) \
131static isp1362_reg_t ISP1362_REG_##name = addr
132
133#define REG_ACCESS_TEST(r) do {} while (0)
134#define REG_WIDTH_TEST(r, w) do {} while (0)
135#endif
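/*
 * For illustration: with ISP1362_DEBUG defined, a declaration such as
 *	ISP1362_REG(HCCONTROL, 0x01, REG_WIDTH_32, REG_ACCESS_RW);
 * expands to
 *	static isp1362_reg_t ISP1362_REG_HCCONTROL = (0x01 | REG_WIDTH_32 | REG_ACCESS_RW);
 * so REG_ACCESS_TEST()/REG_WIDTH_TEST() can verify accesses at run time;
 * without ISP1362_DEBUG it reduces to the bare register number:
 *	static isp1362_reg_t ISP1362_REG_HCCONTROL = 0x01;
 */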
136
137/* OHCI compatible registers */
138/*
139 * Note: Some of the ISP1362 'OHCI' registers implement only
140 * a subset of the bits defined in the OHCI spec.
141 *
142 * Bitmasks for the individual bits of these registers are defined in "ohci.h"
143 */
144ISP1362_REG(HCREVISION, 0x00, REG_WIDTH_32, REG_ACCESS_R);
145ISP1362_REG(HCCONTROL, 0x01, REG_WIDTH_32, REG_ACCESS_RW);
146ISP1362_REG(HCCMDSTAT, 0x02, REG_WIDTH_32, REG_ACCESS_RW);
147ISP1362_REG(HCINTSTAT, 0x03, REG_WIDTH_32, REG_ACCESS_RW);
148ISP1362_REG(HCINTENB, 0x04, REG_WIDTH_32, REG_ACCESS_RW);
149ISP1362_REG(HCINTDIS, 0x05, REG_WIDTH_32, REG_ACCESS_RW);
150ISP1362_REG(HCFMINTVL, 0x0d, REG_WIDTH_32, REG_ACCESS_RW);
151ISP1362_REG(HCFMREM, 0x0e, REG_WIDTH_32, REG_ACCESS_RW);
152ISP1362_REG(HCFMNUM, 0x0f, REG_WIDTH_32, REG_ACCESS_RW);
153ISP1362_REG(HCLSTHRESH, 0x11, REG_WIDTH_32, REG_ACCESS_RW);
154ISP1362_REG(HCRHDESCA, 0x12, REG_WIDTH_32, REG_ACCESS_RW);
155ISP1362_REG(HCRHDESCB, 0x13, REG_WIDTH_32, REG_ACCESS_RW);
156ISP1362_REG(HCRHSTATUS, 0x14, REG_WIDTH_32, REG_ACCESS_RW);
157ISP1362_REG(HCRHPORT1, 0x15, REG_WIDTH_32, REG_ACCESS_RW);
158ISP1362_REG(HCRHPORT2, 0x16, REG_WIDTH_32, REG_ACCESS_RW);
159
160/* Philips ISP1362 specific registers */
161ISP1362_REG(HCHWCFG, 0x20, REG_WIDTH_16, REG_ACCESS_RW);
162#define HCHWCFG_DISABLE_SUSPEND (1 << 15)
163#define HCHWCFG_GLOBAL_PWRDOWN (1 << 14)
164#define HCHWCFG_PULLDOWN_DS1 (1 << 13)
165#define HCHWCFG_PULLDOWN_DS2 (1 << 12)
166#define HCHWCFG_CLKNOTSTOP (1 << 11)
167#define HCHWCFG_ANALOG_OC (1 << 10)
168#define HCHWCFG_ONEINT (1 << 9)
169#define HCHWCFG_DACK_MODE (1 << 8)
170#define HCHWCFG_ONEDMA (1 << 7)
171#define HCHWCFG_DACK_POL (1 << 6)
172#define HCHWCFG_DREQ_POL (1 << 5)
173#define HCHWCFG_DBWIDTH_MASK (0x03 << 3)
174#define HCHWCFG_DBWIDTH(n) (((n) << 3) & HCHWCFG_DBWIDTH_MASK)
175#define HCHWCFG_INT_POL (1 << 2)
176#define HCHWCFG_INT_TRIGGER (1 << 1)
177#define HCHWCFG_INT_ENABLE (1 << 0)
178
179ISP1362_REG(HCDMACFG, 0x21, REG_WIDTH_16, REG_ACCESS_RW);
180#define HCDMACFG_CTR_ENABLE (1 << 7)
181#define HCDMACFG_BURST_LEN_MASK (0x03 << 5)
182#define HCDMACFG_BURST_LEN(n) (((n) << 5) & HCDMACFG_BURST_LEN_MASK)
183#define HCDMACFG_BURST_LEN_1 HCDMACFG_BURST_LEN(0)
184#define HCDMACFG_BURST_LEN_4 HCDMACFG_BURST_LEN(1)
185#define HCDMACFG_BURST_LEN_8 HCDMACFG_BURST_LEN(2)
186#define HCDMACFG_DMA_ENABLE (1 << 4)
187#define HCDMACFG_BUF_TYPE_MASK (0x07 << 1)
188#define HCDMACFG_BUF_TYPE(n) (((n) << 1) & HCDMACFG_BUF_TYPE_MASK)
189#define HCDMACFG_BUF_ISTL0 HCDMACFG_BUF_TYPE(0)
190#define HCDMACFG_BUF_ISTL1 HCDMACFG_BUF_TYPE(1)
191#define HCDMACFG_BUF_INTL HCDMACFG_BUF_TYPE(2)
192#define HCDMACFG_BUF_ATL HCDMACFG_BUF_TYPE(3)
193#define HCDMACFG_BUF_DIRECT HCDMACFG_BUF_TYPE(4)
194#define HCDMACFG_DMA_RW_SELECT (1 << 0)
195
196ISP1362_REG(HCXFERCTR, 0x22, REG_WIDTH_16, REG_ACCESS_RW);
197
198ISP1362_REG(HCuPINT, 0x24, REG_WIDTH_16, REG_ACCESS_RW);
199#define HCuPINT_SOF (1 << 0)
200#define HCuPINT_ISTL0 (1 << 1)
201#define HCuPINT_ISTL1 (1 << 2)
202#define HCuPINT_EOT (1 << 3)
203#define HCuPINT_OPR (1 << 4)
204#define HCuPINT_SUSP (1 << 5)
205#define HCuPINT_CLKRDY (1 << 6)
206#define HCuPINT_INTL (1 << 7)
207#define HCuPINT_ATL (1 << 8)
208#define HCuPINT_OTG (1 << 9)
209
210ISP1362_REG(HCuPINTENB, 0x25, REG_WIDTH_16, REG_ACCESS_RW);
211/* same bit definitions apply as for HCuPINT */
212
213ISP1362_REG(HCCHIPID, 0x27, REG_WIDTH_16, REG_ACCESS_R);
214#define HCCHIPID_MASK 0xff00
215#define HCCHIPID_MAGIC 0x3600
216
217ISP1362_REG(HCSCRATCH, 0x28, REG_WIDTH_16, REG_ACCESS_RW);
218
219ISP1362_REG(HCSWRES, 0x29, REG_WIDTH_16, REG_ACCESS_W);
220#define HCSWRES_MAGIC 0x00f6
221
222ISP1362_REG(HCBUFSTAT, 0x2c, REG_WIDTH_16, REG_ACCESS_RW);
223#define HCBUFSTAT_ISTL0_FULL (1 << 0)
224#define HCBUFSTAT_ISTL1_FULL (1 << 1)
225#define HCBUFSTAT_INTL_ACTIVE (1 << 2)
226#define HCBUFSTAT_ATL_ACTIVE (1 << 3)
227#define HCBUFSTAT_RESET_HWPP (1 << 4)
228#define HCBUFSTAT_ISTL0_ACTIVE (1 << 5)
229#define HCBUFSTAT_ISTL1_ACTIVE (1 << 6)
230#define HCBUFSTAT_ISTL0_DONE (1 << 8)
231#define HCBUFSTAT_ISTL1_DONE (1 << 9)
232#define HCBUFSTAT_PAIRED_PTDPP (1 << 10)
233
234ISP1362_REG(HCDIRADDR, 0x32, REG_WIDTH_32, REG_ACCESS_RW);
235#define HCDIRADDR_ADDR_MASK 0x0000ffff
236#define HCDIRADDR_ADDR(n) (((n) << 0) & HCDIRADDR_ADDR_MASK)
237#define HCDIRADDR_COUNT_MASK 0xffff0000
238#define HCDIRADDR_COUNT(n) (((n) << 16) & HCDIRADDR_COUNT_MASK)
239ISP1362_REG(HCDIRDATA, 0x45, REG_WIDTH_16, REG_ACCESS_RW);
240
241ISP1362_REG(HCISTLBUFSZ, 0x30, REG_WIDTH_16, REG_ACCESS_RW);
242ISP1362_REG(HCISTL0PORT, 0x40, REG_WIDTH_16, REG_ACCESS_RW);
243ISP1362_REG(HCISTL1PORT, 0x42, REG_WIDTH_16, REG_ACCESS_RW);
244ISP1362_REG(HCISTLRATE, 0x47, REG_WIDTH_16, REG_ACCESS_RW);
245
246ISP1362_REG(HCINTLBUFSZ, 0x33, REG_WIDTH_16, REG_ACCESS_RW);
247ISP1362_REG(HCINTLPORT, 0x43, REG_WIDTH_16, REG_ACCESS_RW);
248ISP1362_REG(HCINTLBLKSZ, 0x53, REG_WIDTH_16, REG_ACCESS_RW);
249ISP1362_REG(HCINTLDONE, 0x17, REG_WIDTH_32, REG_ACCESS_R);
250ISP1362_REG(HCINTLSKIP, 0x18, REG_WIDTH_32, REG_ACCESS_RW);
251ISP1362_REG(HCINTLLAST, 0x19, REG_WIDTH_32, REG_ACCESS_RW);
252ISP1362_REG(HCINTLCURR, 0x1a, REG_WIDTH_16, REG_ACCESS_R);
253
254ISP1362_REG(HCATLBUFSZ, 0x34, REG_WIDTH_16, REG_ACCESS_RW);
255ISP1362_REG(HCATLPORT, 0x44, REG_WIDTH_16, REG_ACCESS_RW);
256ISP1362_REG(HCATLBLKSZ, 0x54, REG_WIDTH_16, REG_ACCESS_RW);
257ISP1362_REG(HCATLDONE, 0x1b, REG_WIDTH_32, REG_ACCESS_R);
258ISP1362_REG(HCATLSKIP, 0x1c, REG_WIDTH_32, REG_ACCESS_RW);
259ISP1362_REG(HCATLLAST, 0x1d, REG_WIDTH_32, REG_ACCESS_RW);
260ISP1362_REG(HCATLCURR, 0x1e, REG_WIDTH_16, REG_ACCESS_R);
261
262ISP1362_REG(HCATLDTC, 0x51, REG_WIDTH_16, REG_ACCESS_RW);
263ISP1362_REG(HCATLDTCTO, 0x52, REG_WIDTH_16, REG_ACCESS_RW);
264
265
266ISP1362_REG(OTGCONTROL, 0x62, REG_WIDTH_16, REG_ACCESS_RW);
267ISP1362_REG(OTGSTATUS, 0x67, REG_WIDTH_16, REG_ACCESS_R);
268ISP1362_REG(OTGINT, 0x68, REG_WIDTH_16, REG_ACCESS_RW);
269ISP1362_REG(OTGINTENB, 0x69, REG_WIDTH_16, REG_ACCESS_RW);
270ISP1362_REG(OTGTIMER, 0x6A, REG_WIDTH_16, REG_ACCESS_RW);
271ISP1362_REG(OTGALTTMR, 0x6C, REG_WIDTH_16, REG_ACCESS_RW);
272
273/* Philips transfer descriptor, cpu-endian */
274struct ptd {
275 u16 count;
276#define PTD_COUNT_MSK (0x3ff << 0)
277#define PTD_TOGGLE_MSK (1 << 10)
278#define PTD_ACTIVE_MSK (1 << 11)
279#define PTD_CC_MSK (0xf << 12)
280 u16 mps;
281#define PTD_MPS_MSK (0x3ff << 0)
282#define PTD_SPD_MSK (1 << 10)
283#define PTD_LAST_MSK (1 << 11)
284#define PTD_EP_MSK (0xf << 12)
285 u16 len;
286#define PTD_LEN_MSK (0x3ff << 0)
287#define PTD_DIR_MSK (3 << 10)
288#define PTD_DIR_SETUP (0)
289#define PTD_DIR_OUT (1)
290#define PTD_DIR_IN (2)
291 u16 faddr;
292#define PTD_FA_MSK (0x7f << 0)
293/* PTD Byte 7: [StartingFrame (if ISO PTD) | StartingFrame[0..4], PollingRate[0..2] (if INT PTD)] */
294#define PTD_SF_ISO_MSK (0xff << 8)
295#define PTD_SF_INT_MSK (0x1f << 8)
296#define PTD_PR_MSK (0x07 << 13)
297} __attribute__ ((packed, aligned(2)));
298#define PTD_HEADER_SIZE sizeof(struct ptd)
299
300/* ------------------------------------------------------------------------- */
301/* Copied from ohci.h: */
302/*
303 * Hardware transfer status codes -- CC from PTD
304 */
305#define PTD_CC_NOERROR 0x00
306#define PTD_CC_CRC 0x01
307#define PTD_CC_BITSTUFFING 0x02
308#define PTD_CC_DATATOGGLEM 0x03
309#define PTD_CC_STALL 0x04
310#define PTD_DEVNOTRESP 0x05
311#define PTD_PIDCHECKFAIL 0x06
312#define PTD_UNEXPECTEDPID 0x07
313#define PTD_DATAOVERRUN 0x08
314#define PTD_DATAUNDERRUN 0x09
315 /* 0x0A, 0x0B reserved for hardware */
316#define PTD_BUFFEROVERRUN 0x0C
317#define PTD_BUFFERUNDERRUN 0x0D
318 /* 0x0E, 0x0F reserved for HCD */
319#define PTD_NOTACCESSED 0x0F
320
321
322/* map OHCI TD status codes (CC) to errno values */
323static const int cc_to_error[16] = {
324 /* No Error */ 0,
325 /* CRC Error */ -EILSEQ,
326 /* Bit Stuff */ -EPROTO,
327 /* Data Togg */ -EILSEQ,
328 /* Stall */ -EPIPE,
329 /* DevNotResp */ -ETIMEDOUT,
330 /* PIDCheck */ -EPROTO,
331 /* UnExpPID */ -EPROTO,
332 /* DataOver */ -EOVERFLOW,
333 /* DataUnder */ -EREMOTEIO,
334 /* (for hw) */ -EIO,
335 /* (for hw) */ -EIO,
336 /* BufferOver */ -ECOMM,
337 /* BuffUnder */ -ENOSR,
338 /* (for HCD) */ -EALREADY,
339 /* (for HCD) */ -EALREADY
340};
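/*
 * Typical use (a sketch, not a quote from the driver): map the completion
 * code of a finished PTD to the errno reported for its URB, e.g.
 *	status = cc_to_error[PTD_GET_CC(&ep->ptd)];
 */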
341
342
343/*
344 * HcControl (control) register masks
345 */
346#define OHCI_CTRL_HCFS (3 << 6) /* host controller functional state */
347#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
348#define OHCI_CTRL_RWE (1 << 10) /* remote wakeup enable */
349
350/* pre-shifted values for HCFS */
351# define OHCI_USB_RESET (0 << 6)
352# define OHCI_USB_RESUME (1 << 6)
353# define OHCI_USB_OPER (2 << 6)
354# define OHCI_USB_SUSPEND (3 << 6)
355
356/*
357 * HcCommandStatus (cmdstatus) register masks
358 */
359#define OHCI_HCR (1 << 0) /* host controller reset */
360#define OHCI_SOC (3 << 16) /* scheduling overrun count */
361
362/*
363 * masks used with interrupt registers:
364 * HcInterruptStatus (intrstatus)
365 * HcInterruptEnable (intrenable)
366 * HcInterruptDisable (intrdisable)
367 */
368#define OHCI_INTR_SO (1 << 0) /* scheduling overrun */
369#define OHCI_INTR_WDH (1 << 1) /* writeback of done_head */
370#define OHCI_INTR_SF (1 << 2) /* start frame */
371#define OHCI_INTR_RD (1 << 3) /* resume detect */
372#define OHCI_INTR_UE (1 << 4) /* unrecoverable error */
373#define OHCI_INTR_FNO (1 << 5) /* frame number overflow */
374#define OHCI_INTR_RHSC (1 << 6) /* root hub status change */
375#define OHCI_INTR_OC (1 << 30) /* ownership change */
376#define OHCI_INTR_MIE (1 << 31) /* master interrupt enable */
377
378/* roothub.portstatus [i] bits */
379#define RH_PS_CCS 0x00000001 /* current connect status */
380#define RH_PS_PES 0x00000002 /* port enable status*/
381#define RH_PS_PSS 0x00000004 /* port suspend status */
382#define RH_PS_POCI 0x00000008 /* port over current indicator */
383#define RH_PS_PRS 0x00000010 /* port reset status */
384#define RH_PS_PPS 0x00000100 /* port power status */
385#define RH_PS_LSDA 0x00000200 /* low speed device attached */
386#define RH_PS_CSC 0x00010000 /* connect status change */
387#define RH_PS_PESC 0x00020000 /* port enable status change */
388#define RH_PS_PSSC 0x00040000 /* port suspend status change */
389#define RH_PS_OCIC 0x00080000 /* over current indicator change */
390#define RH_PS_PRSC 0x00100000 /* port reset status change */
391
392/* roothub.status bits */
393#define RH_HS_LPS 0x00000001 /* local power status */
394#define RH_HS_OCI 0x00000002 /* over current indicator */
395#define RH_HS_DRWE 0x00008000 /* device remote wakeup enable */
396#define RH_HS_LPSC 0x00010000 /* local power status change */
397#define RH_HS_OCIC 0x00020000 /* over current indicator change */
398#define RH_HS_CRWE 0x80000000 /* clear remote wakeup enable */
399
400/* roothub.b masks */
401#define RH_B_DR 0x0000ffff /* device removable flags */
402#define RH_B_PPCM 0xffff0000 /* port power control mask */
403
404/* roothub.a masks */
405#define RH_A_NDP (0xff << 0) /* number of downstream ports */
406#define RH_A_PSM (1 << 8) /* power switching mode */
407#define RH_A_NPS (1 << 9) /* no power switching */
408#define RH_A_DT (1 << 10) /* device type (mbz) */
409#define RH_A_OCPM (1 << 11) /* over current protection mode */
410#define RH_A_NOCP (1 << 12) /* no over current protection */
411#define RH_A_POTPGT (0xff << 24) /* power on to power good time */
412
413#define FI 0x2edf /* 12000 bits per frame (-1) */
414#define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7))
415#define LSTHRESH 0x628 /* lowspeed bit threshold */
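
/*
 * Worked example (for reference only): with the default frame interval
 * FI = 0x2edf (11999 bit times), FSMP(FI) evaluates to
 * (6 * (11999 - 210)) / 7 = 70734 / 7 = 10104 = 0x2778, i.e. the largest
 * data packet size (in bit times) the HC may start without risking a
 * scheduling overrun at the end of the frame.
 */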
416
417/* ------------------------------------------------------------------------- */
418
419/* PTD accessor macros. */
420#define PTD_GET_COUNT(p) (((p)->count & PTD_COUNT_MSK) >> 0)
421#define PTD_COUNT(v) (((v) << 0) & PTD_COUNT_MSK)
422#define PTD_GET_TOGGLE(p) (((p)->count & PTD_TOGGLE_MSK) >> 10)
423#define PTD_TOGGLE(v) (((v) << 10) & PTD_TOGGLE_MSK)
424#define PTD_GET_ACTIVE(p) (((p)->count & PTD_ACTIVE_MSK) >> 11)
425#define PTD_ACTIVE(v) (((v) << 11) & PTD_ACTIVE_MSK)
426#define PTD_GET_CC(p) (((p)->count & PTD_CC_MSK) >> 12)
427#define PTD_CC(v) (((v) << 12) & PTD_CC_MSK)
428#define PTD_GET_MPS(p) (((p)->mps & PTD_MPS_MSK) >> 0)
429#define PTD_MPS(v) (((v) << 0) & PTD_MPS_MSK)
430#define PTD_GET_SPD(p) (((p)->mps & PTD_SPD_MSK) >> 10)
431#define PTD_SPD(v) (((v) << 10) & PTD_SPD_MSK)
432#define PTD_GET_LAST(p) (((p)->mps & PTD_LAST_MSK) >> 11)
433#define PTD_LAST(v) (((v) << 11) & PTD_LAST_MSK)
434#define PTD_GET_EP(p) (((p)->mps & PTD_EP_MSK) >> 12)
435#define PTD_EP(v) (((v) << 12) & PTD_EP_MSK)
436#define PTD_GET_LEN(p) (((p)->len & PTD_LEN_MSK) >> 0)
437#define PTD_LEN(v) (((v) << 0) & PTD_LEN_MSK)
438#define PTD_GET_DIR(p) (((p)->len & PTD_DIR_MSK) >> 10)
439#define PTD_DIR(v) (((v) << 10) & PTD_DIR_MSK)
440#define PTD_GET_FA(p) (((p)->faddr & PTD_FA_MSK) >> 0)
441#define PTD_FA(v) (((v) << 0) & PTD_FA_MSK)
442#define PTD_GET_SF_INT(p) (((p)->faddr & PTD_SF_INT_MSK) >> 8)
443#define PTD_SF_INT(v) (((v) << 8) & PTD_SF_INT_MSK)
444#define PTD_GET_SF_ISO(p) (((p)->faddr & PTD_SF_ISO_MSK) >> 8)
445#define PTD_SF_ISO(v) (((v) << 8) & PTD_SF_ISO_MSK)
446#define PTD_GET_PR(p) (((p)->faddr & PTD_PR_MSK) >> 13)
447#define PTD_PR(v) (((v) << 13) & PTD_PR_MSK)
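
/*
 * Illustrative sketch only (this helper is not part of the driver): the
 * accessor macros above are meant to be OR'ed together when a PTD header is
 * assembled, roughly like this (dir is one of the PTD_DIR_* values defined
 * in isp1362.h):
 */
static inline void __attribute__((unused)) example_fill_ptd(struct ptd *ptd, u16 len, u8 dir,
		u8 epnum, u16 maxpacket, u8 devnum, u8 toggle)
{
	/* mark the PTD active with a "not accessed" completion code */
	ptd->count = PTD_CC(PTD_NOTACCESSED) | PTD_ACTIVE(1) | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(maxpacket) | PTD_EP(epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(devnum);
}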
448
449#define LOG2_PERIODIC_SIZE 5 /* arbitrary; this matches OHCI */
450#define PERIODIC_SIZE (1 << LOG2_PERIODIC_SIZE)
451
452struct isp1362_ep {
453 struct usb_host_endpoint *hep;
454 struct usb_device *udev;
455
456 /* Philips transfer descriptor */
457 struct ptd ptd;
458
459 u8 maxpacket;
460 u8 epnum;
461 u8 nextpid;
462 u16 error_count;
463 u16 length; /* of current packet */
464 s16 ptd_offset; /* buffer offset in ISP1362 where
465 PTD has been stored
466 (for access through HCDIRDATA) */
467 int ptd_index;
468 int num_ptds;
469 void *data; /* to databuf */
470 /* queue of active EPs (the ones transmitted to the chip) */
471 struct list_head active;
472
473 /* periodic schedule */
474 u8 branch;
475 u16 interval;
476 u16 load;
477 u16 last_iso;
478
479 /* async schedule */
480 struct list_head schedule; /* list of all EPs that need processing */
481 struct list_head remove_list;
482 int num_req;
483};
484
485struct isp1362_ep_queue {
486 struct list_head active; /* list of PTDs currently processed by HC */
487 atomic_t finishing;
488 unsigned long buf_map;
489 unsigned long skip_map;
490 int free_ptd;
491 u16 buf_start;
492 u16 buf_size;
493 u16 blk_size; /* PTD buffer block size for ATL and INTL */
494 u8 buf_count;
495 u8 buf_avail;
496 char name[16];
497
498 /* for statistical tracking */
499 u8 stat_maxptds; /* Max # of ptds seen simultaneously in fifo */
500 u8 ptd_count; /* number of ptds submitted to this queue */
501};
502
503struct isp1362_hcd {
504 spinlock_t lock;
505 void __iomem *addr_reg;
506 void __iomem *data_reg;
507
508 struct isp1362_platform_data *board;
509
510 struct proc_dir_entry *pde;
511 unsigned long stat1, stat2, stat4, stat8, stat16;
512
513 /* HC registers */
514 u32 intenb; /* "OHCI" interrupts */
515 u16 irqenb; /* uP interrupts */
516
517 /* Root hub registers */
518 u32 rhdesca;
519 u32 rhdescb;
520 u32 rhstatus;
521 u32 rhport[MAX_ROOT_PORTS];
522 unsigned long next_statechange;
523
524 /* HC control reg shadow copy */
525 u32 hc_control;
526
527 /* async schedule: control, bulk */
528 struct list_head async;
529
530 /* periodic schedule: int */
531 u16 load[PERIODIC_SIZE];
532 struct list_head periodic;
533 u16 fmindex;
534
535 /* periodic schedule: isochronous */
536 struct list_head isoc;
537 int istl_flip:1;
538 int irq_active:1;
539
540 /* Schedules for the current frame */
541 struct isp1362_ep_queue atl_queue;
542 struct isp1362_ep_queue intl_queue;
543 struct isp1362_ep_queue istl_queue[2];
544
545 /* list of PTDs retrieved from HC */
546 struct list_head remove_list;
547 enum {
548 ISP1362_INT_SOF,
549 ISP1362_INT_ISTL0,
550 ISP1362_INT_ISTL1,
551 ISP1362_INT_EOT,
552 ISP1362_INT_OPR,
553 ISP1362_INT_SUSP,
554 ISP1362_INT_CLKRDY,
555 ISP1362_INT_INTL,
556 ISP1362_INT_ATL,
557 ISP1362_INT_OTG,
558 NUM_ISP1362_IRQS
559 } IRQ_NAMES;
560 unsigned int irq_stat[NUM_ISP1362_IRQS];
561 int req_serial;
562};
563
564static inline const char *ISP1362_INT_NAME(int n)
565{
566 switch (n) {
567 case ISP1362_INT_SOF: return "SOF";
568 case ISP1362_INT_ISTL0: return "ISTL0";
569 case ISP1362_INT_ISTL1: return "ISTL1";
570 case ISP1362_INT_EOT: return "EOT";
571 case ISP1362_INT_OPR: return "OPR";
572 case ISP1362_INT_SUSP: return "SUSP";
573 case ISP1362_INT_CLKRDY: return "CLKRDY";
574 case ISP1362_INT_INTL: return "INTL";
575 case ISP1362_INT_ATL: return "ATL";
576 case ISP1362_INT_OTG: return "OTG";
577 default: return "unknown";
578 }
579}
580
581static inline void ALIGNSTAT(struct isp1362_hcd *isp1362_hcd, void *ptr)
582{
583 unsigned p = (unsigned)ptr;
584 if (!(p & 0xf))
585 isp1362_hcd->stat16++;
586 else if (!(p & 0x7))
587 isp1362_hcd->stat8++;
588 else if (!(p & 0x3))
589 isp1362_hcd->stat4++;
590 else if (!(p & 0x1))
591 isp1362_hcd->stat2++;
592 else
593 isp1362_hcd->stat1++;
594}
595
596static inline struct isp1362_hcd *hcd_to_isp1362_hcd(struct usb_hcd *hcd)
597{
598 return (struct isp1362_hcd *) (hcd->hcd_priv);
599}
600
601static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd)
602{
603 return container_of((void *)isp1362_hcd, struct usb_hcd, hcd_priv);
604}
605
606#define frame_before(f1, f2) ((s16)((u16)f1 - (u16)f2) < 0)
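
/*
 * Example (for reference only): the cast to s16 makes the comparison safe
 * across the 16 bit frame number wraparound, e.g. frame_before(0xfffe, 0x0002)
 * is true because (u16)(0xfffe - 0x0002) = 0xfffc, which is negative as an
 * s16, whereas a plain "<" comparison would get this case wrong.
 */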
607
608/*
609 * ISP1362 HW Interface
610 */
611
612#ifdef ISP1362_DEBUG
613#define DBG(level, fmt...) \
614 do { \
615 if (dbg_level > level) \
616 pr_debug(fmt); \
617 } while (0)
618#define _DBG(level, fmt...) \
619 do { \
620 if (dbg_level > level) \
621 printk(fmt); \
622 } while (0)
623#else
624#define DBG(fmt...) do {} while (0)
625#define _DBG DBG
626#endif
627
628#ifdef VERBOSE
629# define VDBG(fmt...) DBG(3, fmt)
630#else
631# define VDBG(fmt...) do {} while (0)
632#endif
633
634#ifdef REGISTERS
635# define RDBG(fmt...) DBG(1, fmt)
636#else
637# define RDBG(fmt...) do {} while (0)
638#endif
639
640#ifdef URB_TRACE
641#define URB_DBG(fmt...) DBG(0, fmt)
642#else
643#define URB_DBG(fmt...) do {} while (0)
644#endif
645
646
647#if USE_PLATFORM_DELAY
648#if USE_NDELAY
649#error USE_PLATFORM_DELAY and USE_NDELAY defined simultaneously.
650#endif
651#define isp1362_delay(h, d) (h)->board->delay(isp1362_hcd_to_hcd(h)->self.controller, d)
652#elif USE_NDELAY
653#define isp1362_delay(h, d) ndelay(d)
654#else
655#define isp1362_delay(h, d) do {} while (0)
656#endif
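
/*
 * Example only: with USE_PLATFORM_DELAY the board support code is expected to
 * provide the delay hook in its struct isp1362_platform_data. A minimal
 * (hypothetical) implementation that simply burns the requested number of
 * nanoseconds could look like:
 *
 *	static void myboard_isp1362_delay(struct device *dev, int delay)
 *	{
 *		ndelay(delay);
 *	}
 *
 * A real board might instead rely on memory controller wait states or dummy
 * bus cycles rather than busy-waiting.
 */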
657
658#define get_urb(ep) ({ \
659 BUG_ON(list_empty(&ep->hep->urb_list)); \
660 container_of(ep->hep->urb_list.next, struct urb, urb_list); \
661})
662
663/* basic access functions for ISP1362 chip registers */
664/* NOTE: The contents of the address pointer register cannot be read back! The driver must
665 * ensure that all register accesses are performed with interrupts disabled, since the
666 * interrupt handler has no way of restoring the previous state.
667 */
668static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t reg)
669{
670 /*_BUG_ON((reg & ISP1362_REG_WRITE_OFFSET) && !(reg & REG_ACCESS_W));*/
671 REG_ACCESS_TEST(reg);
672 _BUG_ON(!irqs_disabled());
673 DUMMY_DELAY_ACCESS;
674 writew(ISP1362_REG_NO(reg), isp1362_hcd->addr_reg);
675 DUMMY_DELAY_ACCESS;
676 isp1362_delay(isp1362_hcd, 1);
677}
678
679static void isp1362_write_data16(struct isp1362_hcd *isp1362_hcd, u16 val)
680{
681 _BUG_ON(!irqs_disabled());
682 DUMMY_DELAY_ACCESS;
683 writew(val, isp1362_hcd->data_reg);
684}
685
686static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd)
687{
688 u16 val;
689
690 _BUG_ON(!irqs_disabled());
691 DUMMY_DELAY_ACCESS;
692 val = readw(isp1362_hcd->data_reg);
693
694 return val;
695}
696
697static void isp1362_write_data32(struct isp1362_hcd *isp1362_hcd, u32 val)
698{
699 _BUG_ON(!irqs_disabled());
700#if USE_32BIT
701 DUMMY_DELAY_ACCESS;
702 writel(val, isp1362_hcd->data_reg);
703#else
704 DUMMY_DELAY_ACCESS;
705 writew((u16)val, isp1362_hcd->data_reg);
706 DUMMY_DELAY_ACCESS;
707 writew(val >> 16, isp1362_hcd->data_reg);
708#endif
709}
710
711static u32 isp1362_read_data32(struct isp1362_hcd *isp1362_hcd)
712{
713 u32 val;
714
715 _BUG_ON(!irqs_disabled());
716#if USE_32BIT
717 DUMMY_DELAY_ACCESS;
718 val = readl(isp1362_hcd->data_reg);
719#else
720 DUMMY_DELAY_ACCESS;
721 val = (u32)readw(isp1362_hcd->data_reg);
722 DUMMY_DELAY_ACCESS;
723 val |= (u32)readw(isp1362_hcd->data_reg) << 16;
724#endif
725 return val;
726}
727
728/* use string I/O (readsl/writesl, insw/outsw) to access the fifo whenever possible */
729/* assume HCDIRDATA or XFERCTR & addr_reg have been set up */
730static void isp1362_read_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len)
731{
732 u8 *dp = buf;
733 u16 data;
734
735 if (!len)
736 return;
737
738 _BUG_ON(!irqs_disabled());
739
740 RDBG("%s: Reading %d bytes from fifo to mem @ %p\n", __func__, len, buf);
741#if USE_32BIT
742 if (len >= 4) {
743 RDBG("%s: Using readsl for %d dwords\n", __func__, len >> 2);
744 readsl(isp1362_hcd->data_reg, dp, len >> 2);
745 dp += len & ~3;
746 len &= 3;
747 }
748#endif
749 if (len >= 2) {
750 RDBG("%s: Using readsw for %d words\n", __func__, len >> 1);
751 insw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1);
752 dp += len & ~1;
753 len &= 1;
754 }
755
756 BUG_ON(len & ~1);
757 if (len > 0) {
758 data = isp1362_read_data16(isp1362_hcd);
759 RDBG("%s: Reading trailing byte %02x to mem @ %08x\n", __func__,
760 (u8)data, (u32)dp);
761 *dp = (u8)data;
762 }
763}
764
765static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len)
766{
767 u8 *dp = buf;
768 u16 data;
769
770 if (!len)
771 return;
772
773 if ((unsigned)dp & 0x1) {
774 /* not aligned */
775 for (; len > 1; len -= 2) {
776 data = *dp++;
777 data |= *dp++ << 8;
778 isp1362_write_data16(isp1362_hcd, data);
779 }
780 if (len)
781 isp1362_write_data16(isp1362_hcd, *dp);
782 return;
783 }
784
785 _BUG_ON(!irqs_disabled());
786
787 RDBG("%s: Writing %d bytes to fifo from memory @ %p\n", __func__, len, buf);
788#if USE_32BIT
789 if (len >= 4) {
790 RDBG("%s: Using writesl for %d dwords\n", __func__, len >> 2);
791 writesl(isp1362_hcd->data_reg, dp, len >> 2);
792 dp += len & ~3;
793 len &= 3;
794 }
795#endif
796 if (len >= 2) {
797 RDBG("%s: Using writesw for %d words\n", __func__, len >> 1);
798 outsw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1);
799 dp += len & ~1;
800 len &= 1;
801 }
802
803 BUG_ON(len & ~1);
804 if (len > 0) {
805 /* finally write any trailing byte; we don't need to care
806 * about the high byte of the last word written
807 */
808 data = (u16)*dp;
809 RDBG("%s: Sending trailing byte %02x from mem @ %08x\n", __func__,
810 data, (u32)dp);
811 isp1362_write_data16(isp1362_hcd, data);
812 }
813}
814
815#define isp1362_read_reg16(d, r) ({ \
816 u16 __v; \
817 REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16); \
818 isp1362_write_addr(d, ISP1362_REG_##r); \
819 __v = isp1362_read_data16(d); \
820 RDBG("%s: Read %04x from %s[%02x]\n", __func__, __v, #r, \
821 ISP1362_REG_NO(ISP1362_REG_##r)); \
822 __v; \
823})
824
825#define isp1362_read_reg32(d, r) ({ \
826 u32 __v; \
827 REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32); \
828 isp1362_write_addr(d, ISP1362_REG_##r); \
829 __v = isp1362_read_data32(d); \
830 RDBG("%s: Read %08x from %s[%02x]\n", __func__, __v, #r, \
831 ISP1362_REG_NO(ISP1362_REG_##r)); \
832 __v; \
833})
834
835#define isp1362_write_reg16(d, r, v) { \
836 REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16); \
837 isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET); \
838 isp1362_write_data16(d, (u16)(v)); \
839 RDBG("%s: Wrote %04x to %s[%02x]\n", __func__, (u16)(v), #r, \
840 ISP1362_REG_NO(ISP1362_REG_##r)); \
841}
842
843#define isp1362_write_reg32(d, r, v) { \
844 REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32); \
845 isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET); \
846 isp1362_write_data32(d, (u32)(v)); \
847 RDBG("%s: Wrote %08x to %s[%02x]\n", __func__, (u32)(v), #r, \
848 ISP1362_REG_NO(ISP1362_REG_##r)); \
849}
850
851#define isp1362_set_mask16(d, r, m) { \
852 u16 __v; \
853 __v = isp1362_read_reg16(d, r); \
854 if ((__v | m) != __v) \
855 isp1362_write_reg16(d, r, __v | m); \
856}
857
858#define isp1362_clr_mask16(d, r, m) { \
859 u16 __v; \
860 __v = isp1362_read_reg16(d, r); \
861 if ((__v & ~m) != __v) \
862 isp1362_write_reg16(d, r, __v & ~m); \
863}
864
865#define isp1362_set_mask32(d, r, m) { \
866 u32 __v; \
867 __v = isp1362_read_reg32(d, r); \
868 if ((__v | m) != __v) \
869 isp1362_write_reg32(d, r, __v | m); \
870}
871
872#define isp1362_clr_mask32(d, r, m) { \
873 u32 __v; \
874 __v = isp1362_read_reg32(d, r); \
875 if ((__v & ~m) != __v) \
876 isp1362_write_reg32(d, r, __v & ~m); \
877}
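
/*
 * Usage sketch (this function is not called anywhere): the register accessors
 * above require interrupts to be off, so code running outside the interrupt
 * handler typically wraps them in the HCD spinlock like this:
 */
static u16 __attribute__((unused)) example_read_chipid_locked(struct isp1362_hcd *isp1362_hcd)
{
	unsigned long flags;
	u16 chipid;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return chipid;
}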
878
879#ifdef ISP1362_DEBUG
880#define isp1362_show_reg(d, r) { \
881 if ((ISP1362_REG_##r & REG_WIDTH_MASK) == REG_WIDTH_32) \
882 DBG(0, "%-12s[%02x]: %08x\n", #r, \
883 ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg32(d, r)); \
884 else \
885 DBG(0, "%-12s[%02x]: %04x\n", #r, \
886 ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg16(d, r)); \
887}
888#else
889#define isp1362_show_reg(d, r) do {} while (0)
890#endif
891
892static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *isp1362_hcd)
893{
894 isp1362_show_reg(isp1362_hcd, HCREVISION);
895 isp1362_show_reg(isp1362_hcd, HCCONTROL);
896 isp1362_show_reg(isp1362_hcd, HCCMDSTAT);
897 isp1362_show_reg(isp1362_hcd, HCINTSTAT);
898 isp1362_show_reg(isp1362_hcd, HCINTENB);
899 isp1362_show_reg(isp1362_hcd, HCFMINTVL);
900 isp1362_show_reg(isp1362_hcd, HCFMREM);
901 isp1362_show_reg(isp1362_hcd, HCFMNUM);
902 isp1362_show_reg(isp1362_hcd, HCLSTHRESH);
903 isp1362_show_reg(isp1362_hcd, HCRHDESCA);
904 isp1362_show_reg(isp1362_hcd, HCRHDESCB);
905 isp1362_show_reg(isp1362_hcd, HCRHSTATUS);
906 isp1362_show_reg(isp1362_hcd, HCRHPORT1);
907 isp1362_show_reg(isp1362_hcd, HCRHPORT2);
908
909 isp1362_show_reg(isp1362_hcd, HCHWCFG);
910 isp1362_show_reg(isp1362_hcd, HCDMACFG);
911 isp1362_show_reg(isp1362_hcd, HCXFERCTR);
912 isp1362_show_reg(isp1362_hcd, HCuPINT);
913
914 if (in_interrupt())
915 DBG(0, "%-12s[%02x]: %04x\n", "HCuPINTENB",
916 ISP1362_REG_NO(ISP1362_REG_HCuPINTENB), isp1362_hcd->irqenb);
917 else
918 isp1362_show_reg(isp1362_hcd, HCuPINTENB);
919 isp1362_show_reg(isp1362_hcd, HCCHIPID);
920 isp1362_show_reg(isp1362_hcd, HCSCRATCH);
921 isp1362_show_reg(isp1362_hcd, HCBUFSTAT);
922 isp1362_show_reg(isp1362_hcd, HCDIRADDR);
923 /* Access would advance fifo
924 * isp1362_show_reg(isp1362_hcd, HCDIRDATA);
925 */
926 isp1362_show_reg(isp1362_hcd, HCISTLBUFSZ);
927 isp1362_show_reg(isp1362_hcd, HCISTLRATE);
928 isp1362_show_reg(isp1362_hcd, HCINTLBUFSZ);
929 isp1362_show_reg(isp1362_hcd, HCINTLBLKSZ);
930 isp1362_show_reg(isp1362_hcd, HCINTLDONE);
931 isp1362_show_reg(isp1362_hcd, HCINTLSKIP);
932 isp1362_show_reg(isp1362_hcd, HCINTLLAST);
933 isp1362_show_reg(isp1362_hcd, HCINTLCURR);
934 isp1362_show_reg(isp1362_hcd, HCATLBUFSZ);
935 isp1362_show_reg(isp1362_hcd, HCATLBLKSZ);
936 /* only valid after ATL_DONE interrupt
937 * isp1362_show_reg(isp1362_hcd, HCATLDONE);
938 */
939 isp1362_show_reg(isp1362_hcd, HCATLSKIP);
940 isp1362_show_reg(isp1362_hcd, HCATLLAST);
941 isp1362_show_reg(isp1362_hcd, HCATLCURR);
942 isp1362_show_reg(isp1362_hcd, HCATLDTC);
943 isp1362_show_reg(isp1362_hcd, HCATLDTCTO);
944}
945
946static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u16 len)
947{
948 _BUG_ON(offset & 1);
949 _BUG_ON(offset >= ISP1362_BUF_SIZE);
950 _BUG_ON(len > ISP1362_BUF_SIZE);
951 _BUG_ON(offset + len > ISP1362_BUF_SIZE);
952 len = (len + 1) & ~1;
953
954 isp1362_clr_mask16(isp1362_hcd, HCDMACFG, HCDMACFG_CTR_ENABLE);
955 isp1362_write_reg32(isp1362_hcd, HCDIRADDR,
956 HCDIRADDR_ADDR(offset) | HCDIRADDR_COUNT(len));
957}
958
959static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
960{
961 _BUG_ON(offset & 1);
962
963 isp1362_write_diraddr(isp1362_hcd, offset, len);
964
965 DBG(3, "%s: Reading %d bytes from buffer @%04x to memory @ %08x\n", __func__,
966 len, offset, (u32)buf);
967
968 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
969 _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
970
971 isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA);
972
973 isp1362_read_fifo(isp1362_hcd, buf, len);
974 _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
975 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
976 _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
977}
978
979static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
980{
981 _BUG_ON(offset & 1);
982
983 isp1362_write_diraddr(isp1362_hcd, offset, len);
984
985 DBG(3, "%s: Writing %d bytes to buffer @%04x from memory @ %08x\n", __func__,
986 len, offset, (u32)buf);
987
988 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
989 _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
990
991 isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA | ISP1362_REG_WRITE_OFFSET);
992 isp1362_write_fifo(isp1362_hcd, buf, len);
993
994 _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
995 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
996 _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
997}
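
/*
 * Illustrative sketch only (not part of the driver): a PTD that the chip has
 * processed can be read back from the on-chip buffer memory via the offset
 * remembered in the endpoint, e.g.:
 */
static void __attribute__((unused)) example_read_back_ptd(struct isp1362_hcd *isp1362_hcd,
		struct isp1362_ep *ep)
{
	/* caller must hold isp1362_hcd->lock with interrupts disabled */
	isp1362_read_buffer(isp1362_hcd, &ep->ptd, ep->ptd_offset, sizeof(ep->ptd));
}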
998
999static void __attribute__((unused)) dump_data(char *buf, int len)
1000{
1001 if (dbg_level > 0) {
1002 int k;
1003 int lf = 0;
1004
1005 for (k = 0; k < len; ++k) {
1006 if (!lf)
1007 DBG(0, "%04x:", k);
1008 printk(" %02x", ((u8 *) buf)[k]);
1009 lf = 1;
1010 if (!k)
1011 continue;
1012 if (k % 16 == 15) {
1013 printk("\n");
1014 lf = 0;
1015 continue;
1016 }
1017 if (k % 8 == 7)
1018 printk(" ");
1019 if (k % 4 == 3)
1020 printk(" ");
1021 }
1022 if (lf)
1023 printk("\n");
1024 }
1025}
1026
1027#if defined(ISP1362_DEBUG) && defined(PTD_TRACE)
1028
1029static void dump_ptd(struct ptd *ptd)
1030{
1031 DBG(0, "EP %p: CC=%x EP=%d DIR=%x CNT=%d LEN=%d MPS=%d TGL=%x ACT=%x FA=%d SPD=%x SF=%x PR=%x LST=%x\n",
1032 container_of(ptd, struct isp1362_ep, ptd),
1033 PTD_GET_CC(ptd), PTD_GET_EP(ptd), PTD_GET_DIR(ptd),
1034 PTD_GET_COUNT(ptd), PTD_GET_LEN(ptd), PTD_GET_MPS(ptd),
1035 PTD_GET_TOGGLE(ptd), PTD_GET_ACTIVE(ptd), PTD_GET_FA(ptd),
1036 PTD_GET_SPD(ptd), PTD_GET_SF_INT(ptd), PTD_GET_PR(ptd), PTD_GET_LAST(ptd));
1037 DBG(0, " %04x %04x %04x %04x\n", ptd->count, ptd->mps, ptd->len, ptd->faddr);
1038}
1039
1040static void dump_ptd_out_data(struct ptd *ptd, u8 *buf)
1041{
1042 if (dbg_level > 0) {
1043 if (PTD_GET_DIR(ptd) != PTD_DIR_IN && PTD_GET_LEN(ptd)) {
1044 DBG(0, "--out->\n");
1045 dump_data(buf, PTD_GET_LEN(ptd));
1046 }
1047 }
1048}
1049
1050static void dump_ptd_in_data(struct ptd *ptd, u8 *buf)
1051{
1052 if (dbg_level > 0) {
1053 if (PTD_GET_DIR(ptd) == PTD_DIR_IN && PTD_GET_COUNT(ptd)) {
1054 DBG(0, "<--in--\n");
1055 dump_data(buf, PTD_GET_COUNT(ptd));
1056 }
1057 DBG(0, "-----\n");
1058 }
1059}
1060
1061static void dump_ptd_queue(struct isp1362_ep_queue *epq)
1062{
1063 struct isp1362_ep *ep;
1064 int dbg = dbg_level;
1065
1066 dbg_level = 1;
1067 list_for_each_entry(ep, &epq->active, active) {
1068 dump_ptd(&ep->ptd);
1069 dump_data(ep->data, ep->length);
1070 }
1071 dbg_level = dbg;
1072}
1073#else
1074#define dump_ptd(ptd) do {} while (0)
1075#define dump_ptd_in_data(ptd, buf) do {} while (0)
1076#define dump_ptd_out_data(ptd, buf) do {} while (0)
1077#define dump_ptd_data(ptd, buf) do {} while (0)
1078#define dump_ptd_queue(epq) do {} while (0)
1079#endif