Diffstat (limited to 'drivers')
-rw-r--r--  drivers/usb/host/uhci-debug.c    50
-rw-r--r--  drivers/usb/host/uhci-hcd.c      52
-rw-r--r--  drivers/usb/host/uhci-hcd.h      74
-rw-r--r--  drivers/usb/host/uhci-q.c       193
4 files changed, 256 insertions(+), 113 deletions(-)
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index a0677133577b..8d24d3dc0a61 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -220,16 +220,6 @@ static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
         return out - buf;
 }
 
-static const char * const qh_names[] = {
-        "skel_unlink_qh", "skel_iso_qh",
-        "skel_int128_qh", "skel_int64_qh",
-        "skel_int32_qh", "skel_int16_qh",
-        "skel_int8_qh", "skel_int4_qh",
-        "skel_int2_qh", "skel_int1_qh",
-        "skel_ls_control_qh", "skel_fs_control_qh",
-        "skel_bulk_qh", "skel_term_qh"
-};
-
 static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
 {
         char *out = buf;
@@ -352,6 +342,12 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
         struct uhci_td *td;
         struct list_head *tmp, *head;
         int nframes, nerrs;
+        __le32 link;
+
+        static const char * const qh_names[] = {
+                "unlink", "iso", "int128", "int64", "int32", "int16",
+                "int8", "int4", "int2", "async", "term"
+        };
 
         out += uhci_show_root_hub_state(uhci, out, len - (out - buf));
         out += sprintf(out, "HC status\n");
@@ -374,7 +370,7 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
         nframes = 10;
         nerrs = 0;
         for (i = 0; i < UHCI_NUMFRAMES; ++i) {
-                __le32 link, qh_dma;
+                __le32 qh_dma;
 
                 j = 0;
                 td = uhci->frame_cpu[i];
@@ -430,23 +426,21 @@ check_link:
 
         for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
                 int cnt = 0;
+                __le32 fsbr_link = 0;
 
                 qh = uhci->skelqh[i];
-                out += sprintf(out, "- %s\n", qh_names[i]); \
+                out += sprintf(out, "- skel_%s_qh\n", qh_names[i]); \
                 out += uhci_show_qh(qh, out, len - (out - buf), 4);
 
                 /* Last QH is the Terminating QH, it's different */
-                if (i == UHCI_NUM_SKELQH - 1) {
-                        if (qh->link != UHCI_PTR_TERM)
-                                out += sprintf(out, " bandwidth reclamation on!\n");
-
+                if (i == SKEL_TERM) {
                         if (qh_element(qh) != LINK_TO_TD(uhci->term_td))
                                 out += sprintf(out, " skel_term_qh element is not set to term_td!\n");
-
+                        if (link == LINK_TO_QH(uhci->skel_term_qh))
+                                goto check_qh_link;
                         continue;
                 }
 
-                j = (i < 9) ? 9 : i+1;          /* Next skeleton */
                 head = &qh->node;
                 tmp = head->next;
 
@@ -456,14 +450,26 @@ check_link:
                         if (++cnt <= 10)
                                 out += uhci_show_qh(qh, out,
                                                 len - (out - buf), 4);
+                        if (!fsbr_link && qh->skel >= SKEL_FSBR)
+                                fsbr_link = LINK_TO_QH(qh);
                 }
                 if ((cnt -= 10) > 0)
                         out += sprintf(out, " Skipped %d QHs\n", cnt);
 
-                if (i > 1 && i < UHCI_NUM_SKELQH - 1) {
-                        if (qh->link != LINK_TO_QH(uhci->skelqh[j]))
-                                out += sprintf(out, " last QH not linked to next skeleton!\n");
-                }
+                link = UHCI_PTR_TERM;
+                if (i <= SKEL_ISO)
+                        ;
+                else if (i < SKEL_ASYNC)
+                        link = LINK_TO_QH(uhci->skel_async_qh);
+                else if (!uhci->fsbr_is_on)
+                        ;
+                else if (fsbr_link)
+                        link = fsbr_link;
+                else
+                        link = LINK_TO_QH(uhci->skel_term_qh);
+check_qh_link:
+                if (qh->link != link)
+                        out += sprintf(out, " last QH not linked to next skeleton!\n");
         }
 
         return out - buf;
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 1f0833ab294a..44da4334f1d6 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -13,7 +13,7 @@
  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
  * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
- * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
  *
  * Intel documents this fairly well, and as far as I know there
  * are no royalties or anything like that, but even so there are
@@ -107,10 +107,10 @@ static __le32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame)
          * interrupt QHs, which will help spread out bandwidth utilization.
          *
          * ffs (Find First bit Set) does exactly what we need:
-         * 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[8],
-         * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc.
+         * 1,3,5,... => ffs = 0 => use period-2 QH = skelqh[8],
+         * 2,6,10,... => ffs = 1 => use period-4 QH = skelqh[7], etc.
          * ffs >= 7 => not on any high-period queue, so use
-         * skel_int1_qh = skelqh[9].
+         * period-1 QH = skelqh[9].
          * Add in UHCI_NUMFRAMES to insure at least one bit is set.
          */
         skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES);
@@ -540,16 +540,18 @@ static void uhci_shutdown(struct pci_dev *pdev)
  *
  * The hardware doesn't really know any difference
  * in the queues, but the order does matter for the
- * protocols higher up. The order is:
+ * protocols higher up. The order in which the queues
+ * are encountered by the hardware is:
  *
- * - any isochronous events handled before any
+ * - All isochronous events are handled before any
  *   of the queues. We don't do that here, because
  *   we'll create the actual TD entries on demand.
- * - The first queue is the interrupt queue.
- * - The second queue is the control queue, split into low- and full-speed
- * - The third queue is bulk queue.
- * - The fourth queue is the bandwidth reclamation queue, which loops back
- *   to the full-speed control queue.
+ * - The first queue is the high-period interrupt queue.
+ * - The second queue is the period-1 interrupt and async
+ *   (low-speed control, full-speed control, then bulk) queue.
+ * - The third queue is the terminating bandwidth reclamation queue,
+ *   which contains no members, loops back to itself, and is present
+ *   only when FSBR is on and there are no full-speed control or bulk QHs.
  */
 static int uhci_start(struct usb_hcd *hcd)
 {
@@ -626,30 +628,18 @@ static int uhci_start(struct usb_hcd *hcd)
         }
 
         /*
-         * 8 Interrupt queues; link all higher int queues to int1,
-         * then link int1 to control and control to bulk
+         * 8 Interrupt queues; link all higher int queues to int1 = async
          */
-        uhci->skel_int128_qh->link =
-                        uhci->skel_int64_qh->link =
-                        uhci->skel_int32_qh->link =
-                        uhci->skel_int16_qh->link =
-                        uhci->skel_int8_qh->link =
-                        uhci->skel_int4_qh->link =
-                        uhci->skel_int2_qh->link = LINK_TO_QH(
-                        uhci->skel_int1_qh);
-
-        uhci->skel_int1_qh->link = LINK_TO_QH(uhci->skel_ls_control_qh);
-        uhci->skel_ls_control_qh->link = LINK_TO_QH(uhci->skel_fs_control_qh);
-        uhci->skel_fs_control_qh->link = LINK_TO_QH(uhci->skel_bulk_qh);
-        uhci->skel_bulk_qh->link = LINK_TO_QH(uhci->skel_term_qh);
+        for (i = SKEL_ISO + 1; i < SKEL_ASYNC; ++i)
+                uhci->skelqh[i]->link = LINK_TO_QH(uhci->skel_async_qh);
+        uhci->skel_async_qh->link = uhci->skel_term_qh->link = UHCI_PTR_TERM;
 
         /* This dummy TD is to work around a bug in Intel PIIX controllers */
         uhci_fill_td(uhci->term_td, 0, uhci_explen(0) |
                         (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
-        uhci->term_td->link = LINK_TO_TD(uhci->term_td);
-
-        uhci->skel_term_qh->link = UHCI_PTR_TERM;
-        uhci->skel_term_qh->element = LINK_TO_TD(uhci->term_td);
+        uhci->term_td->link = UHCI_PTR_TERM;
+        uhci->skel_async_qh->element = uhci->skel_term_qh->element =
+                        LINK_TO_TD(uhci->term_td);
 
         /*
          * Fill the frame list: make all entries point to the proper
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index a8c256b44d8e..1b3d23406ac4 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -135,7 +135,6 @@ struct uhci_qh {
         struct usb_host_endpoint *hep;  /* Endpoint information */
         struct usb_device *udev;
         struct list_head queue;         /* Queue of urbps for this QH */
-        struct uhci_qh *skel;           /* Skeleton for this QH */
         struct uhci_td *dummy_td;       /* Dummy TD to end the queue */
         struct uhci_td *post_td;        /* Last TD completed */
 
@@ -151,6 +150,7 @@ struct uhci_qh {
 
         int state;                      /* QH_STATE_xxx; see above */
         int type;                       /* Queue type (control, bulk, etc) */
+        int skel;                       /* Skeleton queue number */
 
         unsigned int initial_toggle:1;  /* Endpoint's current toggle value */
         unsigned int needs_fixup:1;     /* Must fix the TD toggle values */
@@ -276,12 +276,13 @@ static inline u32 td_status(struct uhci_td *td) {
 /*
  * The UHCI driver uses QHs with Interrupt, Control and Bulk URBs for
  * automatic queuing. To make it easy to insert entries into the schedule,
- * we have a skeleton of QHs for each predefined Interrupt latency,
- * low-speed control, full-speed control, bulk, and terminating QH
- * (see explanation for the terminating QH below).
+ * we have a skeleton of QHs for each predefined Interrupt latency.
+ * Asynchronous QHs (low-speed control, full-speed control, and bulk)
+ * go onto the period-1 interrupt list, since they all get accessed on
+ * every frame.
  *
- * When we want to add a new QH, we add it to the end of the list for the
- * skeleton QH. For instance, the schedule list can look like this:
+ * When we want to add a new QH, we add it to the list starting from the
+ * appropriate skeleton QH. For instance, the schedule can look like this:
  *
  * skel int128 QH
  * dev 1 interrupt QH
@@ -289,50 +290,47 @@ static inline u32 td_status(struct uhci_td *td) {
  * skel int64 QH
  * skel int32 QH
  * ...
- * skel int1 QH
- * skel low-speed control QH
- * dev 5 control QH
- * skel full-speed control QH
- * skel bulk QH
+ * skel int1 + async QH
+ * dev 5 low-speed control QH
  * dev 1 bulk QH
  * dev 2 bulk QH
- * skel terminating QH
  *
- * The terminating QH is used for 2 reasons:
- * - To place a terminating TD which is used to workaround a PIIX bug
- *   (see Intel errata for explanation), and
- * - To loop back to the full-speed control queue for full-speed bandwidth
- *   reclamation.
+ * There is a special terminating QH used to keep full-speed bandwidth
+ * reclamation active when no full-speed control or bulk QHs are linked
+ * into the schedule. It has an inactive TD (to work around a PIIX bug,
+ * see the Intel errata) and it points back to itself.
  *
- * There's a special skeleton QH for Isochronous QHs. It never appears
- * on the schedule, and Isochronous TDs go on the schedule before the
+ * There's a special skeleton QH for Isochronous QHs which never appears
+ * on the schedule. Isochronous TDs go on the schedule before the
  * the skeleton QHs. The hardware accesses them directly rather than
  * through their QH, which is used only for bookkeeping purposes.
  * While the UHCI spec doesn't forbid the use of QHs for Isochronous,
  * it doesn't use them either. And the spec says that queues never
  * advance on an error completion status, which makes them totally
  * unsuitable for Isochronous transfers.
+ *
+ * There's also a special skeleton QH used for QHs which are in the process
+ * of unlinking and so may still be in use by the hardware. It too never
+ * appears on the schedule.
  */
 
-#define UHCI_NUM_SKELQH         14
-#define skel_unlink_qh          skelqh[0]
-#define skel_iso_qh             skelqh[1]
-#define skel_int128_qh          skelqh[2]
-#define skel_int64_qh           skelqh[3]
-#define skel_int32_qh           skelqh[4]
-#define skel_int16_qh           skelqh[5]
-#define skel_int8_qh            skelqh[6]
-#define skel_int4_qh            skelqh[7]
-#define skel_int2_qh            skelqh[8]
-#define skel_int1_qh            skelqh[9]
-#define skel_ls_control_qh      skelqh[10]
-#define skel_fs_control_qh      skelqh[11]
-#define skel_bulk_qh            skelqh[12]
-#define skel_term_qh            skelqh[13]
-
-/* Find the skelqh entry corresponding to an interval exponent */
-#define UHCI_SKEL_INDEX(exponent)       (9 - exponent)
-
+#define UHCI_NUM_SKELQH         11
+#define SKEL_UNLINK             0
+#define skel_unlink_qh          skelqh[SKEL_UNLINK]
+#define SKEL_ISO                1
+#define skel_iso_qh             skelqh[SKEL_ISO]
+                /* int128, int64, ..., int1 = 2, 3, ..., 9 */
+#define SKEL_INDEX(exponent)    (9 - exponent)
+#define SKEL_ASYNC              9
+#define skel_async_qh           skelqh[SKEL_ASYNC]
+#define SKEL_TERM               10
+#define skel_term_qh            skelqh[SKEL_TERM]
+
+/* The following entries refer to sublists of skel_async_qh */
+#define SKEL_LS_CONTROL         20
+#define SKEL_FS_CONTROL         21
+#define SKEL_FSBR               SKEL_FS_CONTROL
+#define SKEL_BULK               22
 
 /*
  * The UHCI controller and root hub
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index a0c6bf6128a3..f4ebdb3e488f 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -13,7 +13,7 @@
  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
  * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
- * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
  */
 
 
@@ -45,14 +45,43 @@ static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
  */
 static void uhci_fsbr_on(struct uhci_hcd *uhci)
 {
+        struct uhci_qh *fsbr_qh, *lqh, *tqh;
+
         uhci->fsbr_is_on = 1;
-        uhci->skel_term_qh->link = LINK_TO_QH(uhci->skel_fs_control_qh);
+        lqh = list_entry(uhci->skel_async_qh->node.prev,
+                        struct uhci_qh, node);
+
+        /* Find the first FSBR QH. Linear search through the list is
+         * acceptable because normally FSBR gets turned on as soon as
+         * one QH needs it. */
+        fsbr_qh = NULL;
+        list_for_each_entry_reverse(tqh, &uhci->skel_async_qh->node, node) {
+                if (tqh->skel < SKEL_FSBR)
+                        break;
+                fsbr_qh = tqh;
+        }
+
+        /* No FSBR QH means we must insert the terminating skeleton QH */
+        if (!fsbr_qh) {
+                uhci->skel_term_qh->link = LINK_TO_QH(uhci->skel_term_qh);
+                wmb();
+                lqh->link = uhci->skel_term_qh->link;
+
+        /* Otherwise loop the last QH to the first FSBR QH */
+        } else
+                lqh->link = LINK_TO_QH(fsbr_qh);
 }
 
 static void uhci_fsbr_off(struct uhci_hcd *uhci)
 {
+        struct uhci_qh *lqh;
+
         uhci->fsbr_is_on = 0;
-        uhci->skel_term_qh->link = UHCI_PTR_TERM;
+        lqh = list_entry(uhci->skel_async_qh->node.prev,
+                        struct uhci_qh, node);
+
+        /* End the async list normally and unlink the terminating QH */
+        lqh->link = uhci->skel_term_qh->link = UHCI_PTR_TERM;
 }
 
 static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
@@ -404,12 +433,81 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
 }
 
 /*
- * Put a QH on the schedule in both hardware and software
+ * Link an Isochronous QH into its skeleton's list
  */
-static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+        list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
+
+        /* Isochronous QHs aren't linked by the hardware */
+}
+
+/*
+ * Link a high-period interrupt QH into the schedule at the end of its
+ * skeleton's list
+ */
+static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
 {
         struct uhci_qh *pqh;
 
+        list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
+
+        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+        qh->link = pqh->link;
+        wmb();
+        pqh->link = LINK_TO_QH(qh);
+}
+
+/*
+ * Link a period-1 interrupt or async QH into the schedule at the
+ * correct spot in the async skeleton's list, and update the FSBR link
+ */
+static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+        struct uhci_qh *pqh, *lqh;
+        __le32 link_to_new_qh;
+        __le32 *extra_link = &link_to_new_qh;
+
+        /* Find the predecessor QH for our new one and insert it in the list.
+         * The list of QHs is expected to be short, so linear search won't
+         * take too long. */
+        list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
+                if (pqh->skel <= qh->skel)
+                        break;
+        }
+        list_add(&qh->node, &pqh->node);
+        qh->link = pqh->link;
+
+        link_to_new_qh = LINK_TO_QH(qh);
+
+        /* If this is now the first FSBR QH, take special action */
+        if (uhci->fsbr_is_on && pqh->skel < SKEL_FSBR &&
+                        qh->skel >= SKEL_FSBR) {
+                lqh = list_entry(uhci->skel_async_qh->node.prev,
+                                struct uhci_qh, node);
+
+                /* If the new QH is also the last one, we must unlink
+                 * the terminating skeleton QH and make the new QH point
+                 * back to itself. */
+                if (qh == lqh) {
+                        qh->link = link_to_new_qh;
+                        extra_link = &uhci->skel_term_qh->link;
+
+                /* Otherwise the last QH must point to the new QH */
+                } else
+                        extra_link = &lqh->link;
+        }
+
+        /* Link it into the schedule */
+        wmb();
+        *extra_link = pqh->link = link_to_new_qh;
+}
+
+/*
+ * Put a QH on the schedule in both hardware and software
+ */
+static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
         WARN_ON(list_empty(&qh->queue));
 
         /* Set the element pointer if it isn't set already.
@@ -431,18 +529,64 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
                 return;
         qh->state = QH_STATE_ACTIVE;
 
-        /* Move the QH from its old list to the end of the appropriate
+        /* Move the QH from its old list to the correct spot in the appropriate
          * skeleton's list */
         if (qh == uhci->next_qh)
                 uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                 node);
-        list_move_tail(&qh->node, &qh->skel->node);
+        list_del(&qh->node);
+
+        if (qh->skel == SKEL_ISO)
+                link_iso(uhci, qh);
+        else if (qh->skel < SKEL_ASYNC)
+                link_interrupt(uhci, qh);
+        else
+                link_async(uhci, qh);
+}
+
+/*
+ * Unlink a high-period interrupt QH from the schedule
+ */
+static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+        struct uhci_qh *pqh;
 
-        /* Link it into the schedule */
         pqh = list_entry(qh->node.prev, struct uhci_qh, node);
-        qh->link = pqh->link;
-        wmb();
-        pqh->link = LINK_TO_QH(qh);
+        pqh->link = qh->link;
+        mb();
+}
+
+/*
+ * Unlink a period-1 interrupt or async QH from the schedule
+ */
+static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+        struct uhci_qh *pqh, *lqh;
+        __le32 link_to_next_qh = qh->link;
+
+        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+
+        /* If this is the first FSBQ QH, take special action */
+        if (uhci->fsbr_is_on && pqh->skel < SKEL_FSBR &&
+                        qh->skel >= SKEL_FSBR) {
+                lqh = list_entry(uhci->skel_async_qh->node.prev,
+                                struct uhci_qh, node);
+
+                /* If this QH is also the last one, we must link in
+                 * the terminating skeleton QH. */
+                if (qh == lqh) {
+                        link_to_next_qh = LINK_TO_QH(uhci->skel_term_qh);
+                        uhci->skel_term_qh->link = link_to_next_qh;
+                        wmb();
+                        qh->link = link_to_next_qh;
+
+                /* Otherwise the last QH must point to the new first FSBR QH */
+                } else
+                        lqh->link = link_to_next_qh;
+        }
+
+        pqh->link = link_to_next_qh;
+        mb();
 }
 
 /*
@@ -450,17 +594,18 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
  */
 static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 {
-        struct uhci_qh *pqh;
-
         if (qh->state == QH_STATE_UNLINKING)
                 return;
         WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
         qh->state = QH_STATE_UNLINKING;
 
         /* Unlink the QH from the schedule and record when we did it */
-        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
-        pqh->link = qh->link;
-        mb();
+        if (qh->skel == SKEL_ISO)
+                ;
+        else if (qh->skel < SKEL_ASYNC)
+                unlink_interrupt(uhci, qh);
+        else
+                unlink_async(uhci, qh);
 
         uhci_get_current_frame_number(uhci);
         qh->unlink_frame = uhci->frame_number;
@@ -696,6 +841,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
         dma_addr_t data = urb->transfer_dma;
         __le32 *plink;
         struct urb_priv *urbp = urb->hcpriv;
+        int skel;
 
         /* The "pipe" thing contains the destination in bits 8--18 */
         destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
@@ -796,11 +942,13 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
          * isn't in the CONFIGURED state. */
         if (urb->dev->speed == USB_SPEED_LOW ||
                         urb->dev->state != USB_STATE_CONFIGURED)
-                qh->skel = uhci->skel_ls_control_qh;
+                skel = SKEL_LS_CONTROL;
         else {
-                qh->skel = uhci->skel_fs_control_qh;
+                skel = SKEL_FS_CONTROL;
                 uhci_add_fsbr(uhci, urb);
         }
+        if (qh->state != QH_STATE_ACTIVE)
+                qh->skel = skel;
 
         urb->actual_length = -8;        /* Account for the SETUP packet */
         return 0;
@@ -930,7 +1078,7 @@ nomem:
         return -ENOMEM;
 }
 
-static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
+static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
                 struct uhci_qh *qh)
 {
         int ret;
@@ -939,7 +1087,8 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
         if (urb->dev->speed == USB_SPEED_LOW)
                 return -EINVAL;
 
-        qh->skel = uhci->skel_bulk_qh;
+        if (qh->state != QH_STATE_ACTIVE)
+                qh->skel = SKEL_BULK;
         ret = uhci_submit_common(uhci, urb, qh);
         if (ret == 0)
                 uhci_add_fsbr(uhci, urb);
@@ -967,7 +1116,7 @@ static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
         if (exponent < 0)
                 return -EINVAL;
         qh->period = 1 << exponent;
-        qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];
+        qh->skel = SKEL_INDEX(exponent);
 
         /* For now, interrupt phase is fixed by the layout
          * of the QH lists. */
@@ -1215,7 +1364,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                 qh->iso_status = 0;
         }
 
-        qh->skel = uhci->skel_iso_qh;
+        qh->skel = SKEL_ISO;
         if (!qh->bandwidth_reserved)
                 uhci_reserve_bandwidth(uhci, qh);
         return 0;
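
For reference only (an illustration, not part of the patch): a minimal userspace
sketch of the two index computations the patch standardizes on, SKEL_INDEX()
from the reworked uhci-hcd.h and the ffs-based frame-to-skeleton mapping
described in the uhci_frame_skel_link() comment above. The clamp to skelqh[9]
follows that comment; POSIX ffs() stands in for the kernel's __ffs(), and
main()/printf() are scaffolding only.

/* Illustration only -- mirrors the arithmetic shown in the patch above. */
#include <stdio.h>
#include <strings.h>                    /* ffs() */

#define UHCI_NUMFRAMES  1024            /* frame list size, as in uhci-hcd.h */

/* After the patch: int128 ... int1 live in skelqh[2..9], and int1 == async */
#define SKEL_INDEX(exponent)    (9 - (exponent))

/* Same idea as uhci_frame_skel_link(): pick the highest-period skeleton
 * consistent with the frame number's low-order bits.  POSIX ffs() is
 * 1-based while the kernel's __ffs() is 0-based, hence the "- 1". */
static int frame_to_skelnum(int frame)
{
        int skelnum = 8 - (ffs(frame | UHCI_NUMFRAMES) - 1);

        if (skelnum <= 1)       /* ffs >= 7: no high-period queue applies */
                skelnum = 9;    /* period-1 / async skeleton */
        return skelnum;
}

int main(void)
{
        int exponent, frame;

        /* Interrupt period 2^exponent frames -> skeleton index */
        for (exponent = 0; exponent <= 7; ++exponent)
                printf("period %3d frames -> skelqh[%d]\n",
                                1 << exponent, SKEL_INDEX(exponent));

        /* A few frame numbers -> skeleton the frame list entry links to */
        for (frame = 0; frame < 8; ++frame)
                printf("frame %d -> skelqh[%d]\n",
                                frame, frame_to_skelnum(frame));
        return 0;
}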