diff options
author | Alan Stern <stern@rowland.harvard.edu> | 2005-12-17 17:58:46 -0500 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2006-03-20 17:49:57 -0500 |
commit | dccf4a48d47120a42382ba526f1a0848c13ba2a4 (patch) | |
tree | 788a0a9f491d1a42df1dee1781156ccfc363b6ef /drivers/usb/host/uhci-hcd.h | |
parent | 499003e815344304c7b0c93aad923ddf644d24e0 (diff) |
[PATCH] UHCI: use one QH per endpoint, not per URB
This patch (as623) changes the uhci-hcd driver to make it use one QH per
device endpoint, instead of a QH per URB as it does now. Numerous areas
of the code are affected by this. For example, the distinction between
"queued" URBs and non-"queued" URBs no longer exists; all URBs belong to
a queue and some just happen to be at the queue's head.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/uhci-hcd.h')
-rw-r--r-- | drivers/usb/host/uhci-hcd.h | 177 |
1 files changed, 96 insertions, 81 deletions
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h index 8b4b887a7d41..7a9481c09a05 100644 --- a/drivers/usb/host/uhci-hcd.h +++ b/drivers/usb/host/uhci-hcd.h | |||
@@ -28,8 +28,9 @@ | |||
28 | #define USBSTS_USBINT 0x0001 /* Interrupt due to IOC */ | 28 | #define USBSTS_USBINT 0x0001 /* Interrupt due to IOC */ |
29 | #define USBSTS_ERROR 0x0002 /* Interrupt due to error */ | 29 | #define USBSTS_ERROR 0x0002 /* Interrupt due to error */ |
30 | #define USBSTS_RD 0x0004 /* Resume Detect */ | 30 | #define USBSTS_RD 0x0004 /* Resume Detect */ |
31 | #define USBSTS_HSE 0x0008 /* Host System Error - basically PCI problems */ | 31 | #define USBSTS_HSE 0x0008 /* Host System Error: PCI problems */ |
32 | #define USBSTS_HCPE 0x0010 /* Host Controller Process Error - the scripts were buggy */ | 32 | #define USBSTS_HCPE 0x0010 /* Host Controller Process Error: |
33 | * the schedule is buggy */ | ||
33 | #define USBSTS_HCH 0x0020 /* HC Halted */ | 34 | #define USBSTS_HCH 0x0020 /* HC Halted */ |
34 | 35 | ||
35 | /* Interrupt enable register */ | 36 | /* Interrupt enable register */ |
@@ -47,7 +48,8 @@ | |||
47 | /* USB port status and control registers */ | 48 | /* USB port status and control registers */ |
48 | #define USBPORTSC1 16 | 49 | #define USBPORTSC1 16 |
49 | #define USBPORTSC2 18 | 50 | #define USBPORTSC2 18 |
50 | #define USBPORTSC_CCS 0x0001 /* Current Connect Status ("device present") */ | 51 | #define USBPORTSC_CCS 0x0001 /* Current Connect Status |
52 | * ("device present") */ | ||
51 | #define USBPORTSC_CSC 0x0002 /* Connect Status Change */ | 53 | #define USBPORTSC_CSC 0x0002 /* Connect Status Change */ |
52 | #define USBPORTSC_PE 0x0004 /* Port Enable */ | 54 | #define USBPORTSC_PE 0x0004 /* Port Enable */ |
53 | #define USBPORTSC_PEC 0x0008 /* Port Enable Change */ | 55 | #define USBPORTSC_PEC 0x0008 /* Port Enable Change */ |
@@ -71,15 +73,16 @@ | |||
71 | #define USBLEGSUP_RWC 0x8f00 /* the R/WC bits */ | 73 | #define USBLEGSUP_RWC 0x8f00 /* the R/WC bits */ |
72 | #define USBLEGSUP_RO 0x5040 /* R/O and reserved bits */ | 74 | #define USBLEGSUP_RO 0x5040 /* R/O and reserved bits */ |
73 | 75 | ||
74 | #define UHCI_PTR_BITS cpu_to_le32(0x000F) | 76 | #define UHCI_PTR_BITS __constant_cpu_to_le32(0x000F) |
75 | #define UHCI_PTR_TERM cpu_to_le32(0x0001) | 77 | #define UHCI_PTR_TERM __constant_cpu_to_le32(0x0001) |
76 | #define UHCI_PTR_QH cpu_to_le32(0x0002) | 78 | #define UHCI_PTR_QH __constant_cpu_to_le32(0x0002) |
77 | #define UHCI_PTR_DEPTH cpu_to_le32(0x0004) | 79 | #define UHCI_PTR_DEPTH __constant_cpu_to_le32(0x0004) |
78 | #define UHCI_PTR_BREADTH cpu_to_le32(0x0000) | 80 | #define UHCI_PTR_BREADTH __constant_cpu_to_le32(0x0000) |
79 | 81 | ||
80 | #define UHCI_NUMFRAMES 1024 /* in the frame list [array] */ | 82 | #define UHCI_NUMFRAMES 1024 /* in the frame list [array] */ |
81 | #define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */ | 83 | #define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */ |
82 | #define CAN_SCHEDULE_FRAMES 1000 /* how far future frames can be scheduled */ | 84 | #define CAN_SCHEDULE_FRAMES 1000 /* how far in the future frames |
85 | * can be scheduled */ | ||
83 | 86 | ||
84 | 87 | ||
85 | /* | 88 | /* |
@@ -87,38 +90,54 @@ | |||
87 | */ | 90 | */ |
88 | 91 | ||
89 | /* | 92 | /* |
90 | * One role of a QH is to hold a queue of TDs for some endpoint. Each QH is | 93 | * One role of a QH is to hold a queue of TDs for some endpoint. One QH goes |
91 | * used with one URB, and qh->element (updated by the HC) is either: | 94 | * with each endpoint, and qh->element (updated by the HC) is either: |
92 | * - the next unprocessed TD for the URB, or | 95 | * - the next unprocessed TD in the endpoint's queue, or |
93 | * - UHCI_PTR_TERM (when there's no more traffic for this endpoint), or | 96 | * - UHCI_PTR_TERM (when there's no more traffic for this endpoint). |
94 | * - the QH for the next URB queued to the same endpoint. | ||
95 | * | 97 | * |
96 | * The other role of a QH is to serve as a "skeleton" framelist entry, so we | 98 | * The other role of a QH is to serve as a "skeleton" framelist entry, so we |
97 | * can easily splice a QH for some endpoint into the schedule at the right | 99 | * can easily splice a QH for some endpoint into the schedule at the right |
98 | * place. Then qh->element is UHCI_PTR_TERM. | 100 | * place. Then qh->element is UHCI_PTR_TERM. |
99 | * | 101 | * |
100 | * In the frame list, qh->link maintains a list of QHs seen by the HC: | 102 | * In the schedule, qh->link maintains a list of QHs seen by the HC: |
101 | * skel1 --> ep1-qh --> ep2-qh --> ... --> skel2 --> ... | 103 | * skel1 --> ep1-qh --> ep2-qh --> ... --> skel2 --> ... |
104 | * | ||
105 | * qh->node is the software equivalent of qh->link. The differences | ||
106 | * are that the software list is doubly-linked and QHs in the UNLINKING | ||
107 | * state are on the software list but not the hardware schedule. | ||
108 | * | ||
109 | * For bookkeeping purposes we maintain QHs even for Isochronous endpoints, | ||
110 | * but they never get added to the hardware schedule. | ||
102 | */ | 111 | */ |
112 | #define QH_STATE_IDLE 1 /* QH is not being used */ | ||
113 | #define QH_STATE_UNLINKING 2 /* QH has been removed from the | ||
114 | * schedule but the hardware may | ||
115 | * still be using it */ | ||
116 | #define QH_STATE_ACTIVE 3 /* QH is on the schedule */ | ||
117 | |||
103 | struct uhci_qh { | 118 | struct uhci_qh { |
104 | /* Hardware fields */ | 119 | /* Hardware fields */ |
105 | __le32 link; /* Next queue */ | 120 | __le32 link; /* Next QH in the schedule */ |
106 | __le32 element; /* Queue element pointer */ | 121 | __le32 element; /* Queue element (TD) pointer */ |
107 | 122 | ||
108 | /* Software fields */ | 123 | /* Software fields */ |
109 | dma_addr_t dma_handle; | 124 | dma_addr_t dma_handle; |
110 | 125 | ||
111 | struct urb_priv *urbp; | 126 | struct list_head node; /* Node in the list of QHs */ |
127 | struct usb_host_endpoint *hep; /* Endpoint information */ | ||
128 | struct usb_device *udev; | ||
129 | struct list_head queue; /* Queue of urbps for this QH */ | ||
130 | struct uhci_qh *skel; /* Skeleton for this QH */ | ||
112 | 131 | ||
113 | struct list_head list; | 132 | unsigned int unlink_frame; /* When the QH was unlinked */ |
114 | struct list_head remove_list; | 133 | int state; /* QH_STATE_xxx; see above */ |
115 | } __attribute__((aligned(16))); | 134 | } __attribute__((aligned(16))); |
116 | 135 | ||
117 | /* | 136 | /* |
118 | * We need a special accessor for the element pointer because it is | 137 | * We need a special accessor for the element pointer because it is |
119 | * subject to asynchronous updates by the controller. | 138 | * subject to asynchronous updates by the controller. |
120 | */ | 139 | */ |
121 | static __le32 inline qh_element(struct uhci_qh *qh) { | 140 | static inline __le32 qh_element(struct uhci_qh *qh) { |
122 | __le32 element = qh->element; | 141 | __le32 element = qh->element; |
123 | 142 | ||
124 | barrier(); | 143 | barrier(); |
@@ -149,11 +168,13 @@ static __le32 inline qh_element(struct uhci_qh *qh) { | |||
149 | #define TD_CTRL_ACTLEN_MASK 0x7FF /* actual length, encoded as n - 1 */ | 168 | #define TD_CTRL_ACTLEN_MASK 0x7FF /* actual length, encoded as n - 1 */ |
150 | 169 | ||
151 | #define TD_CTRL_ANY_ERROR (TD_CTRL_STALLED | TD_CTRL_DBUFERR | \ | 170 | #define TD_CTRL_ANY_ERROR (TD_CTRL_STALLED | TD_CTRL_DBUFERR | \ |
152 | TD_CTRL_BABBLE | TD_CTRL_CRCTIME | TD_CTRL_BITSTUFF) | 171 | TD_CTRL_BABBLE | TD_CTRL_CRCTIME | \ |
172 | TD_CTRL_BITSTUFF) | ||
153 | 173 | ||
154 | #define uhci_maxerr(err) ((err) << TD_CTRL_C_ERR_SHIFT) | 174 | #define uhci_maxerr(err) ((err) << TD_CTRL_C_ERR_SHIFT) |
155 | #define uhci_status_bits(ctrl_sts) ((ctrl_sts) & 0xF60000) | 175 | #define uhci_status_bits(ctrl_sts) ((ctrl_sts) & 0xF60000) |
156 | #define uhci_actual_length(ctrl_sts) (((ctrl_sts) + 1) & TD_CTRL_ACTLEN_MASK) /* 1-based */ | 176 | #define uhci_actual_length(ctrl_sts) (((ctrl_sts) + 1) & \ |
177 | TD_CTRL_ACTLEN_MASK) /* 1-based */ | ||
157 | 178 | ||
158 | /* | 179 | /* |
159 | * for TD <info>: (a.k.a. Token) | 180 | * for TD <info>: (a.k.a. Token) |
@@ -163,7 +184,7 @@ static __le32 inline qh_element(struct uhci_qh *qh) { | |||
163 | #define TD_TOKEN_TOGGLE_SHIFT 19 | 184 | #define TD_TOKEN_TOGGLE_SHIFT 19 |
164 | #define TD_TOKEN_TOGGLE (1 << 19) | 185 | #define TD_TOKEN_TOGGLE (1 << 19) |
165 | #define TD_TOKEN_EXPLEN_SHIFT 21 | 186 | #define TD_TOKEN_EXPLEN_SHIFT 21 |
166 | #define TD_TOKEN_EXPLEN_MASK 0x7FF /* expected length, encoded as n - 1 */ | 187 | #define TD_TOKEN_EXPLEN_MASK 0x7FF /* expected length, encoded as n-1 */ |
167 | #define TD_TOKEN_PID_MASK 0xFF | 188 | #define TD_TOKEN_PID_MASK 0xFF |
168 | 189 | ||
169 | #define uhci_explen(len) ((((len) - 1) & TD_TOKEN_EXPLEN_MASK) << \ | 190 | #define uhci_explen(len) ((((len) - 1) & TD_TOKEN_EXPLEN_MASK) << \ |
@@ -187,7 +208,7 @@ static __le32 inline qh_element(struct uhci_qh *qh) { | |||
187 | * sw space after the TD entry. | 208 | * sw space after the TD entry. |
188 | * | 209 | * |
189 | * td->link points to either another TD (not necessarily for the same urb or | 210 | * td->link points to either another TD (not necessarily for the same urb or |
190 | * even the same endpoint), or nothing (PTR_TERM), or a QH (for queued urbs). | 211 | * even the same endpoint), or nothing (PTR_TERM), or a QH. |
191 | */ | 212 | */ |
192 | struct uhci_td { | 213 | struct uhci_td { |
193 | /* Hardware fields */ | 214 | /* Hardware fields */ |
@@ -210,7 +231,7 @@ struct uhci_td { | |||
210 | * We need a special accessor for the control/status word because it is | 231 | * We need a special accessor for the control/status word because it is |
211 | * subject to asynchronous updates by the controller. | 232 | * subject to asynchronous updates by the controller. |
212 | */ | 233 | */ |
213 | static u32 inline td_status(struct uhci_td *td) { | 234 | static inline u32 td_status(struct uhci_td *td) { |
214 | __le32 status = td->status; | 235 | __le32 status = td->status; |
215 | 236 | ||
216 | barrier(); | 237 | barrier(); |
@@ -223,17 +244,14 @@ static u32 inline td_status(struct uhci_td *td) { | |||
223 | */ | 244 | */ |
224 | 245 | ||
225 | /* | 246 | /* |
226 | * The UHCI driver places Interrupt, Control and Bulk into QHs both | 247 | * The UHCI driver uses QHs with Interrupt, Control and Bulk URBs for |
227 | * to group together TDs for one transfer, and also to facilitate queuing | 248 | * automatic queuing. To make it easy to insert entries into the schedule, |
228 | * of URBs. To make it easy to insert entries into the schedule, we have | 249 | * we have a skeleton of QHs for each predefined Interrupt latency, |
229 | * a skeleton of QHs for each predefined Interrupt latency, low-speed | 250 | * low-speed control, full-speed control, bulk, and terminating QH |
230 | * control, full-speed control and terminating QH (see explanation for | 251 | * (see explanation for the terminating QH below). |
231 | * the terminating QH below). | ||
232 | * | 252 | * |
233 | * When we want to add a new QH, we add it to the end of the list for the | 253 | * When we want to add a new QH, we add it to the end of the list for the |
234 | * skeleton QH. | 254 | * skeleton QH. For instance, the schedule list can look like this: |
235 | * | ||
236 | * For instance, the queue can look like this: | ||
237 | * | 255 | * |
238 | * skel int128 QH | 256 | * skel int128 QH |
239 | * dev 1 interrupt QH | 257 | * dev 1 interrupt QH |
@@ -256,26 +274,31 @@ static u32 inline td_status(struct uhci_td *td) { | |||
256 | * - To loop back to the full-speed control queue for full-speed bandwidth | 274 | * - To loop back to the full-speed control queue for full-speed bandwidth |
257 | * reclamation. | 275 | * reclamation. |
258 | * | 276 | * |
259 | * Isochronous transfers are stored before the start of the skeleton | 277 | * There's a special skeleton QH for Isochronous QHs. It never appears |
260 | schedule and don't use QHs. While the UHCI spec doesn't forbid the | 278 | on the schedule, and Isochronous TDs go on the schedule before |
261 | use of QHs for Isochronous, it doesn't use them either. And the spec | 279 | the skeleton QHs. The hardware accesses them directly rather than |
262 | * says that queues never advance on an error completion status, which | 280 | * through their QH, which is used only for bookkeeping purposes. |
263 | * makes them totally unsuitable for Isochronous transfers. | 281 | * While the UHCI spec doesn't forbid the use of QHs for Isochronous, |
282 | * it doesn't use them either. And the spec says that queues never | ||
283 | * advance on an error completion status, which makes them totally | ||
284 | * unsuitable for Isochronous transfers. | ||
264 | */ | 285 | */ |
265 | 286 | ||
266 | #define UHCI_NUM_SKELQH 12 | 287 | #define UHCI_NUM_SKELQH 14 |
267 | #define skel_int128_qh skelqh[0] | 288 | #define skel_unlink_qh skelqh[0] |
268 | #define skel_int64_qh skelqh[1] | 289 | #define skel_iso_qh skelqh[1] |
269 | #define skel_int32_qh skelqh[2] | 290 | #define skel_int128_qh skelqh[2] |
270 | #define skel_int16_qh skelqh[3] | 291 | #define skel_int64_qh skelqh[3] |
271 | #define skel_int8_qh skelqh[4] | 292 | #define skel_int32_qh skelqh[4] |
272 | #define skel_int4_qh skelqh[5] | 293 | #define skel_int16_qh skelqh[5] |
273 | #define skel_int2_qh skelqh[6] | 294 | #define skel_int8_qh skelqh[6] |
274 | #define skel_int1_qh skelqh[7] | 295 | #define skel_int4_qh skelqh[7] |
275 | #define skel_ls_control_qh skelqh[8] | 296 | #define skel_int2_qh skelqh[8] |
276 | #define skel_fs_control_qh skelqh[9] | 297 | #define skel_int1_qh skelqh[9] |
277 | #define skel_bulk_qh skelqh[10] | 298 | #define skel_ls_control_qh skelqh[10] |
278 | #define skel_term_qh skelqh[11] | 299 | #define skel_fs_control_qh skelqh[11] |
300 | #define skel_bulk_qh skelqh[12] | ||
301 | #define skel_term_qh skelqh[13] | ||
279 | 302 | ||
280 | /* | 303 | /* |
281 | * Search tree for determining where <interval> fits in the skelqh[] | 304 | * Search tree for determining where <interval> fits in the skelqh[] |
@@ -293,21 +316,21 @@ static inline int __interval_to_skel(int interval) | |||
293 | if (interval < 16) { | 316 | if (interval < 16) { |
294 | if (interval < 4) { | 317 | if (interval < 4) { |
295 | if (interval < 2) | 318 | if (interval < 2) |
296 | return 7; /* int1 for 0-1 ms */ | 319 | return 9; /* int1 for 0-1 ms */ |
297 | return 6; /* int2 for 2-3 ms */ | 320 | return 8; /* int2 for 2-3 ms */ |
298 | } | 321 | } |
299 | if (interval < 8) | 322 | if (interval < 8) |
300 | return 5; /* int4 for 4-7 ms */ | 323 | return 7; /* int4 for 4-7 ms */ |
301 | return 4; /* int8 for 8-15 ms */ | 324 | return 6; /* int8 for 8-15 ms */ |
302 | } | 325 | } |
303 | if (interval < 64) { | 326 | if (interval < 64) { |
304 | if (interval < 32) | 327 | if (interval < 32) |
305 | return 3; /* int16 for 16-31 ms */ | 328 | return 5; /* int16 for 16-31 ms */ |
306 | return 2; /* int32 for 32-63 ms */ | 329 | return 4; /* int32 for 32-63 ms */ |
307 | } | 330 | } |
308 | if (interval < 128) | 331 | if (interval < 128) |
309 | return 1; /* int64 for 64-127 ms */ | 332 | return 3; /* int64 for 64-127 ms */ |
310 | return 0; /* int128 for 128-255 ms (Max.) */ | 333 | return 2; /* int128 for 128-255 ms (Max.) */ |
311 | } | 334 | } |
312 | 335 | ||
313 | 336 | ||
@@ -363,12 +386,12 @@ struct uhci_hcd { | |||
363 | 386 | ||
364 | spinlock_t lock; | 387 | spinlock_t lock; |
365 | 388 | ||
366 | dma_addr_t frame_dma_handle; /* Hardware frame list */ | 389 | dma_addr_t frame_dma_handle; /* Hardware frame list */ |
367 | __le32 *frame; | 390 | __le32 *frame; |
368 | void **frame_cpu; /* CPU's frame list */ | 391 | void **frame_cpu; /* CPU's frame list */ |
369 | 392 | ||
370 | int fsbr; /* Full-speed bandwidth reclamation */ | 393 | int fsbr; /* Full-speed bandwidth reclamation */ |
371 | unsigned long fsbrtimeout; /* FSBR delay */ | 394 | unsigned long fsbrtimeout; /* FSBR delay */ |
372 | 395 | ||
373 | enum uhci_rh_state rh_state; | 396 | enum uhci_rh_state rh_state; |
374 | unsigned long auto_stop_time; /* When to AUTO_STOP */ | 397 | unsigned long auto_stop_time; /* When to AUTO_STOP */ |
@@ -392,24 +415,19 @@ struct uhci_hcd { | |||
392 | /* Main list of URBs currently controlled by this HC */ | 415 | /* Main list of URBs currently controlled by this HC */ |
393 | struct list_head urb_list; | 416 | struct list_head urb_list; |
394 | 417 | ||
395 | /* List of QHs that are done, but waiting to be unlinked (race) */ | ||
396 | struct list_head qh_remove_list; | ||
397 | unsigned int qh_remove_age; /* Age in frames */ | ||
398 | |||
399 | /* List of TDs that are done, but waiting to be freed (race) */ | 418 | /* List of TDs that are done, but waiting to be freed (race) */ |
400 | struct list_head td_remove_list; | 419 | struct list_head td_remove_list; |
401 | unsigned int td_remove_age; /* Age in frames */ | 420 | unsigned int td_remove_age; /* Age in frames */ |
402 | 421 | ||
403 | /* List of asynchronously unlinked URBs */ | ||
404 | struct list_head urb_remove_list; | ||
405 | unsigned int urb_remove_age; /* Age in frames */ | ||
406 | |||
407 | /* List of URBs awaiting completion callback */ | 422 | /* List of URBs awaiting completion callback */ |
408 | struct list_head complete_list; | 423 | struct list_head complete_list; |
409 | 424 | ||
425 | struct list_head idle_qh_list; /* Where the idle QHs live */ | ||
426 | |||
410 | int rh_numports; /* Number of root-hub ports */ | 427 | int rh_numports; /* Number of root-hub ports */ |
411 | 428 | ||
412 | wait_queue_head_t waitqh; /* endpoint_disable waiters */ | 429 | wait_queue_head_t waitqh; /* endpoint_disable waiters */ |
430 | int num_waiting; /* Number of waiters */ | ||
413 | }; | 431 | }; |
414 | 432 | ||
415 | /* Convert between a usb_hcd pointer and the corresponding uhci_hcd */ | 433 | /* Convert between a usb_hcd pointer and the corresponding uhci_hcd */ |
@@ -430,22 +448,19 @@ static inline struct usb_hcd *uhci_to_hcd(struct uhci_hcd *uhci) | |||
430 | */ | 448 | */ |
431 | struct urb_priv { | 449 | struct urb_priv { |
432 | struct list_head urb_list; | 450 | struct list_head urb_list; |
451 | struct list_head node; /* Node in the QH's urbp list */ | ||
433 | 452 | ||
434 | struct urb *urb; | 453 | struct urb *urb; |
435 | 454 | ||
436 | struct uhci_qh *qh; /* QH for this URB */ | 455 | struct uhci_qh *qh; /* QH for this URB */ |
437 | struct list_head td_list; | 456 | struct list_head td_list; |
438 | 457 | ||
439 | unsigned fsbr : 1; /* URB turned on FSBR */ | ||
440 | unsigned fsbr_timeout : 1; /* URB timed out on FSBR */ | ||
441 | unsigned queued : 1; /* QH was queued (not linked in) */ | ||
442 | unsigned short_control_packet : 1; /* If we get a short packet during */ | ||
443 | /* a control transfer, retrigger */ | ||
444 | /* the status phase */ | ||
445 | |||
446 | unsigned long fsbrtime; /* In jiffies */ | 458 | unsigned long fsbrtime; /* In jiffies */ |
447 | 459 | ||
448 | struct list_head queue_list; | 460 | unsigned fsbr : 1; /* URB turned on FSBR */ |
461 | unsigned fsbr_timeout : 1; /* URB timed out on FSBR */ | ||
462 | unsigned short_transfer : 1; /* URB got a short transfer, no | ||
463 | * need to rescan */ | ||
449 | }; | 464 | }; |
450 | 465 | ||
451 | 466 | ||