author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-08-06 12:48:31 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-08-06 12:48:31 -0400
commit		c87985a3ce723995fc7b25e598238d67154108a1 (patch)
tree		e60def1b77c25c1d74180f62e8a5603f9826f209 /drivers/usb/host/ehci.h
parent		d155255a344c417acad74156654295a2964e6b81 (diff)
parent		0d7614f09c1ebdbaa1599a5aba7593f147bf96ee (diff)
Merge tty-next into 3.6-rc1
This handles the merge issue in:
arch/um/drivers/line.c
arch/um/drivers/line.h
It also resolves the duplicate patches that were in both trees due to the
tty-next branch not getting merged into 3.6-rc1.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/usb/host/ehci.h')
-rw-r--r--	drivers/usb/host/ehci.h	138
1 file changed, 75 insertions, 63 deletions
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 2694ed6558d2..da07d98f7d1d 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -42,7 +42,7 @@ struct ehci_stats {
 	/* irq usage */
 	unsigned long		normal;
 	unsigned long		error;
-	unsigned long		reclaim;
+	unsigned long		iaa;
 	unsigned long		lost_iaa;
 
 	/* termination of urbs from core */
@@ -51,7 +51,7 @@ struct ehci_stats {
 };
 
 /* ehci_hcd->lock guards shared data against other CPUs:
- *   ehci_hcd:	async, reclaim, periodic (and shadow), ...
+ *   ehci_hcd:	async, unlink, periodic (and shadow), ...
  *   usb_host_endpoint: hcpriv
  *   ehci_qh:	qh_next, qtd_list
  *   ehci_qtd:	qtd_list
@@ -62,13 +62,48 @@ struct ehci_stats {
 
 #define	EHCI_MAX_ROOT_PORTS	15		/* see HCS_N_PORTS */
 
+/*
+ * ehci_rh_state values of EHCI_RH_RUNNING or above mean that the
+ * controller may be doing DMA.  Lower values mean there's no DMA.
+ */
 enum ehci_rh_state {
 	EHCI_RH_HALTED,
 	EHCI_RH_SUSPENDED,
-	EHCI_RH_RUNNING
+	EHCI_RH_RUNNING,
+	EHCI_RH_STOPPING
 };
 
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * ehci-timer.c) in parallel with this list.
+ */
+enum ehci_hrtimer_event {
+	EHCI_HRTIMER_POLL_ASS,		/* Poll for async schedule off */
+	EHCI_HRTIMER_POLL_PSS,		/* Poll for periodic schedule off */
+	EHCI_HRTIMER_POLL_DEAD,		/* Wait for dead controller to stop */
+	EHCI_HRTIMER_UNLINK_INTR,	/* Wait for interrupt QH unlink */
+	EHCI_HRTIMER_FREE_ITDS,		/* Wait for unused iTDs and siTDs */
+	EHCI_HRTIMER_ASYNC_UNLINKS,	/* Unlink empty async QHs */
+	EHCI_HRTIMER_IAA_WATCHDOG,	/* Handle lost IAA interrupts */
+	EHCI_HRTIMER_DISABLE_PERIODIC,	/* Wait to disable periodic sched */
+	EHCI_HRTIMER_DISABLE_ASYNC,	/* Wait to disable async sched */
+	EHCI_HRTIMER_IO_WATCHDOG,	/* Check for missing IRQs */
+	EHCI_HRTIMER_NUM_EVENTS		/* Must come last */
+};
+#define EHCI_HRTIMER_NO_EVENT	99
+
 struct ehci_hcd {			/* one per controller */
+	/* timing support */
+	enum ehci_hrtimer_event	next_hrtimer_event;
+	unsigned		enabled_hrtimer_events;
+	ktime_t			hr_timeouts[EHCI_HRTIMER_NUM_EVENTS];
+	struct hrtimer		hrtimer;
+
+	int			PSS_poll_count;
+	int			ASS_poll_count;
+	int			died_poll_count;
+
 	/* glue to PCI and HCD framework */
 	struct ehci_caps __iomem *caps;
 	struct ehci_regs __iomem *regs;
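
[Editor's note, for context only.] The comment above ties enum ehci_hrtimer_event to the event_delays_ns[] and event_handlers[] tables in ehci-timer.c, all indexed by the same enum. A rough sketch of how one of these events might be armed follows; it is not code from this patch, and the helper name and hrtimer details are assumptions:

	/*
	 * Sketch only -- assumed helper, not the driver's real ehci-timer.c
	 * code.  The enum value picks a bit in enabled_hrtimer_events and a
	 * slot in hr_timeouts[]; the single hrtimer is re-armed only when
	 * the new event expires before the one already queued (events are
	 * ordered by increasing delay).
	 */
	static void sketch_enable_event(struct ehci_hcd *ehci,
			enum ehci_hrtimer_event event, u64 delay_ns)
	{
		ktime_t timeout = ktime_add_ns(ktime_get(), delay_ns);

		ehci->enabled_hrtimer_events |= 1 << event;
		ehci->hr_timeouts[event] = timeout;

		if (event < ehci->next_hrtimer_event) {
			ehci->next_hrtimer_event = event;
			hrtimer_start(&ehci->hrtimer, timeout, HRTIMER_MODE_ABS);
		}
	}
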
@@ -78,30 +113,48 @@ struct ehci_hcd {			/* one per controller */
 	spinlock_t		lock;
 	enum ehci_rh_state	rh_state;
 
+	/* general schedule support */
+	bool			scanning:1;
+	bool			need_rescan:1;
+	bool			intr_unlinking:1;
+	bool			async_unlinking:1;
+	bool			shutdown:1;
+	struct ehci_qh		*qh_scan_next;
+
 	/* async schedule support */
 	struct ehci_qh		*async;
 	struct ehci_qh		*dummy;		/* For AMD quirk use */
-	struct ehci_qh		*reclaim;
-	struct ehci_qh		*qh_scan_next;
-	unsigned		scanning : 1;
+	struct ehci_qh		*async_unlink;
+	struct ehci_qh		*async_unlink_last;
+	struct ehci_qh		*async_iaa;
+	unsigned		async_unlink_cycle;
+	unsigned		async_count;	/* async activity count */
 
 	/* periodic schedule support */
 #define	DEFAULT_I_TDPS		1024		/* some HCs can do less */
 	unsigned		periodic_size;
 	__hc32			*periodic;	/* hw periodic table */
 	dma_addr_t		periodic_dma;
+	struct list_head	intr_qh_list;
 	unsigned		i_thresh;	/* uframes HC might cache */
 
 	union ehci_shadow	*pshadow;	/* mirror hw periodic table */
-	int			next_uframe;	/* scan periodic, start here */
-	unsigned		periodic_sched;	/* periodic activity count */
+	struct ehci_qh		*intr_unlink;
+	struct ehci_qh		*intr_unlink_last;
+	unsigned		intr_unlink_cycle;
+	unsigned		now_frame;	/* frame from HC hardware */
+	unsigned		next_frame;	/* scan periodic, start here */
+	unsigned		intr_count;	/* intr activity count */
+	unsigned		isoc_count;	/* isoc activity count */
+	unsigned		periodic_count;	/* periodic activity count */
 	unsigned		uframe_periodic_max; /* max periodic time per uframe */
 
 
-	/* list of itds & sitds completed while clock_frame was still active */
+	/* list of itds & sitds completed while now_frame was still active */
 	struct list_head	cached_itd_list;
+	struct ehci_itd		*last_itd_to_free;
 	struct list_head	cached_sitd_list;
-	unsigned		clock_frame;
+	struct ehci_sitd	*last_sitd_to_free;
 
 	/* per root hub port */
 	unsigned long		reset_done [EHCI_MAX_ROOT_PORTS];
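
[Editor's note, for context only.] The new async_unlink / async_unlink_last pointers, together with qh->unlink_next and qh->unlink_cycle added further down, form a simple FIFO of QHs waiting to be unlinked. A hypothetical append helper (name and placement assumed, not part of this patch) could look like:

	/* Sketch only: append a QH to the async unlink FIFO. */
	static void sketch_async_unlink_add(struct ehci_hcd *ehci,
			struct ehci_qh *qh)
	{
		qh->unlink_next = NULL;
		qh->unlink_cycle = ehci->async_unlink_cycle;

		if (ehci->async_unlink)
			ehci->async_unlink_last->unlink_next = qh;
		else
			ehci->async_unlink = qh;
		ehci->async_unlink_last = qh;
	}
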
@@ -126,10 +179,6 @@ struct ehci_hcd {			/* one per controller */
 	struct dma_pool		*itd_pool;	/* itd per iso urb */
 	struct dma_pool		*sitd_pool;	/* sitd per split iso urb */
 
-	struct timer_list	iaa_watchdog;
-	struct timer_list	watchdog;
-	unsigned long		actions;
-	unsigned		periodic_stamp;
 	unsigned		random_frame;
 	unsigned long		next_statechange;
 	ktime_t			last_periodic_enable;
@@ -143,7 +192,6 @@ struct ehci_hcd {			/* one per controller */
 	unsigned		big_endian_capbase:1;
 	unsigned		has_amcc_usb23:1;
 	unsigned		need_io_watchdog:1;
-	unsigned		broken_periodic:1;
 	unsigned		amd_pll_fix:1;
 	unsigned		fs_i_thresh:1;	/* Intel iso scheduling */
 	unsigned		use_dummy_qh:1;	/* AMD Frame List table quirk*/
@@ -175,10 +223,6 @@ struct ehci_hcd {			/* one per controller */
 #ifdef DEBUG
 	struct dentry		*debug_dir;
 #endif
-	/*
-	 * OTG controllers and transceivers need software interaction
-	 */
-	struct usb_phy		*transceiver;
 };
 
 /* convert between an HCD pointer and the corresponding EHCI_HCD */
@@ -191,34 +235,6 @@ static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci)
 	return container_of ((void *) ehci, struct usb_hcd, hcd_priv);
 }
 
-
-static inline void
-iaa_watchdog_start(struct ehci_hcd *ehci)
-{
-	WARN_ON(timer_pending(&ehci->iaa_watchdog));
-	mod_timer(&ehci->iaa_watchdog,
-			jiffies + msecs_to_jiffies(EHCI_IAA_MSECS));
-}
-
-static inline void iaa_watchdog_done(struct ehci_hcd *ehci)
-{
-	del_timer(&ehci->iaa_watchdog);
-}
-
-enum ehci_timer_action {
-	TIMER_IO_WATCHDOG,
-	TIMER_ASYNC_SHRINK,
-	TIMER_ASYNC_OFF,
-};
-
-static inline void
-timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
-{
-	clear_bit (action, &ehci->actions);
-}
-
-static void free_cached_lists(struct ehci_hcd *ehci);
-
 /*-------------------------------------------------------------------------*/
 
 #include <linux/usb/ehci_def.h>
@@ -328,7 +344,13 @@ union ehci_shadow {
 struct ehci_qh_hw {
 	__hc32			hw_next;	/* see EHCI 3.6.1 */
 	__hc32			hw_info1;	/* see EHCI 3.6.2 */
-#define	QH_HEAD		0x00008000
+#define	QH_CONTROL_EP	(1 << 27)	/* FS/LS control endpoint */
+#define	QH_HEAD		(1 << 15)	/* Head of async reclamation list */
+#define	QH_TOGGLE_CTL	(1 << 14)	/* Data toggle control */
+#define	QH_HIGH_SPEED	(2 << 12)	/* Endpoint speed */
+#define	QH_LOW_SPEED	(1 << 12)
+#define	QH_FULL_SPEED	(0 << 12)
+#define	QH_INACTIVATE	(1 << 7)	/* Inactivate on next transaction */
 	__hc32			hw_info2;	/* see EHCI 3.6.2 */
 #define	QH_SMASK	0x000000ff
 #define	QH_CMASK	0x0000ff00
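
[Editor's note, for context only.] The new hw_info1 masks spell out the QH endpoint-speed (EPS) field in bits 13:12. A minimal illustration of testing it follows; the helper and mask are assumptions, not part of this patch, and the real driver works on __hc32 values converted with hc32_to_cpu() first:

	#define QH_SPEED_MASK	(3 << 12)	/* assumed mask for the EPS field */

	/* Sketch only: does a CPU-order hw_info1 value describe a HS endpoint? */
	static inline bool qh_info1_is_highspeed(u32 info1)
	{
		return (info1 & QH_SPEED_MASK) == QH_HIGH_SPEED;
	}
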
@@ -346,32 +368,23 @@ struct ehci_qh_hw {
 } __attribute__ ((aligned(32)));
 
 struct ehci_qh {
-	struct ehci_qh_hw	*hw;
+	struct ehci_qh_hw	*hw;		/* Must come first */
 	/* the rest is HCD-private */
 	dma_addr_t		qh_dma;		/* address of qh */
 	union ehci_shadow	qh_next;	/* ptr to qh; or periodic */
 	struct list_head	qtd_list;	/* sw qtd list */
+	struct list_head	intr_node;	/* list of intr QHs */
 	struct ehci_qtd		*dummy;
-	struct ehci_qh		*reclaim;	/* next to reclaim */
-
-	struct ehci_hcd		*ehci;
-	unsigned long		unlink_time;
+	struct ehci_qh		*unlink_next;	/* next on unlink list */
 
-	/*
-	 * Do NOT use atomic operations for QH refcounting.  On some CPUs
-	 * (PPC7448 for example), atomic operations cannot be performed on
-	 * memory that is cache-inhibited (i.e. being used for DMA).
-	 * Spinlocks are used to protect all QH fields.
-	 */
-	u32			refcount;
-	unsigned		stamp;
+	unsigned		unlink_cycle;
 
 	u8			needs_rescan;	/* Dequeue during giveback */
 	u8			qh_state;
 #define	QH_STATE_LINKED		1		/* HC sees this */
 #define	QH_STATE_UNLINK		2		/* HC may still see this */
 #define	QH_STATE_IDLE		3		/* HC doesn't see this */
-#define	QH_STATE_UNLINK_WAIT	4		/* LINKED and on reclaim q */
+#define	QH_STATE_UNLINK_WAIT	4		/* LINKED and on unlink q */
 #define	QH_STATE_COMPLETING	5		/* don't touch token.HALT */
 
 	u8			xacterrs;	/* XactErr retry counter */
@@ -421,7 +434,6 @@ struct ehci_iso_stream {
 	/* first field matches ehci_hq, but is NULL */
 	struct ehci_qh_hw	*hw;
 
-	u32			refcount;
 	u8			bEndpointAddress;
 	u8			highspeed;
 	struct list_head	td_list;	/* queued itds/sitds */