aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb/host/xhci-ring.c
diff options
context:
space:
mode:
authorSarah Sharp <sarah.a.sharp@linux.intel.com>2009-04-27 22:53:56 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2009-06-16 00:44:48 -0400
commit7f84eef0dafb1d318263d8b71c38700aaf2d530d (patch)
treed7de1ac3d91fb206a5cec2e85b0ad7f4a7b78b21 /drivers/usb/host/xhci-ring.c
parenta74588f94655263b96dacbbf14aac0958d8b7409 (diff)
USB: xhci: No-op command queueing and irq handler.
xHCI host controllers can optionally implement a no-op test. This simple test ensures the OS has correctly setup all basic data structures and can correctly respond to interrupts from the host controller hardware. There are two rings exercised by the no-op test: the command ring, and the event ring. The host controller driver writes a no-op command TRB to the command ring, and rings the doorbell for the command ring (the first entry in the doorbell array). The hardware receives this event, places a command completion event on the event ring, and fires an interrupt. The host controller driver sees the interrupt, and checks the event ring for TRBs it can process, and sees the command completion event. (See the rules in xhci-ring.c for who "owns" a TRB. This is a simplified set of rules, and may not contain all the details that are in the xHCI 0.95 spec.) A timer fires every 60 seconds to debug the state of the hardware and command and event rings. This timer only runs if CONFIG_USB_XHCI_HCD_DEBUGGING is 'y'. Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--drivers/usb/host/xhci-ring.c367
1 files changed, 367 insertions, 0 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
new file mode 100644
index 000000000000..c7e3c7142b9d
--- /dev/null
+++ b/drivers/usb/host/xhci-ring.c
@@ -0,0 +1,367 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23/*
24 * Ring initialization rules:
25 * 1. Each segment is initialized to zero, except for link TRBs.
26 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
27 * Consumer Cycle State (CCS), depending on ring function.
28 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
29 *
30 * Ring behavior rules:
31 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
32 * least one free TRB in the ring. This is useful if you want to turn that
33 * into a link TRB and expand the ring.
34 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
35 * link TRB, then load the pointer with the address in the link TRB. If the
36 * link TRB had its toggle bit set, you may need to update the ring cycle
37 * state (see cycle bit rules). You may have to do this multiple times
38 * until you reach a non-link TRB.
39 * 3. A ring is full if enqueue++ (for the definition of increment above)
40 * equals the dequeue pointer.
41 *
42 * Cycle bit rules:
43 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
44 * in a link TRB, it must toggle the ring cycle state.
45 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
46 * in a link TRB, it must toggle the ring cycle state.
47 *
48 * Producer rules:
49 * 1. Check if ring is full before you enqueue.
50 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
51 * Update enqueue pointer between each write (which may update the ring
52 * cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt modulation rules.
56 *
57 * Consumer rules:
58 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
59 * the TRB is owned by the consumer.
60 * 2. Update dequeue pointer (which may update the ring cycle state) and
61 * continue processing TRBs until you reach a TRB which is not owned by you.
62 * 3. Notify the producer. SW is the consumer for the event ring, and it
63 * updates event ring dequeue pointer. HC is the consumer for the command and
64 * endpoint rings; it generates events on the event ring for these.
65 */
66
67#include "xhci.h"
68
69/*
70 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
71 * address of the TRB.
72 */
73dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
74 union xhci_trb *trb)
75{
76 unsigned int offset;
77
78 if (!seg || !trb || (void *) trb < (void *) seg->trbs)
79 return 0;
80 /* offset in bytes, since these are byte-addressable */
81 offset = (unsigned int) trb - (unsigned int) seg->trbs;
82 /* SEGMENT_SIZE in bytes, trbs are 16-byte aligned */
83 if (offset > SEGMENT_SIZE || (offset % sizeof(*trb)) != 0)
84 return 0;
85 return seg->dma + offset;
86}
87
88/* Does this link TRB point to the first segment in a ring,
89 * or was the previous TRB the last TRB on the last segment in the ERST?
90 */
91static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
92 struct xhci_segment *seg, union xhci_trb *trb)
93{
94 if (ring == xhci->event_ring)
95 return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
96 (seg->next == xhci->event_ring->first_seg);
97 else
98 return trb->link.control & LINK_TOGGLE;
99}
100
101/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
102 * segment? I.e. would the updated event TRB pointer step off the end of the
103 * event seg?
104 */
105static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
106 struct xhci_segment *seg, union xhci_trb *trb)
107{
108 if (ring == xhci->event_ring)
109 return trb == &seg->trbs[TRBS_PER_SEGMENT];
110 else
111 return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
112}
113
114/*
115 * See Cycle bit rules. SW is the consumer for the event ring only.
116 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
117 */
118static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
119{
120 union xhci_trb *next = ++(ring->dequeue);
121
122 ring->deq_updates++;
123 /* Update the dequeue pointer further if that was a link TRB or we're at
124 * the end of an event ring segment (which doesn't have link TRBS)
125 */
126 while (last_trb(xhci, ring, ring->deq_seg, next)) {
127 if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
128 ring->cycle_state = (ring->cycle_state ? 0 : 1);
129 if (!in_interrupt())
130 xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
131 (unsigned int) ring,
132 (unsigned int) ring->cycle_state);
133 }
134 ring->deq_seg = ring->deq_seg->next;
135 ring->dequeue = ring->deq_seg->trbs;
136 next = ring->dequeue;
137 }
138}
139
140/*
141 * See Cycle bit rules. SW is the consumer for the event ring only.
142 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
143 *
144 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
145 * chain bit is set), then set the chain bit in all the following link TRBs.
146 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
147 * have their chain bit cleared (so that each Link TRB is a separate TD).
148 *
149 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
150 * set, but other sections talk about dealing with the chain bit set.
151 * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
152 */
153static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
154{
155 u32 chain;
156 union xhci_trb *next;
157
158 chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
159 next = ++(ring->enqueue);
160
161 ring->enq_updates++;
162 /* Update the dequeue pointer further if that was a link TRB or we're at
163 * the end of an event ring segment (which doesn't have link TRBS)
164 */
165 while (last_trb(xhci, ring, ring->enq_seg, next)) {
166 if (!consumer) {
167 if (ring != xhci->event_ring) {
168 /* Give this link TRB to the hardware */
169 if (next->link.control & TRB_CYCLE)
170 next->link.control &= (u32) ~TRB_CYCLE;
171 else
172 next->link.control |= (u32) TRB_CYCLE;
173 next->link.control &= TRB_CHAIN;
174 next->link.control |= chain;
175 }
176 /* Toggle the cycle bit after the last ring segment. */
177 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
178 ring->cycle_state = (ring->cycle_state ? 0 : 1);
179 if (!in_interrupt())
180 xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
181 (unsigned int) ring,
182 (unsigned int) ring->cycle_state);
183 }
184 }
185 ring->enq_seg = ring->enq_seg->next;
186 ring->enqueue = ring->enq_seg->trbs;
187 next = ring->enqueue;
188 }
189}
190
191/*
192 * Check to see if there's room to enqueue num_trbs on the ring. See rules
193 * above.
194 * FIXME: this would be simpler and faster if we just kept track of the number
195 * of free TRBs in a ring.
196 */
197static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
198 unsigned int num_trbs)
199{
200 int i;
201 union xhci_trb *enq = ring->enqueue;
202 struct xhci_segment *enq_seg = ring->enq_seg;
203
204 /* Check if ring is empty */
205 if (enq == ring->dequeue)
206 return 1;
207 /* Make sure there's an extra empty TRB available */
208 for (i = 0; i <= num_trbs; ++i) {
209 if (enq == ring->dequeue)
210 return 0;
211 enq++;
212 while (last_trb(xhci, ring, enq_seg, enq)) {
213 enq_seg = enq_seg->next;
214 enq = enq_seg->trbs;
215 }
216 }
217 return 1;
218}
219
/*
 * Tell the host controller how far software has consumed the event ring.
 *
 * Translates the driver's event ring dequeue pointer into a DMA address and
 * writes it to the interrupter's ERST dequeue register pair.  The bits of
 * the current register value covered by ERST_PTR_MASK are preserved and
 * merged back in; only the pointer bits outside the mask are replaced.
 * NOTE(review): register write order (high dword zeroed first, then the low
 * dword) looks deliberate for 32-bit MMIO — do not reorder.
 */
void set_hc_event_deq(struct xhci_hcd *xhci)
{
	u32 temp;
	dma_addr_t deq;

	deq = trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	/* A zero DMA address means dequeue fell outside its segment */
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	temp &= ERST_PTR_MASK;
	if (!in_interrupt())
		xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
	xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue[0]);
}
239
240/* Ring the host controller doorbell after placing a command on the ring */
/* Ring the host controller doorbell after placing a command on the ring.
 *
 * Read-modify-writes doorbell[0] (the command ring's doorbell in the
 * doorbell array), masking with DB_MASK and setting the host-controller
 * target so the HC starts processing the command ring.
 */
void ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// Ding dong!\n");
	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}
251
252static void handle_cmd_completion(struct xhci_hcd *xhci,
253 struct xhci_event_cmd *event)
254{
255 u64 cmd_dma;
256 dma_addr_t cmd_dequeue_dma;
257
258 /* Check completion code */
259 if (GET_COMP_CODE(event->status) != COMP_SUCCESS)
260 xhci_dbg(xhci, "WARN: unsuccessful no-op command\n");
261
262 cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
263 cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
264 xhci->cmd_ring->dequeue);
265 /* Is the command ring deq ptr out of sync with the deq seg ptr? */
266 if (cmd_dequeue_dma == 0) {
267 xhci->error_bitmask |= 1 << 4;
268 return;
269 }
270 /* Does the DMA address match our internal dequeue pointer address? */
271 if (cmd_dma != (u64) cmd_dequeue_dma) {
272 xhci->error_bitmask |= 1 << 5;
273 return;
274 }
275 switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
276 case TRB_TYPE(TRB_CMD_NOOP):
277 ++xhci->noops_handled;
278 break;
279 default:
280 /* Skip over unknown commands on the event ring */
281 xhci->error_bitmask |= 1 << 6;
282 break;
283 }
284 inc_deq(xhci, xhci->cmd_ring, false);
285}
286
287void handle_event(struct xhci_hcd *xhci)
288{
289 union xhci_trb *event;
290
291 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
292 xhci->error_bitmask |= 1 << 1;
293 return;
294 }
295
296 event = xhci->event_ring->dequeue;
297 /* Does the HC or OS own the TRB? */
298 if ((event->event_cmd.flags & TRB_CYCLE) !=
299 xhci->event_ring->cycle_state) {
300 xhci->error_bitmask |= 1 << 2;
301 return;
302 }
303
304 /* FIXME: Only handles command completion events. */
305 switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
306 case TRB_TYPE(TRB_COMPLETION):
307 handle_cmd_completion(xhci, &event->event_cmd);
308 break;
309 default:
310 xhci->error_bitmask |= 1 << 3;
311 }
312
313 /* Update SW and HC event ring dequeue pointer */
314 inc_deq(xhci, xhci->event_ring, true);
315 set_hc_event_deq(xhci);
316 /* Are there more items on the event ring? */
317 handle_event(xhci);
318}
319
320/*
321 * Generic function for queueing a TRB on a ring.
322 * The caller must have checked to make sure there's room on the ring.
323 */
324static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
325 bool consumer,
326 u32 field1, u32 field2, u32 field3, u32 field4)
327{
328 struct xhci_generic_trb *trb;
329
330 trb = &ring->enqueue->generic;
331 trb->field[0] = field1;
332 trb->field[1] = field2;
333 trb->field[2] = field3;
334 trb->field[3] = field4;
335 inc_enq(xhci, ring, consumer);
336}
337
338/* Generic function for queueing a command TRB on the command ring */
339static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
340{
341 if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
342 if (!in_interrupt())
343 xhci_err(xhci, "ERR: No room for command on command ring\n");
344 return -ENOMEM;
345 }
346 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
347 field4 | xhci->cmd_ring->cycle_state);
348 return 0;
349}
350
351/* Queue a no-op command on the command ring */
352static int queue_cmd_noop(struct xhci_hcd *xhci)
353{
354 return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
355}
356
357/*
358 * Place a no-op command on the command ring to test the command and
359 * event ring.
360 */
361void *setup_one_noop(struct xhci_hcd *xhci)
362{
363 if (queue_cmd_noop(xhci) < 0)
364 return NULL;
365 xhci->noops_submitted++;
366 return ring_cmd_db;
367}