author     Paul Mundt <lethal@linux-sh.org>  2009-08-04 02:05:39 -0400
committer  Paul Mundt <lethal@linux-sh.org>  2009-08-04 02:05:39 -0400
commit     00111076f725ec498f5ada19cdd984d111b0e9b3 (patch)
tree       d7d08779bfc836382537c23298613f5948368bdc /drivers/usb
parent     133b170f08d6c20578f25b1ae71f80a5e638ccb6 (diff)
parent     2e6713c7662cc5ebc7346b033c404cb2f708fd51 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6 into sh/hwblk
Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/core/config.c            48
-rw-r--r--  drivers/usb/host/ehci-orion.c         2
-rw-r--r--  drivers/usb/host/ohci-omap.c          1
-rw-r--r--  drivers/usb/host/xhci-dbg.c         199
-rw-r--r--  drivers/usb/host/xhci-hcd.c         290
-rw-r--r--  drivers/usb/host/xhci-mem.c         300
-rw-r--r--  drivers/usb/host/xhci-pci.c           1
-rw-r--r--  drivers/usb/host/xhci-ring.c        305
-rw-r--r--  drivers/usb/host/xhci.h             148
-rw-r--r--  drivers/usb/misc/Kconfig              2
-rw-r--r--  drivers/usb/musb/musb_core.c         21
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c    2
-rw-r--r--  drivers/usb/musb/musb_regs.h          1
-rw-r--r--  drivers/usb/serial/cp210x.c           3
-rw-r--r--  drivers/usb/serial/ftdi_sio.c         1
-rw-r--r--  drivers/usb/serial/ftdi_sio.h         7
-rw-r--r--  drivers/usb/serial/mos7840.c          9
-rw-r--r--  drivers/usb/serial/option.c         133
-rw-r--r--  drivers/usb/serial/usb-serial.c      36
-rw-r--r--  drivers/usb/storage/transport.c       2
20 files changed, 1055 insertions(+), 456 deletions(-)
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 24dfb33f90cb..a16c538d0132 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -80,38 +80,18 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	int max_tx;
 	int i;
 
-	/* Allocate space for the SS endpoint companion descriptor */
-	ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
-			GFP_KERNEL);
-	if (!ep->ss_ep_comp)
-		return -ENOMEM;
 	desc = (struct usb_ss_ep_comp_descriptor *) buffer;
 	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
 		dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
 				" interface %d altsetting %d ep %d: "
 				"using minimum values\n",
 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
-		ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
-		ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
-		ep->ss_ep_comp->desc.bMaxBurst = 0;
-		/*
-		 * Leave bmAttributes as zero, which will mean no streams for
-		 * bulk, and isoc won't support multiple bursts of packets.
-		 * With bursts of only one packet, and a Mult of 1, the max
-		 * amount of data moved per endpoint service interval is one
-		 * packet.
-		 */
-		if (usb_endpoint_xfer_isoc(&ep->desc) ||
-				usb_endpoint_xfer_int(&ep->desc))
-			ep->ss_ep_comp->desc.wBytesPerInterval =
-				ep->desc.wMaxPacketSize;
 		/*
 		 * The next descriptor is for an Endpoint or Interface,
 		 * no extra descriptors to copy into the companion structure,
 		 * and we didn't eat up any of the buffer.
 		 */
-		retval = 0;
-		goto valid;
+		return 0;
 	}
 	memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
 	desc = &ep->ss_ep_comp->desc;
@@ -320,6 +300,28 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 	buffer += i;
 	size -= i;
 
+	/* Allocate space for the SS endpoint companion descriptor */
+	endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
+			GFP_KERNEL);
+	if (!endpoint->ss_ep_comp)
+		return -ENOMEM;
+
+	/* Fill in some default values (may be overwritten later) */
+	endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
+	endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+	endpoint->ss_ep_comp->desc.bMaxBurst = 0;
+	/*
+	 * Leave bmAttributes as zero, which will mean no streams for
+	 * bulk, and isoc won't support multiple bursts of packets.
+	 * With bursts of only one packet, and a Mult of 1, the max
+	 * amount of data moved per endpoint service interval is one
+	 * packet.
+	 */
+	if (usb_endpoint_xfer_isoc(&endpoint->desc) ||
+			usb_endpoint_xfer_int(&endpoint->desc))
+		endpoint->ss_ep_comp->desc.wBytesPerInterval =
+			endpoint->desc.wMaxPacketSize;
+
 	if (size > 0) {
 		retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
 				inum, asnum, endpoint, num_ep, buffer,
@@ -329,6 +331,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 			retval = buffer - buffer0;
 		}
 	} else {
+		dev_warn(ddev, "config %d interface %d altsetting %d "
+				"endpoint 0x%X has no "
+				"SuperSpeed companion descriptor\n",
+				cfgno, inum, asnum, d->bEndpointAddress);
 		retval = buffer - buffer0;
 	}
 } else {
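
The config.c hunks above follow a common defensive-parsing pattern: allocate the companion structure and fill in spec-minimum defaults for every endpoint up front, then let the descriptor parser overwrite those defaults only when a companion descriptor is actually present. A minimal, self-contained sketch of the pattern (the struct and helper names below are invented for illustration, not the kernel's):

    #include <stdlib.h>

    struct companion {
            int max_burst;          /* extra packets per burst; 0 = spec minimum */
            int bytes_per_interval;
    };

    /* Parse a (hypothetical) two-byte companion descriptor, if present. */
    static void parse_desc(struct companion *c, const unsigned char *buf, int len)
    {
            if (len >= 2) {
                    c->max_burst = buf[0];
                    c->bytes_per_interval = buf[1];
            }
    }

    /* Allocate with safe defaults first, then let the parser override them,
     * so a missing descriptor still leaves a usable structure behind. */
    struct companion *get_companion(const unsigned char *buf, int len)
    {
            struct companion *c = calloc(1, sizeof(*c));
            if (!c)
                    return NULL;
            c->max_burst = 0;       /* bursts of one packet */
            parse_desc(c, buf, len);
            return c;
    }

This ordering is what lets the no-descriptor path warn and continue with usable minimum values instead of dereferencing a structure that was never allocated.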
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index dc2ac613a9d1..1d283e1b2b8d 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -105,6 +105,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 	int retval;
 
+	ehci_reset(ehci);
 	retval = ehci_halt(ehci);
 	if (retval)
 		return retval;
@@ -118,7 +119,6 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
 
 	hcd->has_tt = 1;
 
-	ehci_reset(ehci);
 	ehci_port_power(ehci, 0);
 
 	return retval;
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index f3aaba35e912..83cbecd2a1ed 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -282,6 +282,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
 static void ohci_omap_stop(struct usb_hcd *hcd)
 {
 	dev_dbg(hcd->self.controller, "stopping USB Controller\n");
+	ohci_stop(hcd);
 	omap_ohci_clock_power(0);
 }
 
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2501c571f855..705e34324156 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 {
 	void *addr;
 	u32 temp;
+	u64 temp_64;
 
 	addr = &ir_set->irq_pending;
 	temp = xhci_readl(xhci, addr);
@@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 		xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
 				addr, (unsigned int)temp);
 
-	addr = &ir_set->erst_base[0];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n",
-			addr, (unsigned int) temp);
-
-	addr = &ir_set->erst_base[1];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n",
-			addr, (unsigned int) temp);
+	addr = &ir_set->erst_base;
+	temp_64 = xhci_read_64(xhci, addr);
+	xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
+			addr, temp_64);
 
-	addr = &ir_set->erst_dequeue[0];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n",
-			addr, (unsigned int) temp);
-
-	addr = &ir_set->erst_dequeue[1];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n",
-			addr, (unsigned int) temp);
+	addr = &ir_set->erst_dequeue;
+	temp_64 = xhci_read_64(xhci, addr);
+	xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
+			addr, temp_64);
 }
 
 void xhci_print_run_regs(struct xhci_hcd *xhci)
@@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "Link TRB:\n");
 		xhci_print_trb_offsets(xhci, trb);
 
-		address = trb->link.segment_ptr[0] +
-			(((u64) trb->link.segment_ptr[1]) << 32);
+		address = trb->link.segment_ptr;
 		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
 
 		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
@@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		address = trb->trans_event.buffer[0] +
-			(((u64) trb->trans_event.buffer[1]) << 32);
+		address = trb->trans_event.buffer;
 		/*
 		 * FIXME: look at flags to figure out if it's an address or if
 		 * the data is directly in the buffer field.
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
 		break;
 	case TRB_TYPE(TRB_COMPLETION):
-		address = trb->event_cmd.cmd_trb[0] +
-			(((u64) trb->event_cmd.cmd_trb[1]) << 32);
+		address = trb->event_cmd.cmd_trb;
 		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
 		xhci_dbg(xhci, "Completion status = %u\n",
 				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
@@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
 		trb = &seg->trbs[i];
 		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
-				(unsigned int) trb->link.segment_ptr[0],
-				(unsigned int) trb->link.segment_ptr[1],
+				lower_32_bits(trb->link.segment_ptr),
+				upper_32_bits(trb->link.segment_ptr),
 				(unsigned int) trb->link.intr_target,
 				(unsigned int) trb->link.control);
 		addr += sizeof(*trb);
@@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 		entry = &erst->entries[i];
 		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
 				(unsigned int) addr,
-				(unsigned int) entry->seg_addr[0],
-				(unsigned int) entry->seg_addr[1],
+				lower_32_bits(entry->seg_addr),
+				upper_32_bits(entry->seg_addr),
 				(unsigned int) entry->seg_size,
 				(unsigned int) entry->rsvd);
 		addr += sizeof(*entry);
@@ -396,90 +384,147 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
 {
-	u32 val;
+	u64 val;
 
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
-	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
+			lower_32_bits(val));
+	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
+			upper_32_bits(val));
 }
 
-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+/* Print the last 32 bytes for 64-byte contexts */
+static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
+{
+	int i;
+	for (i = 0; i < 4; ++i) {
+		xhci_dbg(xhci, "@%p (virt) @%08llx "
+				"(dma) %#08llx - rsvd64[%d]\n",
+				&ctx[4 + i], (unsigned long long)dma,
+				ctx[4 + i], i);
+		dma += 8;
+	}
+}
+
+void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
-	int i, j;
-	int last_ep_ctx = 31;
 	/* Fields are 32 bits wide, DMA addresses are in bytes */
 	int field_size = 32 / 8;
+	int i;
 
-	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
-			&ctx->drop_flags, (unsigned long long)dma,
-			ctx->drop_flags);
-	dma += field_size;
-	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
-			&ctx->add_flags, (unsigned long long)dma,
-			ctx->add_flags);
-	dma += field_size;
-	for (i = 0; i > 6; ++i) {
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-				&ctx->rsvd[i], (unsigned long long)dma,
-				ctx->rsvd[i], i);
-		dma += field_size;
-	}
+	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+	dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
+	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
 
 	xhci_dbg(xhci, "Slot Context:\n");
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
-			&ctx->slot.dev_info,
-			(unsigned long long)dma, ctx->slot.dev_info);
+			&slot_ctx->dev_info,
+			(unsigned long long)dma, slot_ctx->dev_info);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
-			&ctx->slot.dev_info2,
-			(unsigned long long)dma, ctx->slot.dev_info2);
+			&slot_ctx->dev_info2,
+			(unsigned long long)dma, slot_ctx->dev_info2);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
-			&ctx->slot.tt_info,
-			(unsigned long long)dma, ctx->slot.tt_info);
+			&slot_ctx->tt_info,
+			(unsigned long long)dma, slot_ctx->tt_info);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
-			&ctx->slot.dev_state,
-			(unsigned long long)dma, ctx->slot.dev_state);
+			&slot_ctx->dev_state,
+			(unsigned long long)dma, slot_ctx->dev_state);
 	dma += field_size;
-	for (i = 0; i > 4; ++i) {
+	for (i = 0; i < 4; ++i) {
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-				&ctx->slot.reserved[i], (unsigned long long)dma,
-				ctx->slot.reserved[i], i);
+				&slot_ctx->reserved[i], (unsigned long long)dma,
+				slot_ctx->reserved[i], i);
 		dma += field_size;
 	}
 
+	if (csz)
+		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
+}
+
+void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx,
+		unsigned int last_ep)
+{
+	int i, j;
+	int last_ep_ctx = 31;
+	/* Fields are 32 bits wide, DMA addresses are in bytes */
+	int field_size = 32 / 8;
+	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
 	if (last_ep < 31)
 		last_ep_ctx = last_ep + 1;
 	for (i = 0; i < last_ep_ctx; ++i) {
+		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
+		dma_addr_t dma = ctx->dma +
+			((unsigned long)ep_ctx - (unsigned long)ctx);
+
 		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
-				&ctx->ep[i].ep_info,
-				(unsigned long long)dma, ctx->ep[i].ep_info);
+				&ep_ctx->ep_info,
+				(unsigned long long)dma, ep_ctx->ep_info);
 		dma += field_size;
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
-				&ctx->ep[i].ep_info2,
-				(unsigned long long)dma, ctx->ep[i].ep_info2);
-		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
-				&ctx->ep[i].deq[0],
-				(unsigned long long)dma, ctx->ep[i].deq[0]);
-		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
-				&ctx->ep[i].deq[1],
-				(unsigned long long)dma, ctx->ep[i].deq[1]);
+				&ep_ctx->ep_info2,
+				(unsigned long long)dma, ep_ctx->ep_info2);
 		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
+				&ep_ctx->deq,
+				(unsigned long long)dma, ep_ctx->deq);
+		dma += 2*field_size;
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
-				&ctx->ep[i].tx_info,
-				(unsigned long long)dma, ctx->ep[i].tx_info);
+				&ep_ctx->tx_info,
+				(unsigned long long)dma, ep_ctx->tx_info);
 		dma += field_size;
 		for (j = 0; j < 3; ++j) {
 			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-					&ctx->ep[i].reserved[j],
+					&ep_ctx->reserved[j],
 					(unsigned long long)dma,
-					ctx->ep[i].reserved[j], j);
+					ep_ctx->reserved[j], j);
+			dma += field_size;
+		}
+
+		if (csz)
+			dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
+	}
+}
+
+void xhci_dbg_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx,
+		unsigned int last_ep)
+{
+	int i;
+	/* Fields are 32 bits wide, DMA addresses are in bytes */
+	int field_size = 32 / 8;
+	struct xhci_slot_ctx *slot_ctx;
+	dma_addr_t dma = ctx->dma;
+	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+	if (ctx->type == XHCI_CTX_TYPE_INPUT) {
+		struct xhci_input_control_ctx *ctrl_ctx =
+			xhci_get_input_control_ctx(xhci, ctx);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+				&ctrl_ctx->drop_flags, (unsigned long long)dma,
+				ctrl_ctx->drop_flags);
+		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+				&ctrl_ctx->add_flags, (unsigned long long)dma,
+				ctrl_ctx->add_flags);
+		dma += field_size;
+		for (i = 0; i < 6; ++i) {
+			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
+					&ctrl_ctx->rsvd2[i], (unsigned long long)dma,
+					ctrl_ctx->rsvd2[i], i);
 			dma += field_size;
 		}
+
+		if (csz)
+			dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
 	}
+
+	slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+	xhci_dbg_slot_ctx(xhci, ctx);
+	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
 }
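
Several xhci-dbg.c hunks above replace pairs of 32-bit register reads with a single 64-bit read whose value is then split for printing with lower_32_bits()/upper_32_bits(). A minimal sketch of what such a 64-bit read helper has to do on a controller that only guarantees 32-bit MMIO accesses (the helper names and register layout here are illustrative assumptions, not the kernel's definitions):

    #include <stdint.h>

    /* Illustrative stand-in for a 32-bit MMIO read such as readl(). */
    static inline uint32_t mmio_read32(const volatile uint32_t *reg)
    {
            return *reg;
    }

    /* Read a 64-bit register as two 32-bit halves, low word first,
     * then recombine them; lower_32_bits()/upper_32_bits() perform
     * the reverse split when the value is printed. */
    static inline uint64_t mmio_read64(const volatile uint32_t *reg)
    {
            uint64_t lo = mmio_read32(reg);
            uint64_t hi = mmio_read32(reg + 1);
            return lo | (hi << 32);
    }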
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index dba3e07ccd09..816c39caca1c 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -103,7 +103,10 @@ int xhci_reset(struct xhci_hcd *xhci)
 	u32 state;
 
 	state = xhci_readl(xhci, &xhci->op_regs->status);
-	BUG_ON((state & STS_HALT) == 0);
+	if ((state & STS_HALT) == 0) {
+		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
+		return 0;
+	}
 
 	xhci_dbg(xhci, "// Reset the HC\n");
 	command = xhci_readl(xhci, &xhci->op_regs->command);
@@ -226,6 +229,7 @@ int xhci_init(struct usb_hcd *hcd)
 static void xhci_work(struct xhci_hcd *xhci)
 {
 	u32 temp;
+	u64 temp_64;
 
 	/*
 	 * Clear the op reg interrupt status first,
@@ -248,9 +252,9 @@ static void xhci_work(struct xhci_hcd *xhci)
 	/* FIXME this should be a delayed service routine that clears the EHB */
 	xhci_handle_event(xhci);
 
-	/* Clear the event handler busy flag; the event ring should be empty. */
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-	xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
 	/* Flush posted writes -- FIXME is this necessary? */
 	xhci_readl(xhci, &xhci->ir_set->irq_pending);
 }
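
The xhci_work() hunk above fixes the handling of a write-1-to-clear (RW1C) status bit: an RW1C flag is acknowledged by writing a 1 to its position, so the new code ORs ERST_EHB into the value instead of masking it out, which would have silently left the flag set. A small sketch of the idiom, with a made-up register bit:

    #include <stdint.h>

    #define STATUS_BUSY (1u << 3)   /* hypothetical RW1C bit */

    /* For RW1C bits, writing 0 is a no-op and writing 1 clears the bit,
     * so "val & ~STATUS_BUSY" leaves the flag asserted while
     * "val | STATUS_BUSY" actually acknowledges it. */
    static inline void ack_busy(volatile uint32_t *reg)
    {
            uint32_t val = *reg;
            *reg = val | STATUS_BUSY;       /* clear by writing 1 */
    }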
@@ -266,19 +270,34 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	u32 temp, temp2;
+	union xhci_trb *trb;
 
 	spin_lock(&xhci->lock);
+	trb = xhci->event_ring->dequeue;
 	/* Check if the xHC generated the interrupt, or the irq is shared */
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	if (temp == 0xffffffff && temp2 == 0xffffffff)
+		goto hw_died;
+
 	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
 		spin_unlock(&xhci->lock);
 		return IRQ_NONE;
 	}
+	xhci_dbg(xhci, "op reg status = %08x\n", temp);
+	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
+	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
+	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
+			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
+			lower_32_bits(trb->link.segment_ptr),
+			upper_32_bits(trb->link.segment_ptr),
+			(unsigned int) trb->link.intr_target,
+			(unsigned int) trb->link.control);
 
 	if (temp & STS_FATAL) {
 		xhci_warn(xhci, "WARNING: Host System Error\n");
 		xhci_halt(xhci);
+hw_died:
 		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
 		spin_unlock(&xhci->lock);
 		return -ESHUTDOWN;
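
The hw_died path added to xhci_irq() relies on a standard PCI convention: reads from a dead or surprise-removed device return all ones, so two unrelated registers both reading 0xffffffff is treated as proof the hardware is gone rather than as real status. A hedged sketch of the check:

    #include <stdint.h>
    #include <stdbool.h>

    /* A dead or unplugged PCI device floats the bus, so every read
     * returns 0xffffffff; one register could legitimately be all ones,
     * but two independent ones almost certainly cannot. */
    static bool device_has_died(uint32_t status, uint32_t irq_pending)
    {
            return status == 0xffffffff && irq_pending == 0xffffffff;
    }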
@@ -295,6 +314,7 @@ void xhci_event_ring_work(unsigned long arg)
 {
 	unsigned long flags;
 	int temp;
+	u64 temp_64;
 	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
 	int i, j;
 
@@ -311,9 +331,9 @@ void xhci_event_ring_work(unsigned long arg)
 	xhci_dbg(xhci, "Event ring:\n");
 	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-	temp &= ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	temp_64 &= ~ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
 	xhci_dbg(xhci, "Command ring:\n");
 	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -356,6 +376,7 @@ void xhci_event_ring_work(unsigned long arg)
 int xhci_run(struct usb_hcd *hcd)
 {
 	u32 temp;
+	u64 temp_64;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	void (*doorbell)(struct xhci_hcd *) = NULL;
 
@@ -382,6 +403,20 @@ int xhci_run(struct usb_hcd *hcd)
 	add_timer(&xhci->event_ring_timer);
 #endif
 
+	xhci_dbg(xhci, "Command ring memory map follows:\n");
+	xhci_debug_ring(xhci, xhci->cmd_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+	xhci_dbg_cmd_ptrs(xhci);
+
+	xhci_dbg(xhci, "ERST memory map follows:\n");
+	xhci_dbg_erst(xhci, &xhci->erst);
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_ring(xhci, xhci->event_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	temp_64 &= ~ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+
 	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
 	temp &= ~ER_IRQ_INTERVAL_MASK;
@@ -406,22 +441,6 @@ int xhci_run(struct usb_hcd *hcd)
 	if (NUM_TEST_NOOPS > 0)
 		doorbell = xhci_setup_one_noop(xhci);
 
-	xhci_dbg(xhci, "Command ring memory map follows:\n");
-	xhci_debug_ring(xhci, xhci->cmd_ring);
-	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
-	xhci_dbg_cmd_ptrs(xhci);
-
-	xhci_dbg(xhci, "ERST memory map follows:\n");
-	xhci_dbg_erst(xhci, &xhci->erst);
-	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_ring(xhci, xhci->event_ring);
-	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-	temp &= ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
-	xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
-
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_RUN);
 	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
@@ -601,10 +620,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		goto exit;
 	}
 	if (usb_endpoint_xfer_control(&urb->ep->desc))
-		ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+		/* We have a spinlock and interrupts disabled, so we must pass
+		 * atomic context to this function, which may allocate memory.
+		 */
+		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
-		ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 	else
 		ret = -EINVAL;
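
The xhci_urb_enqueue() hunk above pins the allocation flags to GFP_ATOMIC because the queueing functions run under spin_lock_irqsave(): a caller-supplied mem_flags of GFP_KERNEL could sleep with the lock held and interrupts off. A minimal kernel-style sketch of the rule (struct my_dev and my_enqueue are invented for illustration):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_dev {                 /* hypothetical driver state */
            spinlock_t lock;
            void *scratch;
    };

    /* Callers may pass GFP_KERNEL, but once the lock is held with IRQs
     * disabled, sleeping is forbidden, so the allocation is pinned to
     * GFP_ATOMIC no matter what mem_flags says. */
    static int my_enqueue(struct my_dev *dev, size_t len, gfp_t mem_flags)
    {
            unsigned long flags;
            void *buf;

            spin_lock_irqsave(&dev->lock, flags);
            buf = kmalloc(len, GFP_ATOMIC);     /* not mem_flags */
            if (buf)
                    dev->scratch = buf;
            spin_unlock_irqrestore(&dev->lock, flags);
            return buf ? 0 : -ENOMEM;
    }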
@@ -661,8 +683,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		goto done;
 
 	xhci_dbg(xhci, "Cancel URB %p\n", urb);
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_ring(xhci, xhci->event_ring);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+	xhci_dbg(xhci, "Endpoint ring:\n");
+	xhci_debug_ring(xhci, ep_ring);
 	td = (struct xhci_td *) urb->hcpriv;
 
 	ep_ring->cancels_pending++;
@@ -696,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
 	struct xhci_hcd *xhci;
-	struct xhci_device_control *in_ctx;
+	struct xhci_container_ctx *in_ctx, *out_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_slot_ctx *slot_ctx;
 	unsigned int last_ctx;
 	unsigned int ep_index;
 	struct xhci_ep_ctx *ep_ctx;
@@ -724,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	}
 
 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
 	/* If the HC already knows the endpoint is disabled,
 	 * or the HCD has noted it is disabled, ignore this request
 	 */
 	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
-			in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
 				__func__, ep);
 		return 0;
 	}
 
-	in_ctx->drop_flags |= drop_flag;
-	new_drop_flags = in_ctx->drop_flags;
+	ctrl_ctx->drop_flags |= drop_flag;
+	new_drop_flags = ctrl_ctx->drop_flags;
 
-	in_ctx->add_flags = ~drop_flag;
-	new_add_flags = in_ctx->add_flags;
+	ctrl_ctx->add_flags = ~drop_flag;
+	new_add_flags = ctrl_ctx->add_flags;
 
-	last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
+	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
 	/* Update the last valid endpoint context, if we deleted the last one */
-	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
-		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
-		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+		slot_ctx->dev_info &= ~LAST_CTX_MASK;
+		slot_ctx->dev_info |= LAST_CTX(last_ctx);
 	}
-	new_slot_info = in_ctx->slot.dev_info;
+	new_slot_info = slot_ctx->dev_info;
 
 	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
 
@@ -778,17 +809,22 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
 	struct xhci_hcd *xhci;
-	struct xhci_device_control *in_ctx;
+	struct xhci_container_ctx *in_ctx, *out_ctx;
 	unsigned int ep_index;
 	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
 	u32 added_ctxs;
 	unsigned int last_ctx;
 	u32 new_add_flags, new_drop_flags, new_slot_info;
 	int ret = 0;
 
 	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
-	if (ret <= 0)
+	if (ret <= 0) {
+		/* So we won't queue a reset ep command for a root hub */
+		ep->hcpriv = NULL;
 		return ret;
+	}
 	xhci = hcd_to_xhci(hcd);
 
 	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
@@ -810,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	}
 
 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
 	/* If the HCD has already noted the endpoint is enabled,
 	 * ignore this request.
 	 */
-	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
 				__func__, ep);
 		return 0;
@@ -833,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		return -ENOMEM;
 	}
 
-	in_ctx->add_flags |= added_ctxs;
-	new_add_flags = in_ctx->add_flags;
+	ctrl_ctx->add_flags |= added_ctxs;
+	new_add_flags = ctrl_ctx->add_flags;
 
 	/* If xhci_endpoint_disable() was called for this endpoint, but the
 	 * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -842,14 +880,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 * descriptors. We must drop and re-add this endpoint, so we leave the
 	 * drop flags alone.
 	 */
-	new_drop_flags = in_ctx->drop_flags;
+	new_drop_flags = ctrl_ctx->drop_flags;
 
+	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
 	/* Update the last valid endpoint context, if we just added one past */
-	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
-		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
-		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+		slot_ctx->dev_info &= ~LAST_CTX_MASK;
+		slot_ctx->dev_info |= LAST_CTX(last_ctx);
 	}
-	new_slot_info = in_ctx->slot.dev_info;
+	new_slot_info = slot_ctx->dev_info;
+
+	/* Store the usb_device pointer for later use */
+	ep->hcpriv = udev;
 
 	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
 		(unsigned int) ep->desc.bEndpointAddress,
@@ -860,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	return 0;
 }
 
-static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
 {
+	struct xhci_input_control_ctx *ctrl_ctx;
 	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_slot_ctx *slot_ctx;
 	int i;
 
 	/* When a device's add flag and drop flag are zero, any subsequent
@@ -870,17 +914,18 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
 	 * untouched. Make sure we don't leave any old state in the input
 	 * endpoint contexts.
 	 */
-	virt_dev->in_ctx->drop_flags = 0;
-	virt_dev->in_ctx->add_flags = 0;
-	virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+	ctrl_ctx->drop_flags = 0;
+	ctrl_ctx->add_flags = 0;
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+	slot_ctx->dev_info &= ~LAST_CTX_MASK;
 	/* Endpoint 0 is always valid */
-	virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+	slot_ctx->dev_info |= LAST_CTX(1);
 	for (i = 1; i < 31; ++i) {
-		ep_ctx = &virt_dev->in_ctx->ep[i];
+		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
 		ep_ctx->ep_info = 0;
 		ep_ctx->ep_info2 = 0;
-		ep_ctx->deq[0] = 0;
-		ep_ctx->deq[1] = 0;
+		ep_ctx->deq = 0;
 		ep_ctx->tx_info = 0;
 	}
 }
@@ -903,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	unsigned long flags;
 	struct xhci_hcd *xhci;
 	struct xhci_virt_device *virt_dev;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_slot_ctx *slot_ctx;
 
 	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
 	if (ret <= 0)
@@ -918,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	virt_dev = xhci->devs[udev->slot_id];
 
 	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
-	virt_dev->in_ctx->add_flags |= SLOT_FLAG;
-	virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
-	virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
-	virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+	ctrl_ctx->add_flags |= SLOT_FLAG;
+	ctrl_ctx->add_flags &= ~EP0_FLAG;
+	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
+	ctrl_ctx->drop_flags &= ~EP0_FLAG;
 	xhci_dbg(xhci, "New Input Control Context:\n");
-	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
-			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
+			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
 			udev->slot_id);
 	if (ret < 0) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -982,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	}
 
 	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
-	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
-			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
+			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
-	xhci_zero_in_ctx(virt_dev);
+	xhci_zero_in_ctx(xhci, virt_dev);
 	/* Free any old rings */
 	for (i = 1; i < 31; ++i) {
 		if (virt_dev->new_ep_rings[i]) {
@@ -1023,7 +1072,67 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 			virt_dev->new_ep_rings[i] = NULL;
 		}
 	}
-	xhci_zero_in_ctx(virt_dev);
+	xhci_zero_in_ctx(xhci, virt_dev);
+}
+
+/* Deal with stalled endpoints. The core should have sent the control message
+ * to clear the halt condition. However, we need to make the xHCI hardware
+ * reset its sequence number, since a device will expect a sequence number of
+ * zero after the halt condition is cleared.
+ * Context: in_interrupt
+ */
+void xhci_endpoint_reset(struct usb_hcd *hcd,
+		struct usb_host_endpoint *ep)
+{
+	struct xhci_hcd *xhci;
+	struct usb_device *udev;
+	unsigned int ep_index;
+	unsigned long flags;
+	int ret;
+	struct xhci_dequeue_state deq_state;
+	struct xhci_ring *ep_ring;
+
+	xhci = hcd_to_xhci(hcd);
+	udev = (struct usb_device *) ep->hcpriv;
+	/* Called with a root hub endpoint (or an endpoint that wasn't added
+	 * with xhci_add_endpoint()
+	 */
+	if (!ep->hcpriv)
+		return;
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
+	if (!ep_ring->stopped_td) {
+		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
+				ep->desc.bEndpointAddress);
+		return;
+	}
+
+	xhci_dbg(xhci, "Queueing reset endpoint command\n");
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
+	/*
+	 * Can't change the ring dequeue pointer until it's transitioned to the
+	 * stopped state, which is only upon a successful reset endpoint
+	 * command. Better hope that last command worked!
+	 */
+	if (!ret) {
+		xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+		/* We need to move the HW's dequeue pointer past this TD,
+		 * or it will attempt to resend it on the next doorbell ring.
+		 */
+		xhci_find_new_dequeue_state(xhci, udev->slot_id,
+				ep_index, ep_ring->stopped_td, &deq_state);
+		xhci_dbg(xhci, "Queueing new dequeue state\n");
+		xhci_queue_new_dequeue_state(xhci, ep_ring,
+				udev->slot_id,
+				ep_index, &deq_state);
+		kfree(ep_ring->stopped_td);
+		xhci_ring_cmd_db(xhci);
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	if (ret)
+		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
 }
 
 /*
@@ -1120,7 +1229,9 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	struct xhci_virt_device *virt_dev;
 	int ret = 0;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	u32 temp;
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u64 temp_64;
 
 	if (!udev->slot_id) {
 		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
@@ -1133,10 +1244,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	if (!udev->config)
 		xhci_setup_addressable_virt_dev(xhci, udev);
 	/* Otherwise, assume the core has the device configured how it wants */
+	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
 			udev->slot_id);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
@@ -1176,41 +1289,37 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	default:
 		xhci_err(xhci, "ERROR: unexpected command completion "
 				"code 0x%x.\n", virt_dev->cmd_status);
+		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
+		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
 		ret = -EINVAL;
 		break;
 	}
 	if (ret) {
 		return ret;
 	}
-	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
-	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
-	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
-			udev->slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
-			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
+	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
 			udev->slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
-			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
+			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
+			(unsigned long long)
+			xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
 	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
-			(unsigned long long)virt_dev->out_ctx_dma);
+			(unsigned long long)virt_dev->out_ctx->dma);
 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
-	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
-	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
 	/*
 	 * USB core uses address 1 for the roothubs, so we add one to the
 	 * address given back to us by the HC.
 	 */
-	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+	udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
 	/* Zero the input context control for later use */
-	virt_dev->in_ctx->add_flags = 0;
-	virt_dev->in_ctx->drop_flags = 0;
-	/* Mirror flags in the output context for future ep enable/disable */
-	virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
-	virt_dev->out_ctx->drop_flags = 0;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+	ctrl_ctx->add_flags = 0;
+	ctrl_ctx->drop_flags = 0;
 
 	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
 	/* XXX Meh, not sure if anyone else but choose_address uses this. */
@@ -1252,7 +1361,6 @@ static int __init xhci_hcd_init(void)
 	/* xhci_device_control has eight fields, and also
 	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
 	 */
-	BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
 	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
 	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
 	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index c8a72de1c508..e6b9a1c6002d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, | |||
88 | return; | 88 | return; |
89 | prev->next = next; | 89 | prev->next = next; |
90 | if (link_trbs) { | 90 | if (link_trbs) { |
91 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma; | 91 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; |
92 | 92 | ||
93 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ | 93 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ |
94 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; | 94 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; |
@@ -189,6 +189,63 @@ fail: | |||
189 | return 0; | 189 | return 0; |
190 | } | 190 | } |
191 | 191 | ||
192 | #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) | ||
193 | |||
194 | struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, | ||
195 | int type, gfp_t flags) | ||
196 | { | ||
197 | struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); | ||
198 | if (!ctx) | ||
199 | return NULL; | ||
200 | |||
201 | BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); | ||
202 | ctx->type = type; | ||
203 | ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; | ||
204 | if (type == XHCI_CTX_TYPE_INPUT) | ||
205 | ctx->size += CTX_SIZE(xhci->hcc_params); | ||
206 | |||
207 | ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); | ||
208 | memset(ctx->bytes, 0, ctx->size); | ||
209 | return ctx; | ||
210 | } | ||
211 | |||
212 | void xhci_free_container_ctx(struct xhci_hcd *xhci, | ||
213 | struct xhci_container_ctx *ctx) | ||
214 | { | ||
215 | dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); | ||
216 | kfree(ctx); | ||
217 | } | ||
218 | |||
219 | struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, | ||
220 | struct xhci_container_ctx *ctx) | ||
221 | { | ||
222 | BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); | ||
223 | return (struct xhci_input_control_ctx *)ctx->bytes; | ||
224 | } | ||
225 | |||
226 | struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, | ||
227 | struct xhci_container_ctx *ctx) | ||
228 | { | ||
229 | if (ctx->type == XHCI_CTX_TYPE_DEVICE) | ||
230 | return (struct xhci_slot_ctx *)ctx->bytes; | ||
231 | |||
232 | return (struct xhci_slot_ctx *) | ||
233 | (ctx->bytes + CTX_SIZE(xhci->hcc_params)); | ||
234 | } | ||
235 | |||
236 | struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, | ||
237 | struct xhci_container_ctx *ctx, | ||
238 | unsigned int ep_index) | ||
239 | { | ||
240 | /* increment ep index by offset of start of ep ctx array */ | ||
241 | ep_index++; | ||
242 | if (ctx->type == XHCI_CTX_TYPE_INPUT) | ||
243 | ep_index++; | ||
244 | |||
245 | return (struct xhci_ep_ctx *) | ||
246 | (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); | ||
247 | } | ||
248 | |||
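The accessors above replace direct struct member access (in_ctx->slot, in_ctx->ep[i]) with byte-offset arithmetic, so one code path serves both layouts: an input context is [input control ctx][slot ctx][ep ctx 0..30] and a device context is [slot ctx][ep ctx 0..30], each sub-context CTX_SIZE bytes. This is also why xhci_mem_init() below sizes the device pool at 2112 bytes: a 64-byte-context input context is 2048 + 64. Note that dma_pool_alloc() can return NULL, so a hardened xhci_alloc_container_ctx() would check ctx->bytes before the memset. A minimal userspace sketch of the offset math (sizes hypothetical):

    #include <stdio.h>

    /* Hypothetical offsets mirroring xhci_get_slot_ctx()/xhci_get_ep_ctx():
     * input context  = [input control ctx][slot ctx][ep ctx 0..30]
     * device context = [slot ctx][ep ctx 0..30]
     * where every sub-context is CTX_SIZE bytes (32, or 64 on HCs that
     * set HCC_64BYTE_CONTEXT).
     */
    static unsigned int slot_ctx_offset(int is_input, unsigned int ctx_size)
    {
        return is_input ? ctx_size : 0;  /* skip the input control context */
    }

    static unsigned int ep_ctx_offset(int is_input, unsigned int ctx_size,
                                      unsigned int ep_index)
    {
        return (ep_index + 1 + (is_input ? 1 : 0)) * ctx_size;
    }

    int main(void)
    {
        unsigned int ctx_size = 64;  /* pretend HCC_64BYTE_CONTEXT is set */

        printf("input:  slot @ %u, ep0 @ %u\n",
               slot_ctx_offset(1, ctx_size), ep_ctx_offset(1, ctx_size, 0));
        printf("device: slot @ %u, ep0 @ %u\n",
               slot_ctx_offset(0, ctx_size), ep_ctx_offset(0, ctx_size, 0));
        return 0;
    }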
192 | /* All the xhci_tds in the ring's TD list should be freed at this point */ | 249 | /* All the xhci_tds in the ring's TD list should be freed at this point */ |
193 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | 250 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) |
194 | { | 251 | { |
@@ -200,8 +257,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | |||
200 | return; | 257 | return; |
201 | 258 | ||
202 | dev = xhci->devs[slot_id]; | 259 | dev = xhci->devs[slot_id]; |
203 | xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0; | 260 | xhci->dcbaa->dev_context_ptrs[slot_id] = 0; |
204 | xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; | ||
205 | if (!dev) | 261 | if (!dev) |
206 | return; | 262 | return; |
207 | 263 | ||
@@ -210,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | |||
210 | xhci_ring_free(xhci, dev->ep_rings[i]); | 266 | xhci_ring_free(xhci, dev->ep_rings[i]); |
211 | 267 | ||
212 | if (dev->in_ctx) | 268 | if (dev->in_ctx) |
213 | dma_pool_free(xhci->device_pool, | 269 | xhci_free_container_ctx(xhci, dev->in_ctx); |
214 | dev->in_ctx, dev->in_ctx_dma); | ||
215 | if (dev->out_ctx) | 270 | if (dev->out_ctx) |
216 | dma_pool_free(xhci->device_pool, | 271 | xhci_free_container_ctx(xhci, dev->out_ctx); |
217 | dev->out_ctx, dev->out_ctx_dma); | 272 | |
218 | kfree(xhci->devs[slot_id]); | 273 | kfree(xhci->devs[slot_id]); |
219 | xhci->devs[slot_id] = 0; | 274 | xhci->devs[slot_id] = 0; |
220 | } | 275 | } |
@@ -222,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | |||
222 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | 277 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, |
223 | struct usb_device *udev, gfp_t flags) | 278 | struct usb_device *udev, gfp_t flags) |
224 | { | 279 | { |
225 | dma_addr_t dma; | ||
226 | struct xhci_virt_device *dev; | 280 | struct xhci_virt_device *dev; |
227 | 281 | ||
228 | /* Slot ID 0 is reserved */ | 282 | /* Slot ID 0 is reserved */ |
@@ -236,23 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
236 | return 0; | 290 | return 0; |
237 | dev = xhci->devs[slot_id]; | 291 | dev = xhci->devs[slot_id]; |
238 | 292 | ||
239 | /* Allocate the (output) device context that will be used in the HC */ | 293 | /* Allocate the (output) device context that will be used in the HC. */ |
240 | dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); | 294 | dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); |
241 | if (!dev->out_ctx) | 295 | if (!dev->out_ctx) |
242 | goto fail; | 296 | goto fail; |
243 | dev->out_ctx_dma = dma; | 297 | |
244 | xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, | 298 | xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, |
245 | (unsigned long long)dma); | 299 | (unsigned long long)dev->out_ctx->dma); |
246 | memset(dev->out_ctx, 0, sizeof(*dev->out_ctx)); | ||
247 | 300 | ||
248 | /* Allocate the (input) device context for address device command */ | 301 | /* Allocate the (input) device context for address device command */ |
249 | dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); | 302 | dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); |
250 | if (!dev->in_ctx) | 303 | if (!dev->in_ctx) |
251 | goto fail; | 304 | goto fail; |
252 | dev->in_ctx_dma = dma; | 305 | |
253 | xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, | 306 | xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, |
254 | (unsigned long long)dma); | 307 | (unsigned long long)dev->in_ctx->dma); |
255 | memset(dev->in_ctx, 0, sizeof(*dev->in_ctx)); | ||
256 | 308 | ||
257 | /* Allocate endpoint 0 ring */ | 309 | /* Allocate endpoint 0 ring */ |
258 | dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); | 310 | dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); |
@@ -261,17 +313,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
261 | 313 | ||
262 | init_completion(&dev->cmd_completion); | 314 | init_completion(&dev->cmd_completion); |
263 | 315 | ||
264 | /* | 316 | /* Point to output device context in dcbaa. */ |
265 | * Point to output device context in dcbaa; skip the output control | 317 | xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; |
266 | * context, which is eight 32 bit fields (or 32 bytes long) | ||
267 | */ | ||
268 | xhci->dcbaa->dev_context_ptrs[2*slot_id] = | ||
269 | (u32) dev->out_ctx_dma + (32); | ||
270 | xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", | 318 | xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", |
271 | slot_id, | 319 | slot_id, |
272 | &xhci->dcbaa->dev_context_ptrs[2*slot_id], | 320 | &xhci->dcbaa->dev_context_ptrs[slot_id], |
273 | (unsigned long long)dev->out_ctx_dma); | 321 | (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); |
274 | xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; | ||
275 | 322 | ||
276 | return 1; | 323 | return 1; |
277 | fail: | 324 | fail: |
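Because each DCBAA entry is now a single 64-bit bus address indexed directly by slot ID, the old pair of u32 stores at 2*slot_id and 2*slot_id+1, and the 32-byte skip over the defunct output control context, both disappear. A sketch of the array as these hunks use it; the field types and the MAX_HC_SLOTS value are assumptions for illustration:

    #include <stdint.h>

    #define MAX_HC_SLOTS 256  /* assumed: entry 0 reserved + 255 device slots */

    /* Assumed shape of the device context base address array: one 64-bit
     * bus address per slot. Entry 0 is not a device slot; the scratchpad
     * code later points it at the scratchpad buffer array.
     */
    struct sketch_dcbaa {
        uint64_t dev_context_ptrs[MAX_HC_SLOTS];
        uint64_t dma;  /* bus address of this array itself */
    };

    static void set_slot_entry(struct sketch_dcbaa *dcbaa, int slot_id,
                               uint64_t out_ctx_dma)
    {
        /* One 64-bit store replaces the old lo/hi pair at 2*slot_id. */
        dcbaa->dev_context_ptrs[slot_id] = out_ctx_dma;
    }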
@@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
285 | struct xhci_virt_device *dev; | 332 | struct xhci_virt_device *dev; |
286 | struct xhci_ep_ctx *ep0_ctx; | 333 | struct xhci_ep_ctx *ep0_ctx; |
287 | struct usb_device *top_dev; | 334 | struct usb_device *top_dev; |
335 | struct xhci_slot_ctx *slot_ctx; | ||
336 | struct xhci_input_control_ctx *ctrl_ctx; | ||
288 | 337 | ||
289 | dev = xhci->devs[udev->slot_id]; | 338 | dev = xhci->devs[udev->slot_id]; |
290 | /* Slot ID 0 is reserved */ | 339 | /* Slot ID 0 is reserved */ |
@@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
293 | udev->slot_id); | 342 | udev->slot_id); |
294 | return -EINVAL; | 343 | return -EINVAL; |
295 | } | 344 | } |
296 | ep0_ctx = &dev->in_ctx->ep[0]; | 345 | ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); |
346 | ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); | ||
347 | slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); | ||
297 | 348 | ||
298 | /* 2) New slot context and endpoint 0 context are valid */ | 349 | /* 2) New slot context and endpoint 0 context are valid */
299 | dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; | 350 | ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; |
300 | 351 | ||
301 | /* 3) Only the control endpoint is valid - one endpoint context */ | 352 | /* 3) Only the control endpoint is valid - one endpoint context */ |
302 | dev->in_ctx->slot.dev_info |= LAST_CTX(1); | 353 | slot_ctx->dev_info |= LAST_CTX(1); |
303 | 354 | ||
304 | switch (udev->speed) { | 355 | switch (udev->speed) { |
305 | case USB_SPEED_SUPER: | 356 | case USB_SPEED_SUPER: |
306 | dev->in_ctx->slot.dev_info |= (u32) udev->route; | 357 | slot_ctx->dev_info |= (u32) udev->route; |
307 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; | 358 | slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; |
308 | break; | 359 | break; |
309 | case USB_SPEED_HIGH: | 360 | case USB_SPEED_HIGH: |
310 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; | 361 | slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; |
311 | break; | 362 | break; |
312 | case USB_SPEED_FULL: | 363 | case USB_SPEED_FULL: |
313 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; | 364 | slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; |
314 | break; | 365 | break; |
315 | case USB_SPEED_LOW: | 366 | case USB_SPEED_LOW: |
316 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; | 367 | slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; |
317 | break; | 368 | break; |
318 | case USB_SPEED_VARIABLE: | 369 | case USB_SPEED_VARIABLE: |
319 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); | 370 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); |
@@ -327,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
327 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; | 378 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; |
328 | top_dev = top_dev->parent) | 379 | top_dev = top_dev->parent) |
329 | /* Found device below root hub */; | 380 | /* Found device below root hub */; |
330 | dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); | 381 | slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); |
331 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); | 382 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); |
332 | 383 | ||
333 | /* Is this a LS/FS device under a HS hub? */ | 384 | /* Is this a LS/FS device under a HS hub? */ |
@@ -337,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
337 | */ | 388 | */ |
338 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && | 389 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && |
339 | udev->tt) { | 390 | udev->tt) { |
340 | dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; | 391 | slot_ctx->tt_info = udev->tt->hub->slot_id; |
341 | dev->in_ctx->slot.tt_info |= udev->ttport << 8; | 392 | slot_ctx->tt_info |= udev->ttport << 8; |
342 | } | 393 | } |
343 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); | 394 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); |
344 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); | 395 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); |
@@ -360,10 +411,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
360 | ep0_ctx->ep_info2 |= MAX_BURST(0); | 411 | ep0_ctx->ep_info2 |= MAX_BURST(0); |
361 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); | 412 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); |
362 | 413 | ||
363 | ep0_ctx->deq[0] = | 414 | ep0_ctx->deq = |
364 | dev->ep_rings[0]->first_seg->dma; | 415 | dev->ep_rings[0]->first_seg->dma; |
365 | ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; | 416 | ep0_ctx->deq |= dev->ep_rings[0]->cycle_state; |
366 | ep0_ctx->deq[1] = 0; | ||
367 | 417 | ||
368 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ | 418 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ |
369 | 419 | ||
@@ -470,25 +520,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
470 | unsigned int max_burst; | 520 | unsigned int max_burst; |
471 | 521 | ||
472 | ep_index = xhci_get_endpoint_index(&ep->desc); | 522 | ep_index = xhci_get_endpoint_index(&ep->desc); |
473 | ep_ctx = &virt_dev->in_ctx->ep[ep_index]; | 523 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
474 | 524 | ||
475 | /* Set up the endpoint ring */ | 525 | /* Set up the endpoint ring */ |
476 | virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); | 526 | virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); |
477 | if (!virt_dev->new_ep_rings[ep_index]) | 527 | if (!virt_dev->new_ep_rings[ep_index]) |
478 | return -ENOMEM; | 528 | return -ENOMEM; |
479 | ep_ring = virt_dev->new_ep_rings[ep_index]; | 529 | ep_ring = virt_dev->new_ep_rings[ep_index]; |
480 | ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state; | 530 | ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; |
481 | ep_ctx->deq[1] = 0; | ||
482 | 531 | ||
483 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); | 532 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); |
484 | 533 | ||
485 | /* FIXME dig Mult and streams info out of ep companion desc */ | 534 | /* FIXME dig Mult and streams info out of ep companion desc */ |
486 | 535 | ||
487 | /* Allow 3 retries for everything but isoc */ | 536 | /* Allow 3 retries for everything but isoc; |
537 | * error count = 0 means infinite retries. | ||
538 | */ | ||
488 | if (!usb_endpoint_xfer_isoc(&ep->desc)) | 539 | if (!usb_endpoint_xfer_isoc(&ep->desc)) |
489 | ep_ctx->ep_info2 = ERROR_COUNT(3); | 540 | ep_ctx->ep_info2 = ERROR_COUNT(3); |
490 | else | 541 | else |
491 | ep_ctx->ep_info2 = ERROR_COUNT(0); | 542 | ep_ctx->ep_info2 = ERROR_COUNT(1); |
492 | 543 | ||
493 | ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); | 544 | ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); |
494 | 545 | ||
@@ -498,7 +549,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
498 | max_packet = ep->desc.wMaxPacketSize; | 549 | max_packet = ep->desc.wMaxPacketSize; |
499 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | 550 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); |
500 | /* dig out max burst from ep companion desc */ | 551 | /* dig out max burst from ep companion desc */ |
501 | max_packet = ep->ss_ep_comp->desc.bMaxBurst; | 552 | if (!ep->ss_ep_comp) { |
553 | xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); | ||
554 | max_packet = 0; | ||
555 | } else { | ||
556 | max_packet = ep->ss_ep_comp->desc.bMaxBurst; | ||
557 | } | ||
502 | ep_ctx->ep_info2 |= MAX_BURST(max_packet); | 558 | ep_ctx->ep_info2 |= MAX_BURST(max_packet); |
503 | break; | 559 | break; |
504 | case USB_SPEED_HIGH: | 560 | case USB_SPEED_HIGH: |
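The ep_info2 assembly above relies on ERROR_COUNT(), MAX_BURST(), and MAX_PACKET() shifting their arguments into disjoint bit ranges of a single dword, which is what lets the |= chain accumulate fields without clobbering. The positions in this sketch are assumptions for illustration; the authoritative macros live in xhci.h:

    #include <stdint.h>

    /* Assumed field positions, for illustration only -- the real values
     * live in the driver's ERROR_COUNT()/MAX_BURST()/MAX_PACKET() macros.
     */
    #define SKETCH_ERROR_COUNT(p) (((uint32_t)(p) & 0x3) << 1)
    #define SKETCH_MAX_BURST(p)   (((uint32_t)(p) & 0xff) << 8)
    #define SKETCH_MAX_PACKET(p)  (((uint32_t)(p) & 0xffff) << 16)

    static uint32_t pack_ep_info2(unsigned int err, unsigned int burst,
                                  unsigned int mps)
    {
        /* Disjoint ranges, so OR-ing never clobbers an earlier field. */
        return SKETCH_ERROR_COUNT(err) | SKETCH_MAX_BURST(burst) |
               SKETCH_MAX_PACKET(mps);
    }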
@@ -531,18 +587,114 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci, | |||
531 | struct xhci_ep_ctx *ep_ctx; | 587 | struct xhci_ep_ctx *ep_ctx; |
532 | 588 | ||
533 | ep_index = xhci_get_endpoint_index(&ep->desc); | 589 | ep_index = xhci_get_endpoint_index(&ep->desc); |
534 | ep_ctx = &virt_dev->in_ctx->ep[ep_index]; | 590 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
535 | 591 | ||
536 | ep_ctx->ep_info = 0; | 592 | ep_ctx->ep_info = 0; |
537 | ep_ctx->ep_info2 = 0; | 593 | ep_ctx->ep_info2 = 0; |
538 | ep_ctx->deq[0] = 0; | 594 | ep_ctx->deq = 0; |
539 | ep_ctx->deq[1] = 0; | ||
540 | ep_ctx->tx_info = 0; | 595 | ep_ctx->tx_info = 0; |
541 | /* Don't free the endpoint ring until the set interface or configuration | 596 | /* Don't free the endpoint ring until the set interface or configuration |
542 | * request succeeds. | 597 | * request succeeds. |
543 | */ | 598 | */ |
544 | } | 599 | } |
545 | 600 | ||
601 | /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ | ||
602 | static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) | ||
603 | { | ||
604 | int i; | ||
605 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | ||
606 | int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); | ||
607 | |||
608 | xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); | ||
609 | |||
610 | if (!num_sp) | ||
611 | return 0; | ||
612 | |||
613 | xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); | ||
614 | if (!xhci->scratchpad) | ||
615 | goto fail_sp; | ||
616 | |||
617 | xhci->scratchpad->sp_array = | ||
618 | pci_alloc_consistent(to_pci_dev(dev), | ||
619 | num_sp * sizeof(u64), | ||
620 | &xhci->scratchpad->sp_dma); | ||
621 | if (!xhci->scratchpad->sp_array) | ||
622 | goto fail_sp2; | ||
623 | |||
624 | xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); | ||
625 | if (!xhci->scratchpad->sp_buffers) | ||
626 | goto fail_sp3; | ||
627 | |||
628 | xhci->scratchpad->sp_dma_buffers = | ||
629 | kzalloc(sizeof(dma_addr_t) * num_sp, flags); | ||
630 | |||
631 | if (!xhci->scratchpad->sp_dma_buffers) | ||
632 | goto fail_sp4; | ||
633 | |||
634 | xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; | ||
635 | for (i = 0; i < num_sp; i++) { | ||
636 | dma_addr_t dma; | ||
637 | void *buf = pci_alloc_consistent(to_pci_dev(dev), | ||
638 | xhci->page_size, &dma); | ||
639 | if (!buf) | ||
640 | goto fail_sp5; | ||
641 | |||
642 | xhci->scratchpad->sp_array[i] = dma; | ||
643 | xhci->scratchpad->sp_buffers[i] = buf; | ||
644 | xhci->scratchpad->sp_dma_buffers[i] = dma; | ||
645 | } | ||
646 | |||
647 | return 0; | ||
648 | |||
649 | fail_sp5: | ||
650 | for (i = i - 1; i >= 0; i--) { | ||
651 | pci_free_consistent(to_pci_dev(dev), xhci->page_size, | ||
652 | xhci->scratchpad->sp_buffers[i], | ||
653 | xhci->scratchpad->sp_dma_buffers[i]); | ||
654 | } | ||
655 | kfree(xhci->scratchpad->sp_dma_buffers); | ||
656 | |||
657 | fail_sp4: | ||
658 | kfree(xhci->scratchpad->sp_buffers); | ||
659 | |||
660 | fail_sp3: | ||
661 | pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64), | ||
662 | xhci->scratchpad->sp_array, | ||
663 | xhci->scratchpad->sp_dma); | ||
664 | |||
665 | fail_sp2: | ||
666 | kfree(xhci->scratchpad); | ||
667 | xhci->scratchpad = NULL; | ||
668 | |||
669 | fail_sp: | ||
670 | return -ENOMEM; | ||
671 | } | ||
672 | |||
673 | static void scratchpad_free(struct xhci_hcd *xhci) | ||
674 | { | ||
675 | int num_sp; | ||
676 | int i; | ||
677 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | ||
678 | |||
679 | if (!xhci->scratchpad) | ||
680 | return; | ||
681 | |||
682 | num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); | ||
683 | |||
684 | for (i = 0; i < num_sp; i++) { | ||
685 | pci_free_consistent(pdev, xhci->page_size, | ||
686 | xhci->scratchpad->sp_buffers[i], | ||
687 | xhci->scratchpad->sp_dma_buffers[i]); | ||
688 | } | ||
689 | kfree(xhci->scratchpad->sp_dma_buffers); | ||
690 | kfree(xhci->scratchpad->sp_buffers); | ||
691 | pci_free_consistent(pdev, num_sp * sizeof(u64), | ||
692 | xhci->scratchpad->sp_array, | ||
693 | xhci->scratchpad->sp_dma); | ||
694 | kfree(xhci->scratchpad); | ||
695 | xhci->scratchpad = NULL; | ||
696 | } | ||
697 | |||
546 | void xhci_mem_cleanup(struct xhci_hcd *xhci) | 698 | void xhci_mem_cleanup(struct xhci_hcd *xhci) |
547 | { | 699 | { |
548 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | 700 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
@@ -551,10 +703,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
551 | 703 | ||
552 | /* Free the Event Ring Segment Table and the actual Event Ring */ | 704 | /* Free the Event Ring Segment Table and the actual Event Ring */ |
553 | xhci_writel(xhci, 0, &xhci->ir_set->erst_size); | 705 | xhci_writel(xhci, 0, &xhci->ir_set->erst_size); |
554 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); | 706 | xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); |
555 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); | 707 | xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); |
556 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]); | ||
557 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); | ||
558 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); | 708 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); |
559 | if (xhci->erst.entries) | 709 | if (xhci->erst.entries) |
560 | pci_free_consistent(pdev, size, | 710 | pci_free_consistent(pdev, size, |
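From here on, paired lo/hi 32-bit register writes collapse into xhci_read_64()/xhci_write_64() calls. The helpers themselves are not part of these hunks; a plausible sketch, assuming the xHC tolerates splitting a 64-bit register into two 32-bit MMIO accesses with the low dword first:

    /* Plausible sketch only -- assumes readl()/writel() MMIO accessors and
     * a controller that accepts two 32-bit accesses per 64-bit register.
     */
    static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
                                   __u64 __iomem *regs)
    {
        __u32 __iomem *ptr = (__u32 __iomem *) regs;
        u64 val_lo = readl(ptr);
        u64 val_hi = readl(ptr + 1);

        return val_lo + (val_hi << 32);
    }

    static inline void xhci_write_64(struct xhci_hcd *xhci, const u64 val,
                                     __u64 __iomem *regs)
    {
        __u32 __iomem *ptr = (__u32 __iomem *) regs;

        writel(lower_32_bits(val), ptr);      /* low dword first */
        writel(upper_32_bits(val), ptr + 1);
    }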
@@ -566,8 +716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
566 | xhci->event_ring = NULL; | 716 | xhci->event_ring = NULL; |
567 | xhci_dbg(xhci, "Freed event ring\n"); | 717 | xhci_dbg(xhci, "Freed event ring\n"); |
568 | 718 | ||
569 | xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); | 719 | xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); |
570 | xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]); | ||
571 | if (xhci->cmd_ring) | 720 | if (xhci->cmd_ring) |
572 | xhci_ring_free(xhci, xhci->cmd_ring); | 721 | xhci_ring_free(xhci, xhci->cmd_ring); |
573 | xhci->cmd_ring = NULL; | 722 | xhci->cmd_ring = NULL; |
@@ -586,8 +735,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
586 | xhci->device_pool = NULL; | 735 | xhci->device_pool = NULL; |
587 | xhci_dbg(xhci, "Freed device context pool\n"); | 736 | xhci_dbg(xhci, "Freed device context pool\n"); |
588 | 737 | ||
589 | xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); | 738 | xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); |
590 | xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]); | ||
591 | if (xhci->dcbaa) | 739 | if (xhci->dcbaa) |
592 | pci_free_consistent(pdev, sizeof(*xhci->dcbaa), | 740 | pci_free_consistent(pdev, sizeof(*xhci->dcbaa), |
593 | xhci->dcbaa, xhci->dcbaa->dma); | 741 | xhci->dcbaa, xhci->dcbaa->dma); |
@@ -595,6 +743,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
595 | 743 | ||
596 | xhci->page_size = 0; | 744 | xhci->page_size = 0; |
597 | xhci->page_shift = 0; | 745 | xhci->page_shift = 0; |
746 | scratchpad_free(xhci); | ||
598 | } | 747 | } |
599 | 748 | ||
600 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | 749 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) |
@@ -602,6 +751,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
602 | dma_addr_t dma; | 751 | dma_addr_t dma; |
603 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | 752 | struct device *dev = xhci_to_hcd(xhci)->self.controller; |
604 | unsigned int val, val2; | 753 | unsigned int val, val2; |
754 | u64 val_64; | ||
605 | struct xhci_segment *seg; | 755 | struct xhci_segment *seg; |
606 | u32 page_size; | 756 | u32 page_size; |
607 | int i; | 757 | int i; |
@@ -647,8 +797,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
647 | xhci->dcbaa->dma = dma; | 797 | xhci->dcbaa->dma = dma; |
648 | xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", | 798 | xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", |
649 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); | 799 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); |
650 | xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); | 800 | xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); |
651 | xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]); | ||
652 | 801 | ||
653 | /* | 802 | /* |
654 | * Initialize the ring segment pool. The ring must be a contiguous | 803 | * Initialize the ring segment pool. The ring must be a contiguous |
@@ -658,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
658 | */ | 807 | */ |
659 | xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, | 808 | xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, |
660 | SEGMENT_SIZE, 64, xhci->page_size); | 809 | SEGMENT_SIZE, 64, xhci->page_size); |
810 | |||
661 | /* See Table 46 and Note on Figure 55 */ | 811 | /* See Table 46 and Note on Figure 55 */ |
662 | /* FIXME support 64-byte contexts */ | ||
663 | xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, | 812 | xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, |
664 | sizeof(struct xhci_device_control), | 813 | 2112, 64, xhci->page_size); |
665 | 64, xhci->page_size); | ||
666 | if (!xhci->segment_pool || !xhci->device_pool) | 814 | if (!xhci->segment_pool || !xhci->device_pool) |
667 | goto fail; | 815 | goto fail; |
668 | 816 | ||
@@ -675,14 +823,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
675 | (unsigned long long)xhci->cmd_ring->first_seg->dma); | 823 | (unsigned long long)xhci->cmd_ring->first_seg->dma); |
676 | 824 | ||
677 | /* Set the address in the Command Ring Control register */ | 825 | /* Set the address in the Command Ring Control register */ |
678 | val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); | 826 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
679 | val = (val & ~CMD_RING_ADDR_MASK) | | 827 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | |
680 | (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | | 828 | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | |
681 | xhci->cmd_ring->cycle_state; | 829 | xhci->cmd_ring->cycle_state; |
682 | xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); | 830 | xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); |
683 | xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); | 831 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); |
684 | xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n"); | ||
685 | xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]); | ||
686 | xhci_dbg_cmd_ptrs(xhci); | 832 | xhci_dbg_cmd_ptrs(xhci); |
687 | 833 | ||
688 | val = xhci_readl(xhci, &xhci->cap_regs->db_off); | 834 | val = xhci_readl(xhci, &xhci->cap_regs->db_off); |
@@ -722,8 +868,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
722 | /* set ring base address and size for each segment table entry */ | 868 | /* set ring base address and size for each segment table entry */ |
723 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { | 869 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { |
724 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; | 870 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; |
725 | entry->seg_addr[0] = seg->dma; | 871 | entry->seg_addr = seg->dma; |
726 | entry->seg_addr[1] = 0; | ||
727 | entry->seg_size = TRBS_PER_SEGMENT; | 872 | entry->seg_size = TRBS_PER_SEGMENT; |
728 | entry->rsvd = 0; | 873 | entry->rsvd = 0; |
729 | seg = seg->next; | 874 | seg = seg->next; |
@@ -741,11 +886,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
741 | /* set the segment table base address */ | 886 | /* set the segment table base address */ |
742 | xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", | 887 | xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", |
743 | (unsigned long long)xhci->erst.erst_dma_addr); | 888 | (unsigned long long)xhci->erst.erst_dma_addr); |
744 | val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); | 889 | val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); |
745 | val &= ERST_PTR_MASK; | 890 | val_64 &= ERST_PTR_MASK; |
746 | val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); | 891 | val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); |
747 | xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]); | 892 | xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); |
748 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); | ||
749 | 893 | ||
750 | /* Set the event ring dequeue address */ | 894 | /* Set the event ring dequeue address */ |
751 | xhci_set_hc_event_deq(xhci); | 895 | xhci_set_hc_event_deq(xhci); |
@@ -761,7 +905,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
761 | for (i = 0; i < MAX_HC_SLOTS; ++i) | 905 | for (i = 0; i < MAX_HC_SLOTS; ++i) |
762 | xhci->devs[i] = 0; | 906 | xhci->devs[i] = 0; |
763 | 907 | ||
908 | if (scratchpad_alloc(xhci, flags)) | ||
909 | goto fail; | ||
910 | |||
764 | return 0; | 911 | return 0; |
912 | |||
765 | fail: | 913 | fail: |
766 | xhci_warn(xhci, "Couldn't initialize memory\n"); | 914 | xhci_warn(xhci, "Couldn't initialize memory\n"); |
767 | xhci_mem_cleanup(xhci); | 915 | xhci_mem_cleanup(xhci); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1462709e26c0..592fe7e623f7 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -117,6 +117,7 @@ static const struct hc_driver xhci_pci_hc_driver = { | |||
117 | .free_dev = xhci_free_dev, | 117 | .free_dev = xhci_free_dev, |
118 | .add_endpoint = xhci_add_endpoint, | 118 | .add_endpoint = xhci_add_endpoint, |
119 | .drop_endpoint = xhci_drop_endpoint, | 119 | .drop_endpoint = xhci_drop_endpoint, |
120 | .endpoint_reset = xhci_endpoint_reset, | ||
120 | .check_bandwidth = xhci_check_bandwidth, | 121 | .check_bandwidth = xhci_check_bandwidth, |
121 | .reset_bandwidth = xhci_reset_bandwidth, | 122 | .reset_bandwidth = xhci_reset_bandwidth, |
122 | .address_device = xhci_address_device, | 123 | .address_device = xhci_address_device, |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 02d81985c454..aa88a067148b 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -135,6 +135,7 @@ static void next_trb(struct xhci_hcd *xhci, | |||
135 | static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) | 135 | static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) |
136 | { | 136 | { |
137 | union xhci_trb *next = ++(ring->dequeue); | 137 | union xhci_trb *next = ++(ring->dequeue); |
138 | unsigned long long addr; | ||
138 | 139 | ||
139 | ring->deq_updates++; | 140 | ring->deq_updates++; |
140 | /* Update the dequeue pointer further if that was a link TRB or we're at | 141 | /* Update the dequeue pointer further if that was a link TRB or we're at |
@@ -152,6 +153,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
152 | ring->dequeue = ring->deq_seg->trbs; | 153 | ring->dequeue = ring->deq_seg->trbs; |
153 | next = ring->dequeue; | 154 | next = ring->dequeue; |
154 | } | 155 | } |
156 | addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); | ||
157 | if (ring == xhci->event_ring) | ||
158 | xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr); | ||
159 | else if (ring == xhci->cmd_ring) | ||
160 | xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr); | ||
161 | else | ||
162 | xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr); | ||
155 | } | 163 | } |
156 | 164 | ||
157 | /* | 165 | /* |
@@ -171,6 +179,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
171 | { | 179 | { |
172 | u32 chain; | 180 | u32 chain; |
173 | union xhci_trb *next; | 181 | union xhci_trb *next; |
182 | unsigned long long addr; | ||
174 | 183 | ||
175 | chain = ring->enqueue->generic.field[3] & TRB_CHAIN; | 184 | chain = ring->enqueue->generic.field[3] & TRB_CHAIN; |
176 | next = ++(ring->enqueue); | 185 | next = ++(ring->enqueue); |
@@ -204,6 +213,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
204 | ring->enqueue = ring->enq_seg->trbs; | 213 | ring->enqueue = ring->enq_seg->trbs; |
205 | next = ring->enqueue; | 214 | next = ring->enqueue; |
206 | } | 215 | } |
216 | addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue); | ||
217 | if (ring == xhci->event_ring) | ||
218 | xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr); | ||
219 | else if (ring == xhci->cmd_ring) | ||
220 | xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr); | ||
221 | else | ||
222 | xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr); | ||
207 | } | 223 | } |
208 | 224 | ||
209 | /* | 225 | /* |
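The new enqueue/dequeue debug lines make the ring-wrap rule visible: advance one TRB, and if that lands on the link TRB closing the segment, hop to the next segment and toggle the cycle bit when the link TRB carries the toggle-cycle flag. A simplified consumer-side sketch of the rule (event-ring special-casing and the producer's chain-bit handling omitted):

    /* Simplified sketch of the wrap rule, consumer side only. */
    static void sketch_inc_deq(struct xhci_ring *ring)
    {
        ring->dequeue++;

        /* Landed on the link TRB that closes this segment? */
        if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
            /* A link TRB with the toggle-cycle bit set flips the cycle
             * the consumer must match in the next segment.
             */
            if (ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1].link.control
                    & LINK_TOGGLE)
                ring->cycle_state ^= 1;
            ring->deq_seg = ring->deq_seg->next;
            ring->dequeue = ring->deq_seg->trbs;
        }
    }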
@@ -237,7 +253,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, | |||
237 | 253 | ||
238 | void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | 254 | void xhci_set_hc_event_deq(struct xhci_hcd *xhci) |
239 | { | 255 | { |
240 | u32 temp; | 256 | u64 temp; |
241 | dma_addr_t deq; | 257 | dma_addr_t deq; |
242 | 258 | ||
243 | deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, | 259 | deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, |
@@ -246,13 +262,15 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | |||
246 | xhci_warn(xhci, "WARN something wrong with SW event ring " | 262 | xhci_warn(xhci, "WARN something wrong with SW event ring " |
247 | "dequeue ptr.\n"); | 263 | "dequeue ptr.\n"); |
248 | /* Update HC event ring dequeue pointer */ | 264 | /* Update HC event ring dequeue pointer */ |
249 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); | 265 | temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
250 | temp &= ERST_PTR_MASK; | 266 | temp &= ERST_PTR_MASK; |
251 | if (!in_interrupt()) | 267 | /* Don't clear the EHB bit (which is RW1C) because |
252 | xhci_dbg(xhci, "// Write event ring dequeue pointer\n"); | 268 | * there might be more events to service. |
253 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); | 269 | */ |
254 | xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp, | 270 | temp &= ~ERST_EHB; |
255 | &xhci->ir_set->erst_dequeue[0]); | 271 | xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n"); |
272 | xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, | ||
273 | &xhci->ir_set->erst_dequeue); | ||
256 | } | 274 | } |
257 | 275 | ||
258 | /* Ring the host controller doorbell after placing a command on the ring */ | 276 | /* Ring the host controller doorbell after placing a command on the ring */ |
@@ -279,7 +297,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci, | |||
279 | /* Don't ring the doorbell for this endpoint if there are pending | 297 | /* Don't ring the doorbell for this endpoint if there are pending |
280 | * cancellations because we don't want to interrupt processing. | 298 | * cancellations because we don't want to interrupt processing.
281 | */ | 299 | */ |
282 | if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) { | 300 | if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING) |
301 | && !(ep_ring->state & EP_HALTED)) { | ||
283 | field = xhci_readl(xhci, db_addr) & DB_MASK; | 302 | field = xhci_readl(xhci, db_addr) & DB_MASK; |
284 | xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); | 303 | xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); |
285 | /* Flush PCI posted writes - FIXME Matthew Wilcox says this | 304 | /* Flush PCI posted writes - FIXME Matthew Wilcox says this |
@@ -316,12 +335,6 @@ static struct xhci_segment *find_trb_seg( | |||
316 | return cur_seg; | 335 | return cur_seg; |
317 | } | 336 | } |
318 | 337 | ||
319 | struct dequeue_state { | ||
320 | struct xhci_segment *new_deq_seg; | ||
321 | union xhci_trb *new_deq_ptr; | ||
322 | int new_cycle_state; | ||
323 | }; | ||
324 | |||
325 | /* | 338 | /* |
326 | * Move the xHC's endpoint ring dequeue pointer past cur_td. | 339 | * Move the xHC's endpoint ring dequeue pointer past cur_td. |
327 | * Record the new state of the xHC's endpoint ring dequeue segment, | 340 | * Record the new state of the xHC's endpoint ring dequeue segment, |
@@ -336,24 +349,30 @@ struct dequeue_state { | |||
336 | * - Finally we move the dequeue state one TRB further, toggling the cycle bit | 349 | * - Finally we move the dequeue state one TRB further, toggling the cycle bit |
337 | * if we've moved it past a link TRB with the toggle cycle bit set. | 350 | * if we've moved it past a link TRB with the toggle cycle bit set. |
338 | */ | 351 | */ |
339 | static void find_new_dequeue_state(struct xhci_hcd *xhci, | 352 | void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, |
340 | unsigned int slot_id, unsigned int ep_index, | 353 | unsigned int slot_id, unsigned int ep_index, |
341 | struct xhci_td *cur_td, struct dequeue_state *state) | 354 | struct xhci_td *cur_td, struct xhci_dequeue_state *state) |
342 | { | 355 | { |
343 | struct xhci_virt_device *dev = xhci->devs[slot_id]; | 356 | struct xhci_virt_device *dev = xhci->devs[slot_id]; |
344 | struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; | 357 | struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; |
345 | struct xhci_generic_trb *trb; | 358 | struct xhci_generic_trb *trb; |
359 | struct xhci_ep_ctx *ep_ctx; | ||
360 | dma_addr_t addr; | ||
346 | 361 | ||
347 | state->new_cycle_state = 0; | 362 | state->new_cycle_state = 0; |
363 | xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); | ||
348 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, | 364 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, |
349 | ep_ring->stopped_trb, | 365 | ep_ring->stopped_trb, |
350 | &state->new_cycle_state); | 366 | &state->new_cycle_state); |
351 | if (!state->new_deq_seg) | 367 | if (!state->new_deq_seg) |
352 | BUG(); | 368 | BUG(); |
353 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ | 369 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ |
354 | state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0]; | 370 | xhci_dbg(xhci, "Finding endpoint context\n"); |
371 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | ||
372 | state->new_cycle_state = 0x1 & ep_ctx->deq; | ||
355 | 373 | ||
356 | state->new_deq_ptr = cur_td->last_trb; | 374 | state->new_deq_ptr = cur_td->last_trb; |
375 | xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); | ||
357 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, | 376 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, |
358 | state->new_deq_ptr, | 377 | state->new_deq_ptr, |
359 | &state->new_cycle_state); | 378 | &state->new_cycle_state); |
@@ -367,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci, | |||
367 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); | 386 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); |
368 | 387 | ||
369 | /* Don't update the ring cycle state for the producer (us). */ | 388 | /* Don't update the ring cycle state for the producer (us). */ |
389 | xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", | ||
390 | state->new_deq_seg); | ||
391 | addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); | ||
392 | xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", | ||
393 | (unsigned long long) addr); | ||
394 | xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); | ||
370 | ep_ring->dequeue = state->new_deq_ptr; | 395 | ep_ring->dequeue = state->new_deq_ptr; |
371 | ep_ring->deq_seg = state->new_deq_seg; | 396 | ep_ring->deq_seg = state->new_deq_seg; |
372 | } | 397 | } |
@@ -416,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, | |||
416 | unsigned int ep_index, struct xhci_segment *deq_seg, | 441 | unsigned int ep_index, struct xhci_segment *deq_seg, |
417 | union xhci_trb *deq_ptr, u32 cycle_state); | 442 | union xhci_trb *deq_ptr, u32 cycle_state); |
418 | 443 | ||
444 | void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | ||
445 | struct xhci_ring *ep_ring, unsigned int slot_id, | ||
446 | unsigned int ep_index, struct xhci_dequeue_state *deq_state) | ||
447 | { | ||
448 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " | ||
449 | "new deq ptr = %p (0x%llx dma), new cycle = %u\n", | ||
450 | deq_state->new_deq_seg, | ||
451 | (unsigned long long)deq_state->new_deq_seg->dma, | ||
452 | deq_state->new_deq_ptr, | ||
453 | (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), | ||
454 | deq_state->new_cycle_state); | ||
455 | queue_set_tr_deq(xhci, slot_id, ep_index, | ||
456 | deq_state->new_deq_seg, | ||
457 | deq_state->new_deq_ptr, | ||
458 | (u32) deq_state->new_cycle_state); | ||
459 | /* Stop the TD queueing code from ringing the doorbell until | ||
460 | * this command completes. The HC won't set the dequeue pointer | ||
461 | * if the ring is running, and ringing the doorbell starts the | ||
462 | * ring running. | ||
463 | */ | ||
464 | ep_ring->state |= SET_DEQ_PENDING; | ||
465 | xhci_ring_cmd_db(xhci); | ||
466 | } | ||
467 | |||
419 | /* | 468 | /* |
420 | * When we get a command completion for a Stop Endpoint Command, we need to | 469 | * When we get a command completion for a Stop Endpoint Command, we need to |
421 | * unlink any cancelled TDs from the ring. There are two ways to do that: | 470 | * unlink any cancelled TDs from the ring. There are two ways to do that: |
@@ -436,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
436 | struct xhci_td *cur_td = 0; | 485 | struct xhci_td *cur_td = 0; |
437 | struct xhci_td *last_unlinked_td; | 486 | struct xhci_td *last_unlinked_td; |
438 | 487 | ||
439 | struct dequeue_state deq_state; | 488 | struct xhci_dequeue_state deq_state; |
440 | #ifdef CONFIG_USB_HCD_STAT | 489 | #ifdef CONFIG_USB_HCD_STAT |
441 | ktime_t stop_time = ktime_get(); | 490 | ktime_t stop_time = ktime_get(); |
442 | #endif | 491 | #endif |
@@ -464,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
464 | * move the xHC endpoint ring dequeue pointer past this TD. | 513 | * move the xHC endpoint ring dequeue pointer past this TD. |
465 | */ | 514 | */ |
466 | if (cur_td == ep_ring->stopped_td) | 515 | if (cur_td == ep_ring->stopped_td) |
467 | find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, | 516 | xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, |
468 | &deq_state); | 517 | &deq_state); |
469 | else | 518 | else |
470 | td_to_noop(xhci, ep_ring, cur_td); | 519 | td_to_noop(xhci, ep_ring, cur_td); |
@@ -480,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
480 | 529 | ||
481 | /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ | 530 | /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ |
482 | if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { | 531 | if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { |
483 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " | 532 | xhci_queue_new_dequeue_state(xhci, ep_ring, |
484 | "new deq ptr = %p (0x%llx dma), new cycle = %u\n", | 533 | slot_id, ep_index, &deq_state); |
485 | deq_state.new_deq_seg, | ||
486 | (unsigned long long)deq_state.new_deq_seg->dma, | ||
487 | deq_state.new_deq_ptr, | ||
488 | (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr), | ||
489 | deq_state.new_cycle_state); | ||
490 | queue_set_tr_deq(xhci, slot_id, ep_index, | ||
491 | deq_state.new_deq_seg, | ||
492 | deq_state.new_deq_ptr, | ||
493 | (u32) deq_state.new_cycle_state); | ||
494 | /* Stop the TD queueing code from ringing the doorbell until | ||
495 | * this command completes. The HC won't set the dequeue pointer | ||
496 | * if the ring is running, and ringing the doorbell starts the | ||
497 | * ring running. | ||
498 | */ | ||
499 | ep_ring->state |= SET_DEQ_PENDING; | ||
500 | xhci_ring_cmd_db(xhci); | ||
501 | } else { | 534 | } else { |
502 | /* Otherwise just ring the doorbell to restart the ring */ | 535 | /* Otherwise just ring the doorbell to restart the ring */ |
503 | ring_ep_doorbell(xhci, slot_id, ep_index); | 536 | ring_ep_doorbell(xhci, slot_id, ep_index); |
@@ -551,11 +584,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
551 | unsigned int ep_index; | 584 | unsigned int ep_index; |
552 | struct xhci_ring *ep_ring; | 585 | struct xhci_ring *ep_ring; |
553 | struct xhci_virt_device *dev; | 586 | struct xhci_virt_device *dev; |
587 | struct xhci_ep_ctx *ep_ctx; | ||
588 | struct xhci_slot_ctx *slot_ctx; | ||
554 | 589 | ||
555 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | 590 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); |
556 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | 591 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); |
557 | dev = xhci->devs[slot_id]; | 592 | dev = xhci->devs[slot_id]; |
558 | ep_ring = dev->ep_rings[ep_index]; | 593 | ep_ring = dev->ep_rings[ep_index]; |
594 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | ||
595 | slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); | ||
559 | 596 | ||
560 | if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { | 597 | if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { |
561 | unsigned int ep_state; | 598 | unsigned int ep_state; |
@@ -569,9 +606,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
569 | case COMP_CTX_STATE: | 606 | case COMP_CTX_STATE: |
570 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " | 607 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " |
571 | "to incorrect slot or ep state.\n"); | 608 | "to incorrect slot or ep state.\n"); |
572 | ep_state = dev->out_ctx->ep[ep_index].ep_info; | 609 | ep_state = ep_ctx->ep_info; |
573 | ep_state &= EP_STATE_MASK; | 610 | ep_state &= EP_STATE_MASK; |
574 | slot_state = dev->out_ctx->slot.dev_state; | 611 | slot_state = slot_ctx->dev_state; |
575 | slot_state = GET_SLOT_STATE(slot_state); | 612 | slot_state = GET_SLOT_STATE(slot_state); |
576 | xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", | 613 | xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", |
577 | slot_state, ep_state); | 614 | slot_state, ep_state); |
@@ -593,16 +630,33 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
593 | * cancelling URBs, which might not be an error... | 630 | * cancelling URBs, which might not be an error... |
594 | */ | 631 | */ |
595 | } else { | 632 | } else { |
596 | xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, " | 633 | xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", |
597 | "deq[1] = 0x%x.\n", | 634 | ep_ctx->deq); |
598 | dev->out_ctx->ep[ep_index].deq[0], | ||
599 | dev->out_ctx->ep[ep_index].deq[1]); | ||
600 | } | 635 | } |
601 | 636 | ||
602 | ep_ring->state &= ~SET_DEQ_PENDING; | 637 | ep_ring->state &= ~SET_DEQ_PENDING; |
603 | ring_ep_doorbell(xhci, slot_id, ep_index); | 638 | ring_ep_doorbell(xhci, slot_id, ep_index); |
604 | } | 639 | } |
605 | 640 | ||
641 | static void handle_reset_ep_completion(struct xhci_hcd *xhci, | ||
642 | struct xhci_event_cmd *event, | ||
643 | union xhci_trb *trb) | ||
644 | { | ||
645 | int slot_id; | ||
646 | unsigned int ep_index; | ||
647 | |||
648 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | ||
649 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | ||
650 | /* This command will only fail if the endpoint wasn't halted, | ||
651 | * but we don't care. | ||
652 | */ | ||
653 | xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", | ||
654 | (unsigned int) GET_COMP_CODE(event->status)); | ||
655 | |||
656 | /* Clear our internal halted state and restart the ring */ | ||
657 | xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED; | ||
658 | ring_ep_doorbell(xhci, slot_id, ep_index); | ||
659 | } | ||
606 | 660 | ||
607 | static void handle_cmd_completion(struct xhci_hcd *xhci, | 661 | static void handle_cmd_completion(struct xhci_hcd *xhci, |
608 | struct xhci_event_cmd *event) | 662 | struct xhci_event_cmd *event) |
@@ -611,7 +665,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
611 | u64 cmd_dma; | 665 | u64 cmd_dma; |
612 | dma_addr_t cmd_dequeue_dma; | 666 | dma_addr_t cmd_dequeue_dma; |
613 | 667 | ||
614 | cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; | 668 | cmd_dma = event->cmd_trb; |
615 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | 669 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, |
616 | xhci->cmd_ring->dequeue); | 670 | xhci->cmd_ring->dequeue); |
617 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ | 671 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ |
@@ -653,6 +707,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
653 | case TRB_TYPE(TRB_CMD_NOOP): | 707 | case TRB_TYPE(TRB_CMD_NOOP): |
654 | ++xhci->noops_handled; | 708 | ++xhci->noops_handled; |
655 | break; | 709 | break; |
710 | case TRB_TYPE(TRB_RESET_EP): | ||
711 | handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); | ||
712 | break; | ||
656 | default: | 713 | default: |
657 | /* Skip over unknown commands on the event ring */ | 714 | /* Skip over unknown commands on the event ring */ |
658 | xhci->error_bitmask |= 1 << 6; | 715 | xhci->error_bitmask |= 1 << 6; |
@@ -756,7 +813,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
756 | union xhci_trb *event_trb; | 813 | union xhci_trb *event_trb; |
757 | struct urb *urb = 0; | 814 | struct urb *urb = 0; |
758 | int status = -EINPROGRESS; | 815 | int status = -EINPROGRESS; |
816 | struct xhci_ep_ctx *ep_ctx; | ||
759 | 817 | ||
818 | xhci_dbg(xhci, "In %s\n", __func__); | ||
760 | xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; | 819 | xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; |
761 | if (!xdev) { | 820 | if (!xdev) { |
762 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); | 821 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); |
@@ -765,17 +824,17 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
765 | 824 | ||
766 | /* Endpoint ID is 1 based, our index is zero based */ | 825 | /* Endpoint ID is 1 based, our index is zero based */ |
767 | ep_index = TRB_TO_EP_ID(event->flags) - 1; | 826 | ep_index = TRB_TO_EP_ID(event->flags) - 1; |
827 | xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); | ||
768 | ep_ring = xdev->ep_rings[ep_index]; | 828 | ep_ring = xdev->ep_rings[ep_index]; |
769 | if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { | 829 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
830 | if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { | ||
770 | xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); | 831 | xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); |
771 | return -ENODEV; | 832 | return -ENODEV; |
772 | } | 833 | } |
773 | 834 | ||
774 | event_dma = event->buffer[0]; | 835 | event_dma = event->buffer; |
775 | if (event->buffer[1] != 0) | ||
776 | xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n"); | ||
777 | |||
778 | /* This TRB should be in the TD at the head of this ring's TD list */ | 836 | /* This TRB should be in the TD at the head of this ring's TD list */ |
837 | xhci_dbg(xhci, "%s - checking for list empty\n", __func__); | ||
779 | if (list_empty(&ep_ring->td_list)) { | 838 | if (list_empty(&ep_ring->td_list)) { |
780 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", | 839 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", |
781 | TRB_TO_SLOT_ID(event->flags), ep_index); | 840 | TRB_TO_SLOT_ID(event->flags), ep_index); |
@@ -785,11 +844,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
785 | urb = NULL; | 844 | urb = NULL; |
786 | goto cleanup; | 845 | goto cleanup; |
787 | } | 846 | } |
847 | xhci_dbg(xhci, "%s - getting list entry\n", __func__); | ||
788 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); | 848 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); |
789 | 849 | ||
790 | /* Is this a TRB in the currently executing TD? */ | 850 | /* Is this a TRB in the currently executing TD? */ |
851 | xhci_dbg(xhci, "%s - looking for TD\n", __func__); | ||
791 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, | 852 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, |
792 | td->last_trb, event_dma); | 853 | td->last_trb, event_dma); |
854 | xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg); | ||
793 | if (!event_seg) { | 855 | if (!event_seg) { |
794 | /* HC is busted, give up! */ | 856 | /* HC is busted, give up! */ |
795 | xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); | 857 | xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); |
@@ -798,10 +860,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
798 | event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; | 860 | event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; |
799 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", | 861 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", |
800 | (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); | 862 | (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); |
801 | xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n", | 863 | xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n", |
802 | (unsigned int) event->buffer[0]); | 864 | lower_32_bits(event->buffer)); |
803 | xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n", | 865 | xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n", |
804 | (unsigned int) event->buffer[1]); | 866 | upper_32_bits(event->buffer)); |
805 | xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", | 867 | xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", |
806 | (unsigned int) event->transfer_len); | 868 | (unsigned int) event->transfer_len); |
807 | xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", | 869 | xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", |
@@ -823,6 +885,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
823 | break; | 885 | break; |
824 | case COMP_STALL: | 886 | case COMP_STALL: |
825 | xhci_warn(xhci, "WARN: Stalled endpoint\n"); | 887 | xhci_warn(xhci, "WARN: Stalled endpoint\n"); |
888 | ep_ring->state |= EP_HALTED; | ||
826 | status = -EPIPE; | 889 | status = -EPIPE; |
827 | break; | 890 | break; |
828 | case COMP_TRB_ERR: | 891 | case COMP_TRB_ERR: |
@@ -833,6 +896,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
833 | xhci_warn(xhci, "WARN: transfer error on endpoint\n"); | 896 | xhci_warn(xhci, "WARN: transfer error on endpoint\n"); |
834 | status = -EPROTO; | 897 | status = -EPROTO; |
835 | break; | 898 | break; |
899 | case COMP_BABBLE: | ||
900 | xhci_warn(xhci, "WARN: babble error on endpoint\n"); | ||
901 | status = -EOVERFLOW; | ||
902 | break; | ||
836 | case COMP_DB_ERR: | 903 | case COMP_DB_ERR: |
837 | xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); | 904 | xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); |
838 | status = -ENOSR; | 905 | status = -ENOSR; |
@@ -874,15 +941,26 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
874 | if (event_trb != ep_ring->dequeue) { | 941 | if (event_trb != ep_ring->dequeue) { |
875 | /* The event was for the status stage */ | 942 | /* The event was for the status stage */ |
876 | if (event_trb == td->last_trb) { | 943 | if (event_trb == td->last_trb) { |
877 | td->urb->actual_length = | 944 | if (td->urb->actual_length != 0) { |
878 | td->urb->transfer_buffer_length; | 945 | /* Don't overwrite a previously set error code */ |
946 | if (status == -EINPROGRESS || status == 0) | ||
947 | /* Did we already see a short data stage? */ | ||
948 | status = -EREMOTEIO; | ||
949 | } else { | ||
950 | td->urb->actual_length = | ||
951 | td->urb->transfer_buffer_length; | ||
952 | } | ||
879 | } else { | 953 | } else { |
880 | /* Maybe the event was for the data stage? */ | 954 | /* Maybe the event was for the data stage? */ |
881 | if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) | 955 | if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) { |
882 | /* We didn't stop on a link TRB in the middle */ | 956 | /* We didn't stop on a link TRB in the middle */ |
883 | td->urb->actual_length = | 957 | td->urb->actual_length = |
884 | td->urb->transfer_buffer_length - | 958 | td->urb->transfer_buffer_length - |
885 | TRB_LEN(event->transfer_len); | 959 | TRB_LEN(event->transfer_len); |
960 | xhci_dbg(xhci, "Waiting for status stage event\n"); | ||
961 | urb = NULL; | ||
962 | goto cleanup; | ||
963 | } | ||
886 | } | 964 | } |
887 | } | 965 | } |
888 | } else { | 966 | } else { |
@@ -929,16 +1007,20 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
929 | TRB_LEN(event->transfer_len)); | 1007 | TRB_LEN(event->transfer_len)); |
930 | td->urb->actual_length = 0; | 1008 | td->urb->actual_length = 0; |
931 | } | 1009 | } |
932 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | 1010 | /* Don't overwrite a previously set error code */ |
933 | status = -EREMOTEIO; | 1011 | if (status == -EINPROGRESS) { |
934 | else | 1012 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) |
935 | status = 0; | 1013 | status = -EREMOTEIO; |
1014 | else | ||
1015 | status = 0; | ||
1016 | } | ||
936 | } else { | 1017 | } else { |
937 | td->urb->actual_length = td->urb->transfer_buffer_length; | 1018 | td->urb->actual_length = td->urb->transfer_buffer_length; |
938 | /* Ignore a short packet completion if the | 1019 | /* Ignore a short packet completion if the |
939 | * untransferred length was zero. | 1020 | * untransferred length was zero. |
940 | */ | 1021 | */ |
941 | status = 0; | 1022 | if (status == -EREMOTEIO) |
1023 | status = 0; | ||
942 | } | 1024 | } |
943 | } else { | 1025 | } else { |
944 | /* Slow path - walk the list, starting from the dequeue | 1026 | /* Slow path - walk the list, starting from the dequeue |
@@ -965,19 +1047,30 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
965 | TRB_LEN(event->transfer_len); | 1047 | TRB_LEN(event->transfer_len); |
966 | } | 1048 | } |
967 | } | 1049 | } |
968 | /* The Endpoint Stop Command completion will take care of | ||
969 | * any stopped TDs. A stopped TD may be restarted, so don't update the | ||
970 | * ring dequeue pointer or take this TD off any lists yet. | ||
971 | */ | ||
972 | if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || | 1050 | if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || |
973 | GET_COMP_CODE(event->transfer_len) == COMP_STOP) { | 1051 | GET_COMP_CODE(event->transfer_len) == COMP_STOP) { |
1052 | /* The Endpoint Stop Command completion will take care of any | ||
1053 | * stopped TDs. A stopped TD may be restarted, so don't update | ||
1054 | * the ring dequeue pointer or take this TD off any lists yet. | ||
1055 | */ | ||
974 | ep_ring->stopped_td = td; | 1056 | ep_ring->stopped_td = td; |
975 | ep_ring->stopped_trb = event_trb; | 1057 | ep_ring->stopped_trb = event_trb; |
976 | } else { | 1058 | } else { |
977 | /* Update ring dequeue pointer */ | 1059 | if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) { |
978 | while (ep_ring->dequeue != td->last_trb) | 1060 | /* The transfer is completed from the driver's |
1061 | * perspective, but we need to issue a set dequeue | ||
1062 | * command for this stalled endpoint to move the dequeue | ||
1063 | * pointer past the TD. We can't do that here because | ||
1064 | * the halt condition must be cleared first. | ||
1065 | */ | ||
1066 | ep_ring->stopped_td = td; | ||
1067 | ep_ring->stopped_trb = event_trb; | ||
1068 | } else { | ||
1069 | /* Update ring dequeue pointer */ | ||
1070 | while (ep_ring->dequeue != td->last_trb) | ||
1071 | inc_deq(xhci, ep_ring, false); | ||
979 | inc_deq(xhci, ep_ring, false); | 1072 | inc_deq(xhci, ep_ring, false); |
980 | inc_deq(xhci, ep_ring, false); | 1073 | } |
981 | 1074 | ||
982 | /* Clean up the endpoint's TD list */ | 1075 | /* Clean up the endpoint's TD list */ |
983 | urb = td->urb; | 1076 | urb = td->urb; |
@@ -987,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
987 | list_del(&td->cancelled_td_list); | 1080 | list_del(&td->cancelled_td_list); |
988 | ep_ring->cancels_pending--; | 1081 | ep_ring->cancels_pending--; |
989 | } | 1082 | } |
990 | kfree(td); | 1083 | /* Leave the TD around for the reset endpoint function to use */ |
1084 | if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) { | ||
1085 | kfree(td); | ||
1086 | } | ||
991 | urb->hcpriv = NULL; | 1087 | urb->hcpriv = NULL; |
992 | } | 1088 | } |
993 | cleanup: | 1089 | cleanup: |
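The status juggling in the hunks above follows a small set of rules: an error code recorded earlier (for instance -EPIPE from a stall or -EOVERFLOW from babble) must survive the final length accounting, a short data stage on a control transfer is remembered and surfaces as -EREMOTEIO once the status stage completes, and a short bulk packet is only an error when the URB set URB_SHORT_NOT_OK. A minimal sketch of that resolution rule; resolve_td_status() is a hypothetical name, not a function in this driver:

```c
/* Hypothetical condensation of the status rules implemented above. */
static int resolve_td_status(int status, bool short_packet, bool short_not_ok)
{
        /* A previously recorded error (-EPIPE, -EPROTO, -EOVERFLOW, ...)
         * must not be overwritten by the final length accounting.
         */
        if (status != -EINPROGRESS && status != 0)
                return status;
        /* A short transfer only counts as an error if the URB asked. */
        if (short_packet && short_not_ok)
                return -EREMOTEIO;
        return 0;
}
```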
@@ -997,6 +1093,8 @@ cleanup: | |||
997 | /* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */ | 1093 | /* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */ |
998 | if (urb) { | 1094 | if (urb) { |
999 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); | 1095 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); |
1096 | xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n", | ||
1097 | urb, td->urb->actual_length, status); | ||
1000 | spin_unlock(&xhci->lock); | 1098 | spin_unlock(&xhci->lock); |
1001 | usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); | 1099 | usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); |
1002 | spin_lock(&xhci->lock); | 1100 | spin_lock(&xhci->lock); |
@@ -1014,6 +1112,7 @@ void xhci_handle_event(struct xhci_hcd *xhci) | |||
1014 | int update_ptrs = 1; | 1112 | int update_ptrs = 1; |
1015 | int ret; | 1113 | int ret; |
1016 | 1114 | ||
1115 | xhci_dbg(xhci, "In %s\n", __func__); | ||
1017 | if (!xhci->event_ring || !xhci->event_ring->dequeue) { | 1116 | if (!xhci->event_ring || !xhci->event_ring->dequeue) { |
1018 | xhci->error_bitmask |= 1 << 1; | 1117 | xhci->error_bitmask |= 1 << 1; |
1019 | return; | 1118 | return; |
@@ -1026,18 +1125,25 @@ void xhci_handle_event(struct xhci_hcd *xhci) | |||
1026 | xhci->error_bitmask |= 1 << 2; | 1125 | xhci->error_bitmask |= 1 << 2; |
1027 | return; | 1126 | return; |
1028 | } | 1127 | } |
1128 | xhci_dbg(xhci, "%s - OS owns TRB\n", __func__); | ||
1029 | 1129 | ||
1030 | /* FIXME: Handle more event types. */ | 1130 | /* FIXME: Handle more event types. */ |
1031 | switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { | 1131 | switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { |
1032 | case TRB_TYPE(TRB_COMPLETION): | 1132 | case TRB_TYPE(TRB_COMPLETION): |
1133 | xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__); | ||
1033 | handle_cmd_completion(xhci, &event->event_cmd); | 1134 | handle_cmd_completion(xhci, &event->event_cmd); |
1135 | xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__); | ||
1034 | break; | 1136 | break; |
1035 | case TRB_TYPE(TRB_PORT_STATUS): | 1137 | case TRB_TYPE(TRB_PORT_STATUS): |
1138 | xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__); | ||
1036 | handle_port_status(xhci, event); | 1139 | handle_port_status(xhci, event); |
1140 | xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__); | ||
1037 | update_ptrs = 0; | 1141 | update_ptrs = 0; |
1038 | break; | 1142 | break; |
1039 | case TRB_TYPE(TRB_TRANSFER): | 1143 | case TRB_TYPE(TRB_TRANSFER): |
1144 | xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__); | ||
1040 | ret = handle_tx_event(xhci, &event->trans_event); | 1145 | ret = handle_tx_event(xhci, &event->trans_event); |
1146 | xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__); | ||
1041 | if (ret < 0) | 1147 | if (ret < 0) |
1042 | xhci->error_bitmask |= 1 << 9; | 1148 | xhci->error_bitmask |= 1 << 9; |
1043 | else | 1149 | else |
@@ -1093,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
1093 | */ | 1199 | */ |
1094 | xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); | 1200 | xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); |
1095 | return -ENOENT; | 1201 | return -ENOENT; |
1096 | case EP_STATE_HALTED: | ||
1097 | case EP_STATE_ERROR: | 1202 | case EP_STATE_ERROR: |
1098 | xhci_warn(xhci, "WARN waiting for halt or error on ep " | 1203 | xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); |
1099 | "to be cleared\n"); | ||
1100 | /* FIXME event handling code for error needs to clear it */ | 1204 | /* FIXME event handling code for error needs to clear it */ |
1101 | /* XXX not sure if this should be -ENOENT or not */ | 1205 | /* XXX not sure if this should be -ENOENT or not */ |
1102 | return -EINVAL; | 1206 | return -EINVAL; |
1207 | case EP_STATE_HALTED: | ||
1208 | xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); | ||
1103 | case EP_STATE_STOPPED: | 1209 | case EP_STATE_STOPPED: |
1104 | case EP_STATE_RUNNING: | 1210 | case EP_STATE_RUNNING: |
1105 | break; | 1211 | break; |
@@ -1128,9 +1234,9 @@ static int prepare_transfer(struct xhci_hcd *xhci, | |||
1128 | gfp_t mem_flags) | 1234 | gfp_t mem_flags) |
1129 | { | 1235 | { |
1130 | int ret; | 1236 | int ret; |
1131 | 1237 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); | |
1132 | ret = prepare_ring(xhci, xdev->ep_rings[ep_index], | 1238 | ret = prepare_ring(xhci, xdev->ep_rings[ep_index], |
1133 | xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, | 1239 | ep_ctx->ep_info & EP_STATE_MASK, |
1134 | num_trbs, mem_flags); | 1240 | num_trbs, mem_flags); |
1135 | if (ret) | 1241 | if (ret) |
1136 | return ret; | 1242 | return ret; |
@@ -1285,6 +1391,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1285 | /* Queue the first TRB, even if it's zero-length */ | 1391 | /* Queue the first TRB, even if it's zero-length */ |
1286 | do { | 1392 | do { |
1287 | u32 field = 0; | 1393 | u32 field = 0; |
1394 | u32 length_field = 0; | ||
1288 | 1395 | ||
1289 | /* Don't change the cycle bit of the first TRB until later */ | 1396 | /* Don't change the cycle bit of the first TRB until later */ |
1290 | if (first_trb) | 1397 | if (first_trb) |
@@ -1314,10 +1421,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1314 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), | 1421 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), |
1315 | (unsigned int) addr + trb_buff_len); | 1422 | (unsigned int) addr + trb_buff_len); |
1316 | } | 1423 | } |
1424 | length_field = TRB_LEN(trb_buff_len) | | ||
1425 | TD_REMAINDER(urb->transfer_buffer_length - running_total) | | ||
1426 | TRB_INTR_TARGET(0); | ||
1317 | queue_trb(xhci, ep_ring, false, | 1427 | queue_trb(xhci, ep_ring, false, |
1318 | (u32) addr, | 1428 | lower_32_bits(addr), |
1319 | (u32) ((u64) addr >> 32), | 1429 | upper_32_bits(addr), |
1320 | TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), | 1430 | length_field, |
1321 | /* We always want to know if the TRB was short, | 1431 | /* We always want to know if the TRB was short, |
1322 | * or we won't get an event when it completes. | 1432 | * or we won't get an event when it completes. |
1323 | * (Unless we use event data TRBs, which are a | 1433 | * (Unless we use event data TRBs, which are a |
@@ -1365,7 +1475,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1365 | struct xhci_generic_trb *start_trb; | 1475 | struct xhci_generic_trb *start_trb; |
1366 | bool first_trb; | 1476 | bool first_trb; |
1367 | int start_cycle; | 1477 | int start_cycle; |
1368 | u32 field; | 1478 | u32 field, length_field; |
1369 | 1479 | ||
1370 | int running_total, trb_buff_len, ret; | 1480 | int running_total, trb_buff_len, ret; |
1371 | u64 addr; | 1481 | u64 addr; |
@@ -1443,10 +1553,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1443 | td->last_trb = ep_ring->enqueue; | 1553 | td->last_trb = ep_ring->enqueue; |
1444 | field |= TRB_IOC; | 1554 | field |= TRB_IOC; |
1445 | } | 1555 | } |
1556 | length_field = TRB_LEN(trb_buff_len) | | ||
1557 | TD_REMAINDER(urb->transfer_buffer_length - running_total) | | ||
1558 | TRB_INTR_TARGET(0); | ||
1446 | queue_trb(xhci, ep_ring, false, | 1559 | queue_trb(xhci, ep_ring, false, |
1447 | (u32) addr, | 1560 | lower_32_bits(addr), |
1448 | (u32) ((u64) addr >> 32), | 1561 | upper_32_bits(addr), |
1449 | TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), | 1562 | length_field, |
1450 | /* We always want to know if the TRB was short, | 1563 | /* We always want to know if the TRB was short, |
1451 | * or we won't get an event when it completes. | 1564 | * or we won't get an event when it completes. |
1452 | * (Unless we use event data TRBs, which are a | 1565 | * (Unless we use event data TRBs, which are a |
@@ -1478,7 +1591,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1478 | struct usb_ctrlrequest *setup; | 1591 | struct usb_ctrlrequest *setup; |
1479 | struct xhci_generic_trb *start_trb; | 1592 | struct xhci_generic_trb *start_trb; |
1480 | int start_cycle; | 1593 | int start_cycle; |
1481 | u32 field; | 1594 | u32 field, length_field; |
1482 | struct xhci_td *td; | 1595 | struct xhci_td *td; |
1483 | 1596 | ||
1484 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; | 1597 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; |
@@ -1528,13 +1641,16 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1528 | 1641 | ||
1529 | /* If there's data, queue data TRBs */ | 1642 | /* If there's data, queue data TRBs */ |
1530 | field = 0; | 1643 | field = 0; |
1644 | length_field = TRB_LEN(urb->transfer_buffer_length) | | ||
1645 | TD_REMAINDER(urb->transfer_buffer_length) | | ||
1646 | TRB_INTR_TARGET(0); | ||
1531 | if (urb->transfer_buffer_length > 0) { | 1647 | if (urb->transfer_buffer_length > 0) { |
1532 | if (setup->bRequestType & USB_DIR_IN) | 1648 | if (setup->bRequestType & USB_DIR_IN) |
1533 | field |= TRB_DIR_IN; | 1649 | field |= TRB_DIR_IN; |
1534 | queue_trb(xhci, ep_ring, false, | 1650 | queue_trb(xhci, ep_ring, false, |
1535 | lower_32_bits(urb->transfer_dma), | 1651 | lower_32_bits(urb->transfer_dma), |
1536 | upper_32_bits(urb->transfer_dma), | 1652 | upper_32_bits(urb->transfer_dma), |
1537 | TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0), | 1653 | length_field, |
1538 | /* Event on short tx */ | 1654 | /* Event on short tx */ |
1539 | field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); | 1655 | field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); |
1540 | } | 1656 | } |
@@ -1603,7 +1719,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) | |||
1603 | int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1719 | int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
1604 | u32 slot_id) | 1720 | u32 slot_id) |
1605 | { | 1721 | { |
1606 | return queue_command(xhci, in_ctx_ptr, 0, 0, | 1722 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), |
1723 | upper_32_bits(in_ctx_ptr), 0, | ||
1607 | TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); | 1724 | TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); |
1608 | } | 1725 | } |
1609 | 1726 | ||
@@ -1611,7 +1728,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | |||
1611 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1728 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
1612 | u32 slot_id) | 1729 | u32 slot_id) |
1613 | { | 1730 | { |
1614 | return queue_command(xhci, in_ctx_ptr, 0, 0, | 1731 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), |
1732 | upper_32_bits(in_ctx_ptr), 0, | ||
1615 | TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); | 1733 | TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); |
1616 | } | 1734 | } |
1617 | 1735 | ||
@@ -1639,10 +1757,23 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, | |||
1639 | u32 type = TRB_TYPE(TRB_SET_DEQ); | 1757 | u32 type = TRB_TYPE(TRB_SET_DEQ); |
1640 | 1758 | ||
1641 | addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); | 1759 | addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); |
1642 | if (addr == 0) | 1760 | if (addr == 0) { |
1643 | xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); | 1761 | xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); |
1644 | xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", | 1762 | xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", |
1645 | deq_seg, deq_ptr); | 1763 | deq_seg, deq_ptr); |
1646 | return queue_command(xhci, (u32) addr | cycle_state, 0, 0, | 1764 | return 0; |
1765 | } | ||
1766 | return queue_command(xhci, lower_32_bits(addr) | cycle_state, | ||
1767 | upper_32_bits(addr), 0, | ||
1647 | trb_slot_id | trb_ep_index | type); | 1768 | trb_slot_id | trb_ep_index | type); |
1648 | } | 1769 | } |
1770 | |||
1771 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, | ||
1772 | unsigned int ep_index) | ||
1773 | { | ||
1774 | u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); | ||
1775 | u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); | ||
1776 | u32 type = TRB_TYPE(TRB_RESET_EP); | ||
1777 | |||
1778 | return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type); | ||
1779 | } | ||
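Together with the COMP_STALL handling in handle_tx_event(), the new xhci_queue_reset_ep() completes the stall-recovery sequence: the stalled TD stays parked on the ring, a Reset Endpoint command clears the halt in the controller, and a Set TR Dequeue Pointer command then moves the hardware past the offending TD. A hedged sketch of that ordering, using the helpers declared in xhci.h below; in the driver itself the steps are split across xhci_endpoint_reset() and the command-completion handler, and locking and error handling are omitted:

```c
/* Sketch of the stall-recovery command ordering; not a literal
 * copy of the in-tree flow, which runs across two contexts.
 */
static void sketch_recover_stall(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, struct xhci_ring *ep_ring)
{
        struct xhci_dequeue_state deq_state;

        /* 1. Ask the controller to clear the halt condition. */
        xhci_queue_reset_ep(xhci, slot_id, ep_index);

        /* 2. Compute a dequeue pointer just past the stalled TD... */
        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
                        ep_ring->stopped_td, &deq_state);

        /* 3. ...and hand it to the hardware. */
        xhci_queue_new_dequeue_state(xhci, ep_ring, slot_id, ep_index,
                        &deq_state);

        /* Ring the command doorbell so both commands are processed. */
        xhci_ring_cmd_db(xhci);
}
```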
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 8936eeb5588b..d31d32206ba3 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | #include <linux/usb.h> | 26 | #include <linux/usb.h> |
27 | #include <linux/timer.h> | 27 | #include <linux/timer.h> |
28 | #include <linux/kernel.h> | ||
28 | 29 | ||
29 | #include "../core/hcd.h" | 30 | #include "../core/hcd.h" |
30 | /* Code sharing between pci-quirks and xhci hcd */ | 31 | /* Code sharing between pci-quirks and xhci hcd */ |
@@ -42,14 +43,6 @@ | |||
42 | * xHCI register interface. | 43 | * xHCI register interface. |
43 | * This corresponds to the eXtensible Host Controller Interface (xHCI) | 44 | * This corresponds to the eXtensible Host Controller Interface (xHCI) |
44 | * Revision 0.95 specification | 45 | * Revision 0.95 specification |
45 | * | ||
46 | * Registers should always be accessed with double word or quad word accesses. | ||
47 | * | ||
48 | * Some xHCI implementations may support 64-bit address pointers. Registers | ||
49 | * with 64-bit address pointers should be written to with dword accesses by | ||
50 | * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. | ||
51 | * xHCI implementations that do not support 64-bit address pointers will ignore | ||
52 | * the high dword, and write order is irrelevant. | ||
53 | */ | 46 | */ |
54 | 47 | ||
55 | /** | 48 | /** |
@@ -96,6 +89,7 @@ struct xhci_cap_regs { | |||
96 | #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) | 89 | #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) |
97 | /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ | 90 | /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ |
98 | /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ | 91 | /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ |
92 | #define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) | ||
99 | 93 | ||
100 | /* HCSPARAMS3 - hcs_params3 - bitmasks */ | 94 | /* HCSPARAMS3 - hcs_params3 - bitmasks */ |
101 | /* bits 0:7, Max U1 to U0 latency for the roothub ports */ | 95 | /* bits 0:7, Max U1 to U0 latency for the roothub ports */ |
@@ -166,10 +160,10 @@ struct xhci_op_regs { | |||
166 | u32 reserved1; | 160 | u32 reserved1; |
167 | u32 reserved2; | 161 | u32 reserved2; |
168 | u32 dev_notification; | 162 | u32 dev_notification; |
169 | u32 cmd_ring[2]; | 163 | u64 cmd_ring; |
170 | /* rsvd: offset 0x20-2F */ | 164 | /* rsvd: offset 0x20-2F */ |
171 | u32 reserved3[4]; | 165 | u32 reserved3[4]; |
172 | u32 dcbaa_ptr[2]; | 166 | u64 dcbaa_ptr; |
173 | u32 config_reg; | 167 | u32 config_reg; |
174 | /* rsvd: offset 0x3C-3FF */ | 168 | /* rsvd: offset 0x3C-3FF */ |
175 | u32 reserved4[241]; | 169 | u32 reserved4[241]; |
@@ -254,7 +248,7 @@ struct xhci_op_regs { | |||
254 | #define CMD_RING_RUNNING (1 << 3) | 248 | #define CMD_RING_RUNNING (1 << 3) |
255 | /* bits 4:5 reserved and should be preserved */ | 249 | /* bits 4:5 reserved and should be preserved */ |
256 | /* Command Ring pointer - reserved low control bits that must be preserved */ | 250 | /* Command Ring pointer - reserved low control bits that must be preserved */ |
257 | #define CMD_RING_ADDR_MASK (0xffffffc0) | 251 | #define CMD_RING_RSVD_BITS (0x3f) |
258 | 252 | ||
259 | /* CONFIG - Configure Register - config_reg bitmasks */ | 253 | /* CONFIG - Configure Register - config_reg bitmasks */ |
260 | /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ | 254 | /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ |
@@ -382,8 +376,8 @@ struct xhci_intr_reg { | |||
382 | u32 irq_control; | 376 | u32 irq_control; |
383 | u32 erst_size; | 377 | u32 erst_size; |
384 | u32 rsvd; | 378 | u32 rsvd; |
385 | u32 erst_base[2]; | 379 | u64 erst_base; |
386 | u32 erst_dequeue[2]; | 380 | u64 erst_dequeue; |
387 | }; | 381 | }; |
388 | 382 | ||
389 | /* irq_pending bitmasks */ | 383 | /* irq_pending bitmasks */ |
@@ -453,6 +447,27 @@ struct xhci_doorbell_array { | |||
453 | 447 | ||
454 | 448 | ||
455 | /** | 449 | /** |
450 | * struct xhci_container_ctx | ||
451 | * @type: Type of context. Used to calculate offsets to contained contexts. | ||
452 | * @size: Size of the context data | ||
453 | * @bytes: The raw context data given to HW | ||
454 | * @dma: dma address of the bytes | ||
455 | * | ||
456 | * Represents either a Device or Input context. Holds a pointer to the raw | ||
457 | * memory used for the context (bytes) and dma address of it (dma). | ||
458 | */ | ||
459 | struct xhci_container_ctx { | ||
460 | unsigned type; | ||
461 | #define XHCI_CTX_TYPE_DEVICE 0x1 | ||
462 | #define XHCI_CTX_TYPE_INPUT 0x2 | ||
463 | |||
464 | int size; | ||
465 | |||
466 | u8 *bytes; | ||
467 | dma_addr_t dma; | ||
468 | }; | ||
469 | |||
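With device and input contexts wrapped in one container, the xhci_get_*_ctx() accessors declared at the end of this header can derive offsets from @type instead of needing two different struct layouts. A sketch of the offset arithmetic they imply, assuming 32-byte contexts; the real driver sizes contexts from the capability registers, since 64-byte contexts double the stride:

```c
/* Illustrative offset math behind the accessors; SKETCH_CTX_SIZE
 * is an assumption standing in for the hardware-reported size.
 */
#define SKETCH_CTX_SIZE 32

static struct xhci_slot_ctx *sketch_get_slot_ctx(struct xhci_container_ctx *ctx)
{
        /* An input context leads with the input control context,
         * so the slot context sits one entry in; a device context
         * starts directly with the slot context.
         */
        if (ctx->type == XHCI_CTX_TYPE_INPUT)
                return (struct xhci_slot_ctx *)(ctx->bytes + SKETCH_CTX_SIZE);
        return (struct xhci_slot_ctx *)ctx->bytes;
}

static struct xhci_ep_ctx *sketch_get_ep_ctx(struct xhci_container_ctx *ctx,
                unsigned int ep_index)
{
        /* Endpoint contexts follow the slot context. */
        unsigned int nr = 1 + ep_index;

        if (ctx->type == XHCI_CTX_TYPE_INPUT)
                nr++;
        return (struct xhci_ep_ctx *)(ctx->bytes + nr * SKETCH_CTX_SIZE);
}
```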
470 | /** | ||
456 | * struct xhci_slot_ctx | 471 | * struct xhci_slot_ctx |
457 | * @dev_info: Route string, device speed, hub info, and last valid endpoint | 472 | * @dev_info: Route string, device speed, hub info, and last valid endpoint |
458 | * @dev_info2: Max exit latency for device number, root hub port number | 473 | * @dev_info2: Max exit latency for device number, root hub port number |
@@ -538,7 +553,7 @@ struct xhci_slot_ctx { | |||
538 | struct xhci_ep_ctx { | 553 | struct xhci_ep_ctx { |
539 | u32 ep_info; | 554 | u32 ep_info; |
540 | u32 ep_info2; | 555 | u32 ep_info2; |
541 | u32 deq[2]; | 556 | u64 deq; |
542 | u32 tx_info; | 557 | u32 tx_info; |
543 | /* offset 0x14 - 0x1f reserved for HC internal use */ | 558 | /* offset 0x14 - 0x1f reserved for HC internal use */ |
544 | u32 reserved[3]; | 559 | u32 reserved[3]; |
@@ -589,18 +604,16 @@ struct xhci_ep_ctx { | |||
589 | 604 | ||
590 | 605 | ||
591 | /** | 606 | /** |
592 | * struct xhci_device_control | 607 | * struct xhci_input_control_context |
593 | * Input/Output context; see section 6.2.5. | 608 | * Input control context; see section 6.2.5. |
594 | * | 609 | * |
595 | * @drop_context: set the bit of the endpoint context you want to disable | 610 | * @drop_context: set the bit of the endpoint context you want to disable |
596 | * @add_context: set the bit of the endpoint context you want to enable | 611 | * @add_context: set the bit of the endpoint context you want to enable |
597 | */ | 612 | */ |
598 | struct xhci_device_control { | 613 | struct xhci_input_control_ctx { |
599 | u32 drop_flags; | 614 | u32 drop_flags; |
600 | u32 add_flags; | 615 | u32 add_flags; |
601 | u32 rsvd[6]; | 616 | u32 rsvd2[6]; |
602 | struct xhci_slot_ctx slot; | ||
603 | struct xhci_ep_ctx ep[31]; | ||
604 | }; | 617 | }; |
605 | 618 | ||
606 | /* drop context bitmasks */ | 619 | /* drop context bitmasks */ |
@@ -608,7 +621,6 @@ struct xhci_device_control { | |||
608 | /* add context bitmasks */ | 621 | /* add context bitmasks */ |
609 | #define ADD_EP(x) (0x1 << x) | 622 | #define ADD_EP(x) (0x1 << x) |
610 | 623 | ||
611 | |||
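Before a Configure Endpoint command, the driver marks which endpoint contexts the controller should enable or disable by setting bits in the input control context, as the kerneldoc above says. A small sketch; the ep_index-to-bit mapping (+1, on the assumption that bit 0 addresses the slot context) and the DROP_EP() counterpart to ADD_EP() are assumptions here:

```c
/* Sketch: flag endpoint context 3 for addition and endpoint
 * context 5 for removal, then issue Configure Endpoint.
 */
static int sketch_reconfigure(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev, u32 slot_id)
{
        struct xhci_input_control_ctx *ctrl_ctx =
                xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);

        ctrl_ctx->add_flags |= ADD_EP(3 + 1);
        ctrl_ctx->drop_flags |= DROP_EP(5 + 1);

        return xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
                        slot_id);
}
```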
612 | struct xhci_virt_device { | 624 | struct xhci_virt_device { |
613 | /* | 625 | /* |
614 | * Commands to the hardware are passed an "input context" that | 626 | * Commands to the hardware are passed an "input context" that |
@@ -618,11 +630,10 @@ struct xhci_virt_device { | |||
618 | * track of input and output contexts separately because | 630 | * track of input and output contexts separately because |
619 | * these commands might fail and we don't trust the hardware. | 631 | * these commands might fail and we don't trust the hardware. |
620 | */ | 632 | */ |
621 | struct xhci_device_control *out_ctx; | 633 | struct xhci_container_ctx *out_ctx; |
622 | dma_addr_t out_ctx_dma; | ||
623 | /* Used for addressing devices and configuration changes */ | 634 | /* Used for addressing devices and configuration changes */ |
624 | struct xhci_device_control *in_ctx; | 635 | struct xhci_container_ctx *in_ctx; |
625 | dma_addr_t in_ctx_dma; | 636 | |
626 | /* FIXME when stream support is added */ | 637 | /* FIXME when stream support is added */ |
627 | struct xhci_ring *ep_rings[31]; | 638 | struct xhci_ring *ep_rings[31]; |
628 | /* Temporary storage in case the configure endpoint command fails and we | 639 | /* Temporary storage in case the configure endpoint command fails and we |
@@ -641,7 +652,7 @@ struct xhci_virt_device { | |||
641 | */ | 652 | */ |
642 | struct xhci_device_context_array { | 653 | struct xhci_device_context_array { |
643 | /* 64-bit device addresses; we only write 32-bit addresses */ | 654 | /* 64-bit device addresses; we only write 32-bit addresses */ |
644 | u32 dev_context_ptrs[2*MAX_HC_SLOTS]; | 655 | u64 dev_context_ptrs[MAX_HC_SLOTS]; |
645 | /* private xHCD pointers */ | 656 | /* private xHCD pointers */ |
646 | dma_addr_t dma; | 657 | dma_addr_t dma; |
647 | }; | 658 | }; |
@@ -654,7 +665,7 @@ struct xhci_device_context_array { | |||
654 | 665 | ||
655 | struct xhci_stream_ctx { | 666 | struct xhci_stream_ctx { |
656 | /* 64-bit stream ring address, cycle state, and stream type */ | 667 | /* 64-bit stream ring address, cycle state, and stream type */ |
657 | u32 stream_ring[2]; | 668 | u64 stream_ring; |
658 | /* offset 0x14 - 0x1f reserved for HC internal use */ | 669 | /* offset 0x14 - 0x1f reserved for HC internal use */ |
659 | u32 reserved[2]; | 670 | u32 reserved[2]; |
660 | }; | 671 | }; |
@@ -662,7 +673,7 @@ struct xhci_stream_ctx { | |||
662 | 673 | ||
663 | struct xhci_transfer_event { | 674 | struct xhci_transfer_event { |
664 | /* 64-bit buffer address, or immediate data */ | 675 | /* 64-bit buffer address, or immediate data */ |
665 | u32 buffer[2]; | 676 | u64 buffer; |
666 | u32 transfer_len; | 677 | u32 transfer_len; |
667 | /* This field is interpreted differently based on the type of TRB */ | 678 | /* This field is interpreted differently based on the type of TRB */ |
668 | u32 flags; | 679 | u32 flags; |
@@ -744,7 +755,7 @@ struct xhci_transfer_event { | |||
744 | 755 | ||
745 | struct xhci_link_trb { | 756 | struct xhci_link_trb { |
746 | /* 64-bit segment pointer*/ | 757 | /* 64-bit segment pointer*/ |
747 | u32 segment_ptr[2]; | 758 | u64 segment_ptr; |
748 | u32 intr_target; | 759 | u32 intr_target; |
749 | u32 control; | 760 | u32 control; |
750 | }; | 761 | }; |
@@ -755,7 +766,7 @@ struct xhci_link_trb { | |||
755 | /* Command completion event TRB */ | 766 | /* Command completion event TRB */ |
756 | struct xhci_event_cmd { | 767 | struct xhci_event_cmd { |
757 | /* Pointer to command TRB, or the value passed by the event data trb */ | 768 | /* Pointer to command TRB, or the value passed by the event data trb */ |
758 | u32 cmd_trb[2]; | 769 | u64 cmd_trb; |
759 | u32 status; | 770 | u32 status; |
760 | u32 flags; | 771 | u32 flags; |
761 | }; | 772 | }; |
@@ -848,8 +859,8 @@ union xhci_trb { | |||
848 | #define TRB_CONFIG_EP 12 | 859 | #define TRB_CONFIG_EP 12 |
849 | /* Evaluate Context Command */ | 860 | /* Evaluate Context Command */ |
850 | #define TRB_EVAL_CONTEXT 13 | 861 | #define TRB_EVAL_CONTEXT 13 |
851 | /* Reset Transfer Ring Command */ | 862 | /* Reset Endpoint Command */ |
852 | #define TRB_RESET_RING 14 | 863 | #define TRB_RESET_EP 14 |
853 | /* Stop Transfer Ring Command */ | 864 | /* Stop Transfer Ring Command */ |
854 | #define TRB_STOP_RING 15 | 865 | #define TRB_STOP_RING 15 |
855 | /* Set Transfer Ring Dequeue Pointer Command */ | 866 | /* Set Transfer Ring Dequeue Pointer Command */ |
@@ -929,6 +940,7 @@ struct xhci_ring { | |||
929 | unsigned int cancels_pending; | 940 | unsigned int cancels_pending; |
930 | unsigned int state; | 941 | unsigned int state; |
931 | #define SET_DEQ_PENDING (1 << 0) | 942 | #define SET_DEQ_PENDING (1 << 0) |
943 | #define EP_HALTED (1 << 1) | ||
932 | /* The TRB that was last reported in a stopped endpoint ring */ | 944 | /* The TRB that was last reported in a stopped endpoint ring */ |
933 | union xhci_trb *stopped_trb; | 945 | union xhci_trb *stopped_trb; |
934 | struct xhci_td *stopped_td; | 946 | struct xhci_td *stopped_td; |
@@ -940,9 +952,15 @@ struct xhci_ring { | |||
940 | u32 cycle_state; | 952 | u32 cycle_state; |
941 | }; | 953 | }; |
942 | 954 | ||
955 | struct xhci_dequeue_state { | ||
956 | struct xhci_segment *new_deq_seg; | ||
957 | union xhci_trb *new_deq_ptr; | ||
958 | int new_cycle_state; | ||
959 | }; | ||
960 | |||
943 | struct xhci_erst_entry { | 961 | struct xhci_erst_entry { |
944 | /* 64-bit event ring segment address */ | 962 | /* 64-bit event ring segment address */ |
945 | u32 seg_addr[2]; | 963 | u64 seg_addr; |
946 | u32 seg_size; | 964 | u32 seg_size; |
947 | /* Set to zero */ | 965 | /* Set to zero */ |
948 | u32 rsvd; | 966 | u32 rsvd; |
@@ -957,6 +975,13 @@ struct xhci_erst { | |||
957 | unsigned int erst_size; | 975 | unsigned int erst_size; |
958 | }; | 976 | }; |
959 | 977 | ||
978 | struct xhci_scratchpad { | ||
979 | u64 *sp_array; | ||
980 | dma_addr_t sp_dma; | ||
981 | void **sp_buffers; | ||
982 | dma_addr_t *sp_dma_buffers; | ||
983 | }; | ||
984 | |||
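The scratchpad follows from the HCS_MAX_SCRATCHPAD() field added above: the controller reports how many private pages it needs, the driver allocates them plus a DMA array of their addresses, and entry 0 of the device context array points at that array. A hedged allocation sketch; the in-tree version lives in xhci-mem.c and unwinds each allocation on failure (failure checks are omitted here), and the xhci->hcs_params2 and xhci->dcbaa field names are assumptions based on the rest of this driver:

```c
static int sketch_scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
        struct xhci_scratchpad *sp;
        int i;

        if (!num_sp)
                return 0;       /* controller needs no scratchpad pages */

        sp = kzalloc(sizeof(*sp), flags);
        sp->sp_array = dma_alloc_coherent(dev, num_sp * sizeof(u64),
                        &sp->sp_dma, flags);
        sp->sp_buffers = kzalloc(num_sp * sizeof(void *), flags);
        sp->sp_dma_buffers = kzalloc(num_sp * sizeof(dma_addr_t), flags);

        for (i = 0; i < num_sp; i++) {
                dma_addr_t dma;
                void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma, flags);

                sp->sp_array[i] = dma;          /* 64-bit page pointer */
                sp->sp_buffers[i] = buf;
                sp->sp_dma_buffers[i] = dma;
        }
        /* Entry 0 of the DCBAA points at the scratchpad array rather
         * than at a device context.
         */
        xhci->dcbaa->dev_context_ptrs[0] = sp->sp_dma;
        xhci->scratchpad = sp;
        return 0;
}
```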
960 | /* | 985 | /* |
961 | * Each segment table entry is 4*32bits long. 1K seems like an ok size: | 986 | * Each segment table entry is 4*32bits long. 1K seems like an ok size: |
962 | * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table, | 987 | * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table, |
@@ -1011,6 +1036,9 @@ struct xhci_hcd { | |||
1011 | struct xhci_ring *cmd_ring; | 1036 | struct xhci_ring *cmd_ring; |
1012 | struct xhci_ring *event_ring; | 1037 | struct xhci_ring *event_ring; |
1013 | struct xhci_erst erst; | 1038 | struct xhci_erst erst; |
1039 | /* Scratchpad */ | ||
1040 | struct xhci_scratchpad *scratchpad; | ||
1041 | |||
1014 | /* slot enabling and address device helpers */ | 1042 | /* slot enabling and address device helpers */ |
1015 | struct completion addr_dev; | 1043 | struct completion addr_dev; |
1016 | int slot_id; | 1044 | int slot_id; |
@@ -1071,13 +1099,43 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci, | |||
1071 | static inline void xhci_writel(struct xhci_hcd *xhci, | 1099 | static inline void xhci_writel(struct xhci_hcd *xhci, |
1072 | const unsigned int val, __u32 __iomem *regs) | 1100 | const unsigned int val, __u32 __iomem *regs) |
1073 | { | 1101 | { |
1074 | if (!in_interrupt()) | 1102 | xhci_dbg(xhci, |
1075 | xhci_dbg(xhci, | 1103 | "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", |
1076 | "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", | 1104 | regs, val); |
1077 | regs, val); | ||
1078 | writel(val, regs); | 1105 | writel(val, regs); |
1079 | } | 1106 | } |
1080 | 1107 | ||
1108 | /* | ||
1109 | * Registers should always be accessed with double word or quad word accesses. | ||
1110 | * | ||
1111 | * Some xHCI implementations may support 64-bit address pointers. Registers | ||
1112 | * with 64-bit address pointers should be written to with dword accesses by | ||
1113 | * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. | ||
1114 | * xHCI implementations that do not support 64-bit address pointers will ignore | ||
1115 | * the high dword, and write order is irrelevant. | ||
1116 | */ | ||
1117 | static inline u64 xhci_read_64(const struct xhci_hcd *xhci, | ||
1118 | __u64 __iomem *regs) | ||
1119 | { | ||
1120 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | ||
1121 | u64 val_lo = readl(ptr); | ||
1122 | u64 val_hi = readl(ptr + 1); | ||
1123 | return val_lo + (val_hi << 32); | ||
1124 | } | ||
1125 | static inline void xhci_write_64(struct xhci_hcd *xhci, | ||
1126 | const u64 val, __u64 __iomem *regs) | ||
1127 | { | ||
1128 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | ||
1129 | u32 val_lo = lower_32_bits(val); | ||
1130 | u32 val_hi = upper_32_bits(val); | ||
1131 | |||
1132 | xhci_dbg(xhci, | ||
1133 | "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n", | ||
1134 | regs, (long unsigned int) val); | ||
1135 | writel(val_lo, ptr); | ||
1136 | writel(val_hi, ptr + 1); | ||
1137 | } | ||
1138 | |||
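A short usage sketch for the new accessors against the op regs fields widened to u64 earlier in this patch; the xhci->op_regs, xhci->dcbaa, and cmd_ring->first_seg fields are assumed from the rest of the driver, and command ring cycle-state handling is elided:

```c
u64 val;

/* Program the device context base address array pointer. */
xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);

/* Point the command ring register at the first segment while
 * preserving the reserved low control bits (CMD_RING_RSVD_BITS).
 */
val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val = (val & (u64) CMD_RING_RSVD_BITS) |
        (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS);
xhci_write_64(xhci, val, &xhci->op_regs->cmd_ring);
```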
1081 | /* xHCI debugging */ | 1139 | /* xHCI debugging */ |
1082 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); | 1140 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); |
1083 | void xhci_print_registers(struct xhci_hcd *xhci); | 1141 | void xhci_print_registers(struct xhci_hcd *xhci); |
@@ -1090,7 +1148,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring); | |||
1090 | void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); | 1148 | void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); |
1091 | void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); | 1149 | void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); |
1092 | void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); | 1150 | void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); |
1093 | void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep); | 1151 | void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); |
1094 | 1152 | ||
1095 | /* xHCI memory managment */ | 1153 | /* xHCI memory managment */ |
1096 | void xhci_mem_cleanup(struct xhci_hcd *xhci); | 1154 | void xhci_mem_cleanup(struct xhci_hcd *xhci); |
@@ -1128,6 +1186,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); | |||
1128 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); | 1186 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); |
1129 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); | 1187 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); |
1130 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); | 1188 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); |
1189 | void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep); | ||
1131 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); | 1190 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); |
1132 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); | 1191 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); |
1133 | 1192 | ||
@@ -1148,10 +1207,23 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, | |||
1148 | int slot_id, unsigned int ep_index); | 1207 | int slot_id, unsigned int ep_index); |
1149 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1208 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
1150 | u32 slot_id); | 1209 | u32 slot_id); |
1210 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, | ||
1211 | unsigned int ep_index); | ||
1212 | void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | ||
1213 | unsigned int slot_id, unsigned int ep_index, | ||
1214 | struct xhci_td *cur_td, struct xhci_dequeue_state *state); | ||
1215 | void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | ||
1216 | struct xhci_ring *ep_ring, unsigned int slot_id, | ||
1217 | unsigned int ep_index, struct xhci_dequeue_state *deq_state); | ||
1151 | 1218 | ||
1152 | /* xHCI roothub code */ | 1219 | /* xHCI roothub code */ |
1153 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, | 1220 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, |
1154 | char *buf, u16 wLength); | 1221 | char *buf, u16 wLength); |
1155 | int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); | 1222 | int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); |
1156 | 1223 | ||
1224 | /* xHCI contexts */ | ||
1225 | struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); | ||
1226 | struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); | ||
1227 | struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); | ||
1228 | |||
1157 | #endif /* __LINUX_XHCI_HCD_H */ | 1229 | #endif /* __LINUX_XHCI_HCD_H */ |
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index a68d91a11bee..abe3aa67ed00 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig | |||
@@ -220,7 +220,7 @@ config USB_IOWARRIOR | |||
220 | 220 | ||
221 | config USB_TEST | 221 | config USB_TEST |
222 | tristate "USB testing driver" | 222 | tristate "USB testing driver" |
223 | depends on USB && USB_DEVICEFS | 223 | depends on USB |
224 | help | 224 | help |
225 | This driver is for testing host controller software. It is used | 225 | This driver is for testing host controller software. It is used |
226 | with specialized device firmware for regression and stress testing, | 226 | with specialized device firmware for regression and stress testing, |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 554a414f65d1..1d26beddf2ca 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1326,7 +1326,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1326 | int i; | 1326 | int i; |
1327 | 1327 | ||
1328 | /* log core options (read using indexed model) */ | 1328 | /* log core options (read using indexed model) */ |
1329 | musb_ep_select(mbase, 0); | ||
1330 | reg = musb_read_configdata(mbase); | 1329 | reg = musb_read_configdata(mbase); |
1331 | 1330 | ||
1332 | strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); | 1331 | strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); |
@@ -1990,7 +1989,7 @@ bad_config: | |||
1990 | if (status < 0) | 1989 | if (status < 0) |
1991 | goto fail2; | 1990 | goto fail2; |
1992 | 1991 | ||
1993 | #ifdef CONFIG_USB_OTG | 1992 | #ifdef CONFIG_USB_MUSB_OTG |
1994 | setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); | 1993 | setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); |
1995 | #endif | 1994 | #endif |
1996 | 1995 | ||
@@ -2168,8 +2167,9 @@ static int __devexit musb_remove(struct platform_device *pdev) | |||
2168 | 2167 | ||
2169 | #ifdef CONFIG_PM | 2168 | #ifdef CONFIG_PM |
2170 | 2169 | ||
2171 | static int musb_suspend(struct platform_device *pdev, pm_message_t message) | 2170 | static int musb_suspend(struct device *dev) |
2172 | { | 2171 | { |
2172 | struct platform_device *pdev = to_platform_device(dev); | ||
2173 | unsigned long flags; | 2173 | unsigned long flags; |
2174 | struct musb *musb = dev_to_musb(&pdev->dev); | 2174 | struct musb *musb = dev_to_musb(&pdev->dev); |
2175 | 2175 | ||
@@ -2196,8 +2196,9 @@ static int musb_suspend(struct platform_device *pdev, pm_message_t message) | |||
2196 | return 0; | 2196 | return 0; |
2197 | } | 2197 | } |
2198 | 2198 | ||
2199 | static int musb_resume_early(struct platform_device *pdev) | 2199 | static int musb_resume_noirq(struct device *dev) |
2200 | { | 2200 | { |
2201 | struct platform_device *pdev = to_platform_device(dev); | ||
2201 | struct musb *musb = dev_to_musb(&pdev->dev); | 2202 | struct musb *musb = dev_to_musb(&pdev->dev); |
2202 | 2203 | ||
2203 | if (!musb->clock) | 2204 | if (!musb->clock) |
@@ -2215,9 +2216,14 @@ static int musb_resume_early(struct platform_device *pdev) | |||
2215 | return 0; | 2216 | return 0; |
2216 | } | 2217 | } |
2217 | 2218 | ||
2219 | static struct dev_pm_ops musb_dev_pm_ops = { | ||
2220 | .suspend = musb_suspend, | ||
2221 | .resume_noirq = musb_resume_noirq, | ||
2222 | }; | ||
2223 | |||
2224 | #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops) | ||
2218 | #else | 2225 | #else |
2219 | #define musb_suspend NULL | 2226 | #define MUSB_DEV_PM_OPS NULL |
2220 | #define musb_resume_early NULL | ||
2221 | #endif | 2227 | #endif |
2222 | 2228 | ||
2223 | static struct platform_driver musb_driver = { | 2229 | static struct platform_driver musb_driver = { |
@@ -2225,11 +2231,10 @@ static struct platform_driver musb_driver = { | |||
2225 | .name = (char *)musb_driver_name, | 2231 | .name = (char *)musb_driver_name, |
2226 | .bus = &platform_bus_type, | 2232 | .bus = &platform_bus_type, |
2227 | .owner = THIS_MODULE, | 2233 | .owner = THIS_MODULE, |
2234 | .pm = MUSB_DEV_PM_OPS, | ||
2228 | }, | 2235 | }, |
2229 | .remove = __devexit_p(musb_remove), | 2236 | .remove = __devexit_p(musb_remove), |
2230 | .shutdown = musb_shutdown, | 2237 | .shutdown = musb_shutdown, |
2231 | .suspend = musb_suspend, | ||
2232 | .resume_early = musb_resume_early, | ||
2233 | }; | 2238 | }; |
2234 | 2239 | ||
2235 | /*-------------------------------------------------------------------------*/ | 2240 | /*-------------------------------------------------------------------------*/ |
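The musb change above is a conversion to the then-new dev_pm_ops model: the legacy .suspend/.resume_early hooks on the platform_driver move into a dev_pm_ops table referenced from driver.pm, and the callbacks take a struct device instead of a platform_device. A minimal sketch of the same conversion for a hypothetical driver named foo:

```c
#include <linux/platform_device.h>

static int foo_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);

        dev_dbg(&pdev->dev, "suspending\n");    /* quiesce hardware here */
        return 0;
}

static int foo_resume_noirq(struct device *dev)
{
        /* Runs before IRQs are re-enabled, like .resume_early did. */
        return 0;
}

static struct dev_pm_ops foo_dev_pm_ops = {
        .suspend        = foo_suspend,
        .resume_noirq   = foo_resume_noirq,
};

static struct platform_driver foo_driver = {
        .driver = {
                .name   = "foo",
                .pm     = &foo_dev_pm_ops,  /* replaces .suspend/.resume_early */
        },
};
```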
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 40ed50ecedff..7a6778675ad3 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c | |||
@@ -407,7 +407,7 @@ stall: | |||
407 | csr |= MUSB_RXCSR_P_SENDSTALL | 407 | csr |= MUSB_RXCSR_P_SENDSTALL |
408 | | MUSB_RXCSR_FLUSHFIFO | 408 | | MUSB_RXCSR_FLUSHFIFO |
409 | | MUSB_RXCSR_CLRDATATOG | 409 | | MUSB_RXCSR_CLRDATATOG |
410 | | MUSB_TXCSR_P_WZC_BITS; | 410 | | MUSB_RXCSR_P_WZC_BITS; |
411 | musb_writew(regs, MUSB_RXCSR, | 411 | musb_writew(regs, MUSB_RXCSR, |
412 | csr); | 412 | csr); |
413 | } | 413 | } |
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index de3b2f18db44..fbfd3fd9ce1f 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h | |||
@@ -323,6 +323,7 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) | |||
323 | 323 | ||
324 | static inline u8 musb_read_configdata(void __iomem *mbase) | 324 | static inline u8 musb_read_configdata(void __iomem *mbase) |
325 | { | 325 | { |
326 | musb_writeb(mbase, MUSB_INDEX, 0); | ||
326 | return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); | 327 | return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); |
327 | } | 328 | } |
328 | 329 | ||
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index e9a40b820fd4..985cbcf48bda 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -80,6 +80,7 @@ static struct usb_device_id id_table [] = { | |||
80 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ | 80 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ |
81 | { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ | 81 | { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ |
82 | { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ | 82 | { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ |
83 | { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ | ||
83 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ | 84 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ |
84 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ | 85 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ |
85 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ | 86 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ |
@@ -96,7 +97,9 @@ static struct usb_device_id id_table [] = { | |||
96 | { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ | 97 | { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ |
97 | { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ | 98 | { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ |
98 | { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ | 99 | { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ |
100 | { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ | ||
99 | { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ | 101 | { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ |
102 | { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ | ||
100 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ | 103 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ |
101 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ | 104 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
102 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ | 105 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 60c64cc5be2a..b574878c78b2 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -698,6 +698,7 @@ static struct usb_device_id id_table_combined [] = { | |||
698 | { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), | 698 | { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), |
699 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 699 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
700 | { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, | 700 | { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, |
701 | { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, | ||
701 | { }, /* Optional parameter entry */ | 702 | { }, /* Optional parameter entry */ |
702 | { } /* Terminating entry */ | 703 | { } /* Terminating entry */ |
703 | }; | 704 | }; |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index c9fbd7415092..24dbd99e87d7 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -947,6 +947,13 @@ | |||
947 | #define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ | 947 | #define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ |
948 | 948 | ||
949 | /* | 949 | /* |
950 | * GN Otometrics (http://www.otometrics.com) | ||
951 | * Submitted by Ville Sundberg. | ||
952 | */ | ||
953 | #define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */ | ||
954 | #define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */ | ||
955 | |||
956 | /* | ||
950 | * BmRequestType: 1100 0000b | 957 | * BmRequestType: 1100 0000b |
951 | * bRequest: FTDI_E2_READ | 958 | * bRequest: FTDI_E2_READ |
952 | * wValue: 0 | 959 | * wValue: 0 |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index c31940a307f8..270009afdf77 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -124,10 +124,13 @@ | |||
124 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | 124 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 |
125 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | 125 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 |
126 | 126 | ||
127 | /* This driver also supports the ATEN UC2324 device since it is mos7840 based | 127 | /* This driver also supports |
128 | * - if I knew the device id it would also support the ATEN UC2322 */ | 128 | * ATEN UC2324 device using Moschip MCS7840 |
129 | * ATEN UC2322 device using Moschip MCS7820 | ||
130 | */ | ||
129 | #define USB_VENDOR_ID_ATENINTL 0x0557 | 131 | #define USB_VENDOR_ID_ATENINTL 0x0557 |
130 | #define ATENINTL_DEVICE_ID_UC2324 0x2011 | 132 | #define ATENINTL_DEVICE_ID_UC2324 0x2011 |
133 | #define ATENINTL_DEVICE_ID_UC2322 0x7820 | ||
131 | 134 | ||
132 | /* Interrupt Routine Defines */ | 135 | /* Interrupt Routine Defines */ |
133 | 136 | ||
@@ -177,6 +180,7 @@ static struct usb_device_id moschip_port_id_table[] = { | |||
177 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | 180 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, |
178 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | 181 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, |
179 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | 182 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
183 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | ||
180 | {} /* terminating entry */ | 184 | {} /* terminating entry */ |
181 | }; | 185 | }; |
182 | 186 | ||
@@ -186,6 +190,7 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = { | |||
186 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | 190 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, |
187 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | 191 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, |
188 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | 192 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
193 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | ||
189 | {} /* terminating entry */ | 194 | {} /* terminating entry */ |
190 | }; | 195 | }; |
191 | 196 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 98262dd552bb..c784ddbe7b61 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -66,8 +66,10 @@ static int option_tiocmget(struct tty_struct *tty, struct file *file); | |||
66 | static int option_tiocmset(struct tty_struct *tty, struct file *file, | 66 | static int option_tiocmset(struct tty_struct *tty, struct file *file, |
67 | unsigned int set, unsigned int clear); | 67 | unsigned int set, unsigned int clear); |
68 | static int option_send_setup(struct usb_serial_port *port); | 68 | static int option_send_setup(struct usb_serial_port *port); |
69 | #ifdef CONFIG_PM | ||
69 | static int option_suspend(struct usb_serial *serial, pm_message_t message); | 70 | static int option_suspend(struct usb_serial *serial, pm_message_t message); |
70 | static int option_resume(struct usb_serial *serial); | 71 | static int option_resume(struct usb_serial *serial); |
72 | #endif | ||
71 | 73 | ||
72 | /* Vendor and product IDs */ | 74 | /* Vendor and product IDs */ |
73 | #define OPTION_VENDOR_ID 0x0AF0 | 75 | #define OPTION_VENDOR_ID 0x0AF0 |
@@ -205,6 +207,7 @@ static int option_resume(struct usb_serial *serial); | |||
205 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 | 207 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 |
206 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 | 208 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 |
207 | #define NOVATELWIRELESS_PRODUCT_U727 0x5010 | 209 | #define NOVATELWIRELESS_PRODUCT_U727 0x5010 |
210 | #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 | ||
208 | #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 | 211 | #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 |
209 | #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 | 212 | #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 |
210 | 213 | ||
@@ -259,11 +262,6 @@ static int option_resume(struct usb_serial *serial); | |||
259 | #define AXESSTEL_VENDOR_ID 0x1726 | 262 | #define AXESSTEL_VENDOR_ID 0x1726 |
260 | #define AXESSTEL_PRODUCT_MV110H 0x1000 | 263 | #define AXESSTEL_PRODUCT_MV110H 0x1000 |
261 | 264 | ||
262 | #define ONDA_VENDOR_ID 0x19d2 | ||
263 | #define ONDA_PRODUCT_MSA501HS 0x0001 | ||
264 | #define ONDA_PRODUCT_ET502HS 0x0002 | ||
265 | #define ONDA_PRODUCT_MT503HS 0x2000 | ||
266 | |||
267 | #define BANDRICH_VENDOR_ID 0x1A8D | 265 | #define BANDRICH_VENDOR_ID 0x1A8D |
268 | #define BANDRICH_PRODUCT_C100_1 0x1002 | 266 | #define BANDRICH_PRODUCT_C100_1 0x1002 |
269 | #define BANDRICH_PRODUCT_C100_2 0x1003 | 267 | #define BANDRICH_PRODUCT_C100_2 0x1003 |
@@ -301,6 +299,7 @@ static int option_resume(struct usb_serial *serial); | |||
301 | #define ZTE_PRODUCT_MF628 0x0015 | 299 | #define ZTE_PRODUCT_MF628 0x0015 |
302 | #define ZTE_PRODUCT_MF626 0x0031 | 300 | #define ZTE_PRODUCT_MF626 0x0031 |
303 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe | 301 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe |
302 | #define ZTE_PRODUCT_AC8710 0xfff1 | ||
304 | 303 | ||
305 | #define BENQ_VENDOR_ID 0x04a5 | 304 | #define BENQ_VENDOR_ID 0x04a5 |
306 | #define BENQ_PRODUCT_H10 0x4068 | 305 | #define BENQ_PRODUCT_H10 0x4068 |
@@ -322,6 +321,11 @@ static int option_resume(struct usb_serial *serial); | |||
322 | #define ALINK_VENDOR_ID 0x1e0e | 321 | #define ALINK_VENDOR_ID 0x1e0e |
323 | #define ALINK_PRODUCT_3GU 0x9200 | 322 | #define ALINK_PRODUCT_3GU 0x9200 |
324 | 323 | ||
324 | /* ALCATEL PRODUCTS */ | ||
325 | #define ALCATEL_VENDOR_ID 0x1bbb | ||
326 | #define ALCATEL_PRODUCT_X060S 0x0000 | ||
327 | |||
328 | |||
325 | static struct usb_device_id option_ids[] = { | 329 | static struct usb_device_id option_ids[] = { |
326 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 330 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
327 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 331 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
@@ -438,6 +442,7 @@ static struct usb_device_id option_ids[] = { | |||
438 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ | 442 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ |
439 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ | 443 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ |
440 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ | 444 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ |
445 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ | ||
441 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ | 446 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ |
442 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ | 447 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ |
443 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ | 448 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ |
@@ -474,42 +479,6 @@ static struct usb_device_id option_ids[] = { | |||
474 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, | 479 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
475 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, | 480 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, |
476 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, | 481 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, |
477 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) }, | ||
478 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, | ||
479 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0003) }, | ||
480 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0004) }, | ||
481 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0005) }, | ||
482 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0006) }, | ||
483 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0007) }, | ||
484 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0008) }, | ||
485 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0009) }, | ||
486 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000a) }, | ||
487 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000b) }, | ||
488 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000c) }, | ||
489 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000d) }, | ||
490 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000e) }, | ||
491 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000f) }, | ||
492 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0010) }, | ||
493 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0011) }, | ||
494 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0012) }, | ||
495 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0013) }, | ||
496 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0014) }, | ||
497 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0015) }, | ||
498 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0016) }, | ||
499 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0017) }, | ||
500 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0018) }, | ||
501 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0019) }, | ||
502 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0020) }, | ||
503 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0021) }, | ||
504 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0022) }, | ||
505 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0023) }, | ||
506 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0024) }, | ||
507 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0025) }, | ||
508 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0026) }, | ||
509 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0027) }, | ||
510 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0028) }, | ||
511 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0029) }, | ||
512 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) }, | ||
513 | { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, | 482 | { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, |
514 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, | 483 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, |
515 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, | 484 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, |
@@ -534,10 +503,75 @@ static struct usb_device_id option_ids[] = {
534 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ | 503 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
535 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ | 504 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ |
536 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, | 505 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, |
537 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) }, | 506 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ |
538 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) }, | 507 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) }, |
539 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, | 508 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) }, |
540 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, | 509 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) }, |
510 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) }, | ||
511 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) }, | ||
512 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) }, | ||
513 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) }, | ||
514 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) }, | ||
515 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) }, | ||
516 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) }, | ||
517 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) }, | ||
518 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) }, | ||
519 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) }, | ||
520 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) }, | ||
521 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) }, | ||
522 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, | ||
523 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) }, | ||
524 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, | ||
525 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, | ||
526 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, | ||
527 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) }, | ||
528 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, | ||
529 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, | ||
530 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, | ||
531 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) }, | ||
532 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) }, | ||
533 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, | ||
534 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, | ||
535 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) }, | ||
536 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, | ||
537 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, | ||
538 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, | ||
539 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, | ||
540 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, | ||
541 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, | ||
542 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, | ||
543 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) }, | ||
544 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, | ||
545 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) }, | ||
546 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, | ||
547 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, | ||
548 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) }, | ||
549 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, | ||
550 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) }, | ||
551 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, | ||
552 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) }, | ||
553 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, | ||
554 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) }, | ||
555 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, | ||
556 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, | ||
557 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) }, | ||
558 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, | ||
559 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, | ||
560 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, | ||
561 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, | ||
562 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, | ||
563 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, | ||
564 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, | ||
565 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, | ||
566 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, | ||
567 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ | ||
568 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, | ||
569 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, | ||
570 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, | ||
571 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, | ||
572 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, | ||
573 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, | ||
574 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, | ||
541 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, | 575 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, |
542 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, | 576 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, |
543 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, | 577 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, |
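The ZTE entries above switch from USB_DEVICE() to USB_DEVICE_AND_INTERFACE_INFO(vid, pid, 0xff, 0xff, 0xff), which matches on the interface class/subclass/protocol triple in addition to the vendor/product pair. The usual motivation: these modems also expose non-serial interfaces (for example a mass-storage "driver CD" interface), and restricting the match to vendor-specific (0xff) interfaces keeps the option driver from binding to those. For reference, the macro expands roughly as below (paraphrased from include/linux/usb.h; treat the exact layout as approximate):

	#define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \
		.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
			USB_DEVICE_ID_MATCH_INT_INFO, \
		.idVendor = (vend), \
		.idProduct = (prod), \
		.bInterfaceClass = (cl), \
		.bInterfaceSubClass = (sc), \
		.bInterfaceProtocol = (pr)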
@@ -547,6 +581,7 @@ static struct usb_device_id option_ids[] = {
547 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ | 581 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ |
548 | { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, | 582 | { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, |
549 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, | 583 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, |
584 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, | ||
550 | { } /* Terminating entry */ | 585 | { } /* Terminating entry */ |
551 | }; | 586 | }; |
552 | MODULE_DEVICE_TABLE(usb, option_ids); | 587 | MODULE_DEVICE_TABLE(usb, option_ids); |
@@ -555,8 +590,10 @@ static struct usb_driver option_driver = {
555 | .name = "option", | 590 | .name = "option", |
556 | .probe = usb_serial_probe, | 591 | .probe = usb_serial_probe, |
557 | .disconnect = usb_serial_disconnect, | 592 | .disconnect = usb_serial_disconnect, |
593 | #ifdef CONFIG_PM | ||
558 | .suspend = usb_serial_suspend, | 594 | .suspend = usb_serial_suspend, |
559 | .resume = usb_serial_resume, | 595 | .resume = usb_serial_resume, |
596 | #endif | ||
560 | .id_table = option_ids, | 597 | .id_table = option_ids, |
561 | .no_dynamic_id = 1, | 598 | .no_dynamic_id = 1, |
562 | }; | 599 | }; |
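The #ifdef CONFIG_PM guards added here pair with the guards placed around the option_suspend()/option_resume() definitions later in this patch: once those handlers only exist when power management is compiled in, any unguarded reference to them would break a CONFIG_PM=n build. A minimal sketch of the pattern, with hypothetical foo_* names:

	#ifdef CONFIG_PM
	static int foo_suspend(struct usb_serial *serial, pm_message_t message)
	{
		return 0;	/* nothing to quiesce in this sketch */
	}

	static int foo_resume(struct usb_serial *serial)
	{
		return 0;
	}
	#endif

	static struct usb_serial_driver foo_device = {
		/* ...other fields... */
	#ifdef CONFIG_PM
		.suspend = foo_suspend,
		.resume = foo_resume,
	#endif
	};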
@@ -588,8 +625,10 @@ static struct usb_serial_driver option_1port_device = {
588 | .disconnect = option_disconnect, | 625 | .disconnect = option_disconnect, |
589 | .release = option_release, | 626 | .release = option_release, |
590 | .read_int_callback = option_instat_callback, | 627 | .read_int_callback = option_instat_callback, |
628 | #ifdef CONFIG_PM | ||
591 | .suspend = option_suspend, | 629 | .suspend = option_suspend, |
592 | .resume = option_resume, | 630 | .resume = option_resume, |
631 | #endif | ||
593 | }; | 632 | }; |
594 | 633 | ||
595 | static int debug; | 634 | static int debug; |
@@ -831,7 +870,6 @@ static void option_instat_callback(struct urb *urb)
831 | int status = urb->status; | 870 | int status = urb->status; |
832 | struct usb_serial_port *port = urb->context; | 871 | struct usb_serial_port *port = urb->context; |
833 | struct option_port_private *portdata = usb_get_serial_port_data(port); | 872 | struct option_port_private *portdata = usb_get_serial_port_data(port); |
834 | struct usb_serial *serial = port->serial; | ||
835 | 873 | ||
836 | dbg("%s", __func__); | 874 | dbg("%s", __func__); |
837 | dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); | 875 | dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); |
@@ -927,7 +965,6 @@ static int option_open(struct tty_struct *tty,
927 | struct usb_serial_port *port, struct file *filp) | 965 | struct usb_serial_port *port, struct file *filp) |
928 | { | 966 | { |
929 | struct option_port_private *portdata; | 967 | struct option_port_private *portdata; |
930 | struct usb_serial *serial = port->serial; | ||
931 | int i, err; | 968 | int i, err; |
932 | struct urb *urb; | 969 | struct urb *urb; |
933 | 970 | ||
@@ -1187,6 +1224,7 @@ static void option_release(struct usb_serial *serial)
1187 | } | 1224 | } |
1188 | } | 1225 | } |
1189 | 1226 | ||
1227 | #ifdef CONFIG_PM | ||
1190 | static int option_suspend(struct usb_serial *serial, pm_message_t message) | 1228 | static int option_suspend(struct usb_serial *serial, pm_message_t message) |
1191 | { | 1229 | { |
1192 | dbg("%s entered", __func__); | 1230 | dbg("%s entered", __func__); |
@@ -1245,6 +1283,7 @@ static int option_resume(struct usb_serial *serial)
1245 | } | 1283 | } |
1246 | return 0; | 1284 | return 0; |
1247 | } | 1285 | } |
1286 | #endif | ||
1248 | 1287 | ||
1249 | MODULE_AUTHOR(DRIVER_AUTHOR); | 1288 | MODULE_AUTHOR(DRIVER_AUTHOR); |
1250 | MODULE_DESCRIPTION(DRIVER_DESC); | 1289 | MODULE_DESCRIPTION(DRIVER_DESC); |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index bd7581b3a48a..99188c92068b 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -32,6 +32,7 @@
32 | #include <linux/mutex.h> | 32 | #include <linux/mutex.h> |
33 | #include <linux/list.h> | 33 | #include <linux/list.h> |
34 | #include <linux/uaccess.h> | 34 | #include <linux/uaccess.h> |
35 | #include <linux/serial.h> | ||
35 | #include <linux/usb.h> | 36 | #include <linux/usb.h> |
36 | #include <linux/usb/serial.h> | 37 | #include <linux/usb/serial.h> |
37 | #include "pl2303.h" | 38 | #include "pl2303.h" |
@@ -184,6 +185,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
184 | struct usb_serial_port *port; | 185 | struct usb_serial_port *port; |
185 | unsigned int portNumber; | 186 | unsigned int portNumber; |
186 | int retval = 0; | 187 | int retval = 0; |
188 | int first = 0; | ||
187 | 189 | ||
188 | dbg("%s", __func__); | 190 | dbg("%s", __func__); |
189 | 191 | ||
@@ -223,7 +225,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
223 | 225 | ||
224 | /* If the console is attached, the device is already open */ | 226 | /* If the console is attached, the device is already open */ |
225 | if (port->port.count == 1 && !port->console) { | 227 | if (port->port.count == 1 && !port->console) { |
226 | 228 | first = 1; | |
227 | /* lock this module before we call it | 229 | /* lock this module before we call it |
228 | * this may fail, which means we must bail out, | 230 | * this may fail, which means we must bail out, |
229 | * safe because we are called with BKL held */ | 231 | * safe because we are called with BKL held */ |
@@ -246,13 +248,21 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
246 | if (retval) | 248 | if (retval) |
247 | goto bailout_interface_put; | 249 | goto bailout_interface_put; |
248 | mutex_unlock(&serial->disc_mutex); | 250 | mutex_unlock(&serial->disc_mutex); |
251 | set_bit(ASYNCB_INITIALIZED, &port->port.flags); | ||
249 | } | 252 | } |
250 | mutex_unlock(&port->mutex); | 253 | mutex_unlock(&port->mutex); |
251 | /* Now do the correct tty layer semantics */ | 254 | /* Now do the correct tty layer semantics */ |
252 | retval = tty_port_block_til_ready(&port->port, tty, filp); | 255 | retval = tty_port_block_til_ready(&port->port, tty, filp); |
253 | if (retval == 0) | 256 | if (retval == 0) { |
257 | if (!first) | ||
258 | usb_serial_put(serial); | ||
254 | return 0; | 259 | return 0; |
255 | 260 | } | |
261 | mutex_lock(&port->mutex); | ||
262 | if (first == 0) | ||
263 | goto bailout_mutex_unlock; | ||
264 | /* Undo the initial port actions */ | ||
265 | mutex_lock(&serial->disc_mutex); | ||
256 | bailout_interface_put: | 266 | bailout_interface_put: |
257 | usb_autopm_put_interface(serial->interface); | 267 | usb_autopm_put_interface(serial->interface); |
258 | bailout_module_put: | 268 | bailout_module_put: |
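The new first flag records whether this open performed the one-time port setup. The lookup earlier in serial_open() takes a reference on the usb_serial for every open, but only the first opener is meant to keep it: a successful re-open drops its extra reference via usb_serial_put(), and a failed re-open jumps to bailout_mutex_unlock rather than unwinding setup it never did. A user-space analogue of the accounting, purely illustrative:

	#include <stdbool.h>

	struct dev { int refs; bool up; };

	static int dev_open(struct dev *d)
	{
		bool first = false;

		d->refs++;		/* every lookup takes a reference */
		if (!d->up) {
			first = true;
			d->up = true;	/* one-time setup; this ref is kept */
		}
		if (!first)
			d->refs--;	/* re-opens hold no extra reference */
		return 0;
	}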
@@ -340,6 +350,22 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
340 | 350 | ||
341 | dbg("%s - port %d", __func__, port->number); | 351 | dbg("%s - port %d", __func__, port->number); |
342 | 352 | ||
353 | /* FIXME: | ||
354 | This leaves a very narrow race. Really we should do the | ||
355 | serial_do_free() on tty->shutdown(), but tty->shutdown can | ||
356 | be called from IRQ context and serial_do_free can sleep. | ||
357 | |||
358 | The right fix is probably to make tty freeing (which is rare), | ||
359 | and thus tty->shutdown(), happen via a work queue, simplifying | ||
360 | all the drivers that use it. | ||
361 | */ | ||
362 | if (tty_hung_up_p(filp)) { | ||
363 | /* serial_hangup already called serial_down at this point. | ||
364 | Another user may have already reopened the port but | ||
365 | serial_do_free is refcounted */ | ||
366 | serial_do_free(port); | ||
367 | return; | ||
368 | } | ||
343 | 369 | ||
344 | if (tty_port_close_start(&port->port, tty, filp) == 0) | 370 | if (tty_port_close_start(&port->port, tty, filp) == 0) |
345 | return; | 371 | return; |
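The FIXME above names the real constraint: tty->shutdown() can run in IRQ context while serial_do_free() may sleep, so the free would have to be bounced to process context. A minimal sketch of the work-queue deferral it suggests, assuming a serial_do_free() that may sleep; everything except the workqueue API itself is hypothetical:

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct port_free_work {
		struct work_struct work;
		struct usb_serial_port *port;
	};

	static void port_free_fn(struct work_struct *work)
	{
		struct port_free_work *pfw =
			container_of(work, struct port_free_work, work);

		serial_do_free(pfw->port);	/* may sleep: process context */
		kfree(pfw);
	}

	/* Callable from IRQ context, e.g. from a tty->shutdown() hook. */
	static void schedule_port_free(struct usb_serial_port *port)
	{
		struct port_free_work *pfw = kmalloc(sizeof(*pfw), GFP_ATOMIC);

		if (!pfw)
			return;
		pfw->port = port;
		INIT_WORK(&pfw->work, port_free_fn);
		schedule_work(&pfw->work);
	}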
@@ -355,7 +381,8 @@ static void serial_hangup(struct tty_struct *tty)
355 | struct usb_serial_port *port = tty->driver_data; | 381 | struct usb_serial_port *port = tty->driver_data; |
356 | serial_do_down(port); | 382 | serial_do_down(port); |
357 | tty_port_hangup(&port->port); | 383 | tty_port_hangup(&port->port); |
358 | serial_do_free(port); | 384 | /* We must not free port yet - the USB serial layer depends on its |
385 | continued existence */ | ||
359 | } | 386 | } |
360 | 387 | ||
361 | static int serial_write(struct tty_struct *tty, const unsigned char *buf, | 388 | static int serial_write(struct tty_struct *tty, const unsigned char *buf, |
@@ -394,7 +421,6 @@ static int serial_chars_in_buffer(struct tty_struct *tty)
394 | struct usb_serial_port *port = tty->driver_data; | 421 | struct usb_serial_port *port = tty->driver_data; |
395 | dbg("%s = port %d", __func__, port->number); | 422 | dbg("%s = port %d", __func__, port->number); |
396 | 423 | ||
397 | WARN_ON(!port->port.count); | ||
398 | /* if the device was unplugged then any remaining characters | 424 | /* if the device was unplugged then any remaining characters |
399 | fell out of the connector ;) */ | 425 | fell out of the connector ;) */ |
400 | if (port->serial->disconnected) | 426 | if (port->serial->disconnected) |
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index fcb320217218..e20dc525d177 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
961 | US_BULK_GET_MAX_LUN, | 961 | US_BULK_GET_MAX_LUN, |
962 | USB_DIR_IN | USB_TYPE_CLASS | | 962 | USB_DIR_IN | USB_TYPE_CLASS | |
963 | USB_RECIP_INTERFACE, | 963 | USB_RECIP_INTERFACE, |
964 | 0, us->ifnum, us->iobuf, 1, HZ); | 964 | 0, us->ifnum, us->iobuf, 1, 10*HZ); |
965 | 965 | ||
966 | US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", | 966 | US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", |
967 | result, us->iobuf[0]); | 967 | result, us->iobuf[0]); |
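The transport.c change is purely a timeout bump: the timeout argument to usb_stor_control_msg() is a jiffies count, so HZ is one second and 10*HZ is ten, giving slow devices more time to answer Get Max LUN before the request is failed. For illustration:

	#include <linux/jiffies.h>

	/* HZ jiffies == 1 second, whatever the configured tick rate */
	unsigned long old_timeout = HZ;			/* 1 s  */
	unsigned long new_timeout = 10 * HZ;		/* 10 s */
	unsigned long equivalent  = msecs_to_jiffies(10000);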