 drivers/usb/host/xhci-mem.c  | 379
 drivers/usb/host/xhci-ring.c |   9
 drivers/usb/host/xhci.c      | 399
 drivers/usb/host/xhci.h      |  84
 4 files changed, 857 insertions(+), 14 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d64f5724bfc4..d299ffad806b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -304,6 +304,350 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
 }
 
+
+/***************** Streams structures manipulation *************************/
+
+void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+		unsigned int num_stream_ctxs,
+		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
+{
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
+		pci_free_consistent(pdev,
+				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+				stream_ctx, dma);
+	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+		return dma_pool_free(xhci->small_streams_pool,
+				stream_ctx, dma);
+	else
+		return dma_pool_free(xhci->medium_streams_pool,
+				stream_ctx, dma);
+}
+
+/*
+ * The stream context array for each endpoint with bulk streams enabled can
+ * vary in size, based on:
+ *  - how many streams the endpoint supports,
+ *  - the maximum primary stream array size the host controller supports,
+ *  - and how many streams the device driver asks for.
+ *
+ * The stream context array must be a power of 2, and can be as small as
+ * 64 bytes or as large as 1MB.
+ */
+struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+		unsigned int num_stream_ctxs, dma_addr_t *dma,
+		gfp_t mem_flags)
+{
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
+		return pci_alloc_consistent(pdev,
+				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+				dma);
+	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+		return dma_pool_alloc(xhci->small_streams_pool,
+				mem_flags, dma);
+	else
+		return dma_pool_alloc(xhci->medium_streams_pool,
+				mem_flags, dma);
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+struct xhci_ring *dma_to_stream_ring(
+		struct xhci_stream_info *stream_info,
+		u64 address)
+{
+	return radix_tree_lookup(&stream_info->trb_address_map,
+			address >> SEGMENT_SHIFT);
+}
+#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+static int xhci_test_radix_tree(struct xhci_hcd *xhci,
+		unsigned int num_streams,
+		struct xhci_stream_info *stream_info)
+{
+	u32 cur_stream;
+	struct xhci_ring *cur_ring;
+	u64 addr;
+
+	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+		struct xhci_ring *mapped_ring;
+		int trb_size = sizeof(union xhci_trb);
+
+		cur_ring = stream_info->stream_rings[cur_stream];
+		for (addr = cur_ring->first_seg->dma;
+				addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
+				addr += trb_size) {
+			mapped_ring = dma_to_stream_ring(stream_info, addr);
+			if (cur_ring != mapped_ring) {
+				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
+						"didn't map to stream ID %u; "
+						"mapped to ring %p\n",
+						(unsigned long long) addr,
+						cur_stream,
+						mapped_ring);
+				return -EINVAL;
+			}
+		}
+		/* One TRB after the end of the ring segment shouldn't return a
+		 * pointer to the current ring (although it may be a part of a
+		 * different ring).
+		 */
+		mapped_ring = dma_to_stream_ring(stream_info, addr);
+		if (mapped_ring != cur_ring) {
+			/* One TRB before should also fail */
+			addr = cur_ring->first_seg->dma - trb_size;
+			mapped_ring = dma_to_stream_ring(stream_info, addr);
+		}
+		if (mapped_ring == cur_ring) {
+			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
+					"mapped to valid stream ID %u; "
+					"mapped ring = %p\n",
+					(unsigned long long) addr,
+					cur_stream,
+					mapped_ring);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
+
+/*
+ * Change an endpoint's internal structure so it supports stream IDs.  The
+ * number of requested streams includes stream 0, which cannot be used by device
+ * drivers.
+ *
+ * The number of stream contexts in the stream context array may be bigger than
+ * the number of streams the driver wants to use.  This is because the number of
+ * stream context array entries must be a power of two.
+ *
+ * We need a radix tree for mapping physical addresses of TRBs to which stream
+ * ID they belong to.  We need to do this because the host controller won't tell
+ * us which stream ring the TRB came from.  We could store the stream ID in an
+ * event data TRB, but that doesn't help us for the cancellation case, since the
+ * endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses.  For example, say I
+ * have segments of size 1KB, that are always 64-byte aligned.  A segment may
+ * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
+ * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ *	0x10c90fff >> 10 = 0x43243
+ *	0x10c912c0 >> 10 = 0x43244
+ *	0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
+ */
+struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+		unsigned int num_stream_ctxs,
+		unsigned int num_streams, gfp_t mem_flags)
+{
+	struct xhci_stream_info *stream_info;
+	u32 cur_stream;
+	struct xhci_ring *cur_ring;
+	unsigned long key;
+	u64 addr;
+	int ret;
+
+	xhci_dbg(xhci, "Allocating %u streams and %u "
+			"stream context array entries.\n",
+			num_streams, num_stream_ctxs);
+	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
+		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
+		return NULL;
+	}
+	xhci->cmd_ring_reserved_trbs++;
+
+	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
+	if (!stream_info)
+		goto cleanup_trbs;
+
+	stream_info->num_streams = num_streams;
+	stream_info->num_stream_ctxs = num_stream_ctxs;
+
+	/* Initialize the array of virtual pointers to stream rings. */
+	stream_info->stream_rings = kzalloc(
+			sizeof(struct xhci_ring *)*num_streams,
+			mem_flags);
+	if (!stream_info->stream_rings)
+		goto cleanup_info;
+
+	/* Initialize the array of DMA addresses for stream rings for the HW. */
+	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
+			num_stream_ctxs, &stream_info->ctx_array_dma,
+			mem_flags);
+	if (!stream_info->stream_ctx_array)
+		goto cleanup_ctx;
+	memset(stream_info->stream_ctx_array, 0,
+			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
+
+	/* Allocate everything needed to free the stream rings later */
+	stream_info->free_streams_command =
+		xhci_alloc_command(xhci, true, true, mem_flags);
+	if (!stream_info->free_streams_command)
+		goto cleanup_ctx;
+
+	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
+
+	/* Allocate rings for all the streams that the driver will use,
+	 * and add their segment DMA addresses to the radix tree.
+	 * Stream 0 is reserved.
+	 */
+	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+		stream_info->stream_rings[cur_stream] =
+			xhci_ring_alloc(xhci, 1, true, mem_flags);
+		cur_ring = stream_info->stream_rings[cur_stream];
+		if (!cur_ring)
+			goto cleanup_rings;
+		/* Set deq ptr, cycle bit, and stream context type */
+		addr = cur_ring->first_seg->dma |
+			SCT_FOR_CTX(SCT_PRI_TR) |
+			cur_ring->cycle_state;
+		stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
+		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
+				cur_stream, (unsigned long long) addr);
+
+		key = (unsigned long)
+			(cur_ring->first_seg->dma >> SEGMENT_SHIFT);
+		ret = radix_tree_insert(&stream_info->trb_address_map,
+				key, cur_ring);
+		if (ret) {
+			xhci_ring_free(xhci, cur_ring);
+			stream_info->stream_rings[cur_stream] = NULL;
+			goto cleanup_rings;
+		}
+	}
+	/* Leave the other unused stream ring pointers in the stream context
+	 * array initialized to zero.  This will cause the xHC to give us an
+	 * error if the device asks for a stream ID we don't have setup (if it
+	 * was any other way, the host controller would assume the ring is
+	 * "empty" and wait forever for data to be queued to that stream ID).
+	 */
+#if XHCI_DEBUG
+	/* Do a little test on the radix tree to make sure it returns the
+	 * correct values.
+	 */
+	if (xhci_test_radix_tree(xhci, num_streams, stream_info))
+		goto cleanup_rings;
+#endif
+
+	return stream_info;
+
+cleanup_rings:
+	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+		cur_ring = stream_info->stream_rings[cur_stream];
+		if (cur_ring) {
+			addr = cur_ring->first_seg->dma;
+			radix_tree_delete(&stream_info->trb_address_map,
+					addr >> SEGMENT_SHIFT);
+			xhci_ring_free(xhci, cur_ring);
+			stream_info->stream_rings[cur_stream] = NULL;
+		}
+	}
+	xhci_free_command(xhci, stream_info->free_streams_command);
+cleanup_ctx:
+	kfree(stream_info->stream_rings);
+cleanup_info:
+	kfree(stream_info);
+cleanup_trbs:
+	xhci->cmd_ring_reserved_trbs--;
+	return NULL;
+}
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field.
+ * Sets the dequeue pointer to the stream context array.
+ */
+void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
+		struct xhci_ep_ctx *ep_ctx,
+		struct xhci_stream_info *stream_info)
+{
+	u32 max_primary_streams;
+	/* MaxPStreams is the number of stream context array entries, not the
+	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
+	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
+	 */
+	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
+	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
+			1 << (max_primary_streams + 1));
+	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
+	ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
+	ep_ctx->ep_info |= EP_HAS_LSA;
+	ep_ctx->deq = stream_info->ctx_array_dma;
+}
+
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field to 0.
+ * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
+ * not at the beginning of the ring).
+ */
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
+		struct xhci_ep_ctx *ep_ctx,
+		struct xhci_virt_ep *ep)
+{
+	dma_addr_t addr;
+	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
+	ep_ctx->ep_info &= ~EP_HAS_LSA;
+	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
+	ep_ctx->deq = addr | ep->ring->cycle_state;
+}
+
+/* Frees all stream contexts associated with the endpoint.
+ *
+ * Caller should fix the endpoint context streams fields.
+ */
+void xhci_free_stream_info(struct xhci_hcd *xhci,
+		struct xhci_stream_info *stream_info)
+{
+	int cur_stream;
+	struct xhci_ring *cur_ring;
+	dma_addr_t addr;
+
+	if (!stream_info)
+		return;
+
+	for (cur_stream = 1; cur_stream < stream_info->num_streams;
+			cur_stream++) {
+		cur_ring = stream_info->stream_rings[cur_stream];
+		if (cur_ring) {
+			addr = cur_ring->first_seg->dma;
+			radix_tree_delete(&stream_info->trb_address_map,
+					addr >> SEGMENT_SHIFT);
+			xhci_ring_free(xhci, cur_ring);
+			stream_info->stream_rings[cur_stream] = NULL;
+		}
+	}
+	xhci_free_command(xhci, stream_info->free_streams_command);
+	xhci->cmd_ring_reserved_trbs--;
+	if (stream_info->stream_ctx_array)
+		xhci_free_stream_ctx(xhci,
+				stream_info->num_stream_ctxs,
+				stream_info->stream_ctx_array,
+				stream_info->ctx_array_dma);
+
+	if (stream_info)
+		kfree(stream_info->stream_rings);
+	kfree(stream_info);
+}
+
+
+/***************** Device context manipulation *************************/
+
 static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
 		struct xhci_virt_ep *ep)
 {
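
The radix tree keying described in the comment block above can be checked with plain arithmetic. Below is a minimal standalone user-space sketch, not part of the patch: SEGMENT_SIZE and SEGMENT_SHIFT mirror the values this series adds to xhci.h, and the sample addresses are the ones from the comment.

#include <stdint.h>
#include <stdio.h>

#define SEGMENT_SIZE	1024	/* TRBS_PER_SEGMENT (64) * 16 bytes per TRB */
#define SEGMENT_SHIFT	10	/* log2(SEGMENT_SIZE) */

int main(void)
{
	/* TRB DMA addresses just below, inside, and just above the
	 * example segment that starts at 0x10c91000.
	 */
	uint64_t addrs[] = { 0x10c90fff, 0x10c912c0, 0x10c91400 };
	int i;

	for (i = 0; i < 3; i++)
		/* Every TRB inside one segment shares the same key, so a
		 * single radix_tree_insert() per segment maps all of its
		 * TRBs back to the owning stream ring.
		 */
		printf("0x%08llx >> %d = 0x%llx\n",
				(unsigned long long)addrs[i], SEGMENT_SHIFT,
				(unsigned long long)(addrs[i] >> SEGMENT_SHIFT));
	return 0;
}
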
@@ -328,9 +672,13 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 	if (!dev)
 		return;
 
-	for (i = 0; i < 31; ++i)
+	for (i = 0; i < 31; ++i) {
 		if (dev->eps[i].ring)
 			xhci_ring_free(xhci, dev->eps[i].ring);
+		if (dev->eps[i].stream_info)
+			xhci_free_stream_info(xhci,
+					dev->eps[i].stream_info);
+	}
 
 	if (dev->ring_cache) {
 		for (i = 0; i < dev->num_rings_cached; i++)
@@ -655,6 +1003,9 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
 	return max_packet * (max_burst + 1);
 }
 
+/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
+ * Drivers will have to call usb_alloc_streams() to do that.
+ */
 int xhci_endpoint_init(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		struct usb_device *udev,
@@ -1003,6 +1354,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->device_pool = NULL;
 	xhci_dbg(xhci, "Freed device context pool\n");
 
+	if (xhci->small_streams_pool)
+		dma_pool_destroy(xhci->small_streams_pool);
+	xhci->small_streams_pool = NULL;
+	xhci_dbg(xhci, "Freed small stream array pool\n");
+
+	if (xhci->medium_streams_pool)
+		dma_pool_destroy(xhci->medium_streams_pool);
+	xhci->medium_streams_pool = NULL;
+	xhci_dbg(xhci, "Freed medium stream array pool\n");
+
 	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 	if (xhci->dcbaa)
 		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
@@ -1239,6 +1600,22 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	if (!xhci->segment_pool || !xhci->device_pool)
 		goto fail;
 
+	/* Linear stream context arrays don't have any boundary restrictions,
+	 * and only need to be 16-byte aligned.
+	 */
+	xhci->small_streams_pool =
+		dma_pool_create("xHCI 256 byte stream ctx arrays",
+			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
+	xhci->medium_streams_pool =
+		dma_pool_create("xHCI 1KB stream ctx arrays",
+			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
+	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
+	 * will be allocated with pci_alloc_consistent()
+	 */
+
+	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
+		goto fail;
+
 	/* Set up the command ring to have one segments for now. */
 	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
 	if (!xhci->cmd_ring)
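
The two dma_pools created here back the size-tiered dispatch in xhci_alloc_stream_ctx() further up: arrays up to SMALL_STREAM_ARRAY_SIZE come from the 256-byte pool, arrays up to MEDIUM_STREAM_ARRAY_SIZE from the 1KB pool, and anything larger goes through pci_alloc_consistent(). A user-space sketch of that ladder with the patch's constants follows; note that the thresholds are compared against the context count while the pool blocks above are sized in bytes, at 16 bytes per context, which is worth keeping in mind when reusing the pattern.

#include <stdio.h>

#define SMALL_STREAM_ARRAY_SIZE		256
#define MEDIUM_STREAM_ARRAY_SIZE	1024

/* Mirrors the if/else ladder in xhci_alloc_stream_ctx(). */
static const char *stream_ctx_allocator(unsigned int num_stream_ctxs)
{
	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return "pci_alloc_consistent";	/* one-off coherent buffer */
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return "small_streams_pool";	/* 256-byte pool blocks */
	else
		return "medium_streams_pool";	/* 1KB pool blocks */
}

int main(void)
{
	unsigned int sizes[] = { 4, 64, 256, 512, 1024, 2048 };
	int i;

	for (i = 0; i < 6; i++)
		printf("%4u stream ctxs (%5u bytes) -> %s\n", sizes[i],
				sizes[i] * 16, stream_ctx_allocator(sizes[i]));
	return 0;
}
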
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c1359ed310b5..a14f657e279b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -323,6 +323,10 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 	ep_state = ep->ep_state;
 	/* Don't ring the doorbell for this endpoint if there are pending
 	 * cancellations because the we don't want to interrupt processing.
+	 * We don't want to restart any stream rings if there's a set dequeue
+	 * pointer command pending because the device can choose to start any
+	 * stream once the endpoint is on the HW schedule.
+	 * FIXME - check all the stream rings for pending cancellations.
 	 */
 	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
 			&& !(ep_state & EP_HALTED)) {
@@ -916,8 +920,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		 * Configure endpoint commands can come from the USB core
 		 * configuration or alt setting changes, or because the HW
 		 * needed an extra configure endpoint command after a reset
-		 * endpoint command.  In the latter case, the xHCI driver is
-		 * not waiting on the configure endpoint command.
+		 * endpoint command or streams were being configured.
+		 * If the command was for a halted endpoint, the xHCI driver
+		 * is not waiting on the configure endpoint command.
 		 */
 		ctrl_ctx = xhci_get_input_control_ctx(xhci,
 				virt_dev->in_ctx);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 077dfcd57dc9..2e370fea9590 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
@@ -726,8 +727,21 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		spin_lock_irqsave(&xhci->lock, flags);
 		if (xhci->xhc_state & XHCI_STATE_DYING)
 			goto dying;
-		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
-				slot_id, ep_index);
+		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
+				EP_GETTING_STREAMS) {
+			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
+					"is transitioning to using streams.\n");
+			ret = -EINVAL;
+		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
+				EP_GETTING_NO_STREAMS) {
+			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
+					"is transitioning to "
+					"not having streams.\n");
+			ret = -EINVAL;
+		} else {
+			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
+					slot_id, ep_index);
+		}
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
@@ -1446,6 +1460,387 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
 	xhci_warn(xhci, "FIXME allocate a new ring segment\n");
 }
 
+static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
+		struct usb_device *udev, struct usb_host_endpoint *ep,
+		unsigned int slot_id)
+{
+	int ret;
+	unsigned int ep_index;
+	unsigned int ep_state;
+
+	if (!ep)
+		return -EINVAL;
+	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
+	if (ret <= 0)
+		return -EINVAL;
+	if (!ep->ss_ep_comp) {
+		xhci_warn(xhci, "WARN: No SuperSpeed Endpoint Companion"
+				" descriptor for ep 0x%x\n",
+				ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+	if (ep->ss_ep_comp->desc.bmAttributes == 0) {
+		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
+				" descriptor for ep 0x%x does not support streams\n",
+				ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+	if (ep_state & EP_HAS_STREAMS ||
+			ep_state & EP_GETTING_STREAMS) {
+		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
+				"already has streams set up.\n",
+				ep->desc.bEndpointAddress);
+		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
+				"dynamic stream context array reallocation.\n");
+		return -EINVAL;
+	}
+	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
+		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
+				"endpoint 0x%x; URBs are pending.\n",
+				ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
+		unsigned int *num_streams, unsigned int *num_stream_ctxs)
+{
+	unsigned int max_streams;
+
+	/* The stream context array size must be a power of two */
+	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
+	/*
+	 * Find out how many primary stream array entries the host controller
+	 * supports.  Later we may use secondary stream arrays (similar to 2nd
+	 * level page entries), but that's an optional feature for xHCI host
+	 * controllers. xHCs must support at least 4 stream IDs.
+	 */
+	max_streams = HCC_MAX_PSA(xhci->hcc_params);
+	if (*num_stream_ctxs > max_streams) {
+		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
+				max_streams);
+		*num_stream_ctxs = max_streams;
+		*num_streams = max_streams;
+	}
+}
+
+/* Returns an error code if one of the endpoints already has streams.
+ * This does not change any data structures, it only checks and gathers
+ * information.
+ */
+static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		unsigned int *num_streams, u32 *changed_ep_bitmask)
+{
+	struct usb_host_ss_ep_comp *ss_ep_comp;
+	unsigned int max_streams;
+	unsigned int endpoint_flag;
+	int i;
+	int ret;
+
+	for (i = 0; i < num_eps; i++) {
+		ret = xhci_check_streams_endpoint(xhci, udev,
+				eps[i], udev->slot_id);
+		if (ret < 0)
+			return ret;
+
+		ss_ep_comp = eps[i]->ss_ep_comp;
+		max_streams = USB_SS_MAX_STREAMS(ss_ep_comp->desc.bmAttributes);
+		if (max_streams < (*num_streams - 1)) {
+			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
+					eps[i]->desc.bEndpointAddress,
+					max_streams);
+			*num_streams = max_streams+1;
+		}
+
+		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
+		if (*changed_ep_bitmask & endpoint_flag)
+			return -EINVAL;
+		*changed_ep_bitmask |= endpoint_flag;
+	}
+	return 0;
+}
+
+static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps)
+{
+	u32 changed_ep_bitmask = 0;
+	unsigned int slot_id;
+	unsigned int ep_index;
+	unsigned int ep_state;
+	int i;
+
+	slot_id = udev->slot_id;
+	if (!xhci->devs[slot_id])
+		return 0;
+
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+		/* Are streams already being freed for the endpoint? */
+		if (ep_state & EP_GETTING_NO_STREAMS) {
+			xhci_warn(xhci, "WARN Can't disable streams for "
+					"endpoint 0x%x\n, "
+					"streams are being disabled already.",
+					eps[i]->desc.bEndpointAddress);
+			return 0;
+		}
+		/* Are there actually any streams to free? */
+		if (!(ep_state & EP_HAS_STREAMS) &&
+				!(ep_state & EP_GETTING_STREAMS)) {
+			xhci_warn(xhci, "WARN Can't disable streams for "
+					"endpoint 0x%x\n, "
+					"streams are already disabled!",
+					eps[i]->desc.bEndpointAddress);
+			xhci_warn(xhci, "WARN xhci_free_streams() called "
+					"with non-streams endpoint\n");
+			return 0;
+		}
+		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
+	}
+	return changed_ep_bitmask;
+}
+
+/*
+ * The USB device drivers use this function (through the HCD interface in USB
+ * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
+ * coordinate mass storage command queueing across multiple endpoints (basically
+ * a stream ID == a task ID).
+ *
+ * Setting up streams involves allocating the same size stream context array
+ * for each endpoint and issuing a configure endpoint command for all endpoints.
+ *
+ * Don't allow the call to succeed if one endpoint only supports one stream
+ * (which means it doesn't support streams at all).
+ *
+ * Drivers may get fewer stream IDs than they asked for, if the host controller
+ * hardware or endpoints claim they can't support the number of requested
+ * stream IDs.
+ */
+int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		unsigned int num_streams, gfp_t mem_flags)
+{
+	int i, ret;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *vdev;
+	struct xhci_command *config_cmd;
+	unsigned int ep_index;
+	unsigned int num_stream_ctxs;
+	unsigned long flags;
+	u32 changed_ep_bitmask = 0;
+
+	if (!eps)
+		return -EINVAL;
+
+	/* Add one to the number of streams requested to account for
+	 * stream 0 that is reserved for xHCI usage.
+	 */
+	num_streams += 1;
+	xhci = hcd_to_xhci(hcd);
+	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
+			num_streams);
+
+	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
+	if (!config_cmd) {
+		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
+		return -ENOMEM;
+	}
+
+	/* Check to make sure all endpoints are not already configured for
+	 * streams.  While we're at it, find the maximum number of streams that
+	 * all the endpoints will support and check for duplicate endpoints.
+	 */
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
+			num_eps, &num_streams, &changed_ep_bitmask);
+	if (ret < 0) {
+		xhci_free_command(xhci, config_cmd);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return ret;
+	}
+	if (num_streams <= 1) {
+		xhci_warn(xhci, "WARN: endpoints can't handle "
+				"more than one stream.\n");
+		xhci_free_command(xhci, config_cmd);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+	vdev = xhci->devs[udev->slot_id];
+	/* Mark each endpoint as being in transition, so
+	 * xhci_urb_enqueue() will reject all URBs.
+	 */
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Setup internal data structures and allocate HW data structures for
+	 * streams (but don't install the HW structures in the input context
+	 * until we're sure all memory allocation succeeded).
+	 */
+	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
+	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
+			num_stream_ctxs, num_streams);
+
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
+				num_stream_ctxs,
+				num_streams, mem_flags);
+		if (!vdev->eps[ep_index].stream_info)
+			goto cleanup;
+		/* Set maxPstreams in endpoint context and update deq ptr to
+		 * point to stream context array. FIXME
+		 */
+	}
+
+	/* Set up the input context for a configure endpoint command. */
+	for (i = 0; i < num_eps; i++) {
+		struct xhci_ep_ctx *ep_ctx;
+
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
+
+		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
+				vdev->out_ctx, ep_index);
+		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
+				vdev->eps[ep_index].stream_info);
+	}
+	/* Tell the HW to drop its old copy of the endpoint context info
+	 * and add the updated copy from the input context.
+	 */
+	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
+			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+
+	/* Issue and wait for the configure endpoint command */
+	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
+			false, false);
+
+	/* xHC rejected the configure endpoint command for some reason, so we
+	 * leave the old ring intact and free our internal streams data
+	 * structure.
+	 */
+	if (ret < 0)
+		goto cleanup;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
+				udev->slot_id, ep_index);
+		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
+	}
+	xhci_free_command(xhci, config_cmd);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Subtract 1 for stream 0, which drivers can't use */
+	return num_streams - 1;
+
+cleanup:
+	/* If it didn't work, free the streams! */
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+		/* FIXME Unset maxPstreams in endpoint context and
+		 * update deq ptr to point to normal stream ring.
+		 */
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+		xhci_endpoint_zero(xhci, vdev, eps[i]);
+	}
+	xhci_free_command(xhci, config_cmd);
+	return -ENOMEM;
+}
+
+/* Transition the endpoint from using streams to being a "normal" endpoint
+ * without streams.
+ *
+ * Modify the endpoint context state, submit a configure endpoint command,
+ * and free all endpoint rings for streams if that completes successfully.
+ */
+int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		gfp_t mem_flags)
+{
+	int i, ret;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *vdev;
+	struct xhci_command *command;
+	unsigned int ep_index;
+	unsigned long flags;
+	u32 changed_ep_bitmask;
+
+	xhci = hcd_to_xhci(hcd);
+	vdev = xhci->devs[udev->slot_id];
+
+	/* Set up a configure endpoint command to remove the streams rings */
+	spin_lock_irqsave(&xhci->lock, flags);
+	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
+			udev, eps, num_eps);
+	if (changed_ep_bitmask == 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+
+	/* Use the xhci_command structure from the first endpoint.  We may have
+	 * allocated too many, but the driver may call xhci_free_streams() for
+	 * each endpoint it grouped into one call to xhci_alloc_streams().
+	 */
+	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
+	command = vdev->eps[ep_index].stream_info->free_streams_command;
+	for (i = 0; i < num_eps; i++) {
+		struct xhci_ep_ctx *ep_ctx;
+
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
+			EP_GETTING_NO_STREAMS;
+
+		xhci_endpoint_copy(xhci, command->in_ctx,
+				vdev->out_ctx, ep_index);
+		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
+				&vdev->eps[ep_index]);
+	}
+	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
+			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Issue and wait for the configure endpoint command,
+	 * which must succeed.
+	 */
+	ret = xhci_configure_endpoint(xhci, udev, command,
+			false, true);
+
+	/* xHC rejected the configure endpoint command for some reason, so we
+	 * leave the streams rings intact.
+	 */
+	if (ret < 0)
+		return ret;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+		/* FIXME Unset maxPstreams in endpoint context and
+		 * update deq ptr to point to normal stream ring.
+		 */
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
+		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return 0;
+}
+
 /*
  * This submits a Reset Device Command, which will set the device state to 0,
  * set the device address to 0, and disable all the endpoints except the default
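
The stream-count negotiation in xhci_alloc_streams() above (add one for stream 0, shrink to what every endpoint advertises, round up to a power of two, clamp to HCC_MAX_PSA) reduces to a few lines of arithmetic. A user-space sketch under assumed inputs follows; the HCCPARAMS value and the requested count are made up, and roundup_pow_of_two() stands in for the linux/log2.h helper.

#include <stdio.h>

/* 2^(n+1), where n is bits 12:15 of HCCPARAMS, matching the fixed
 * HCC_MAX_PSA(p) macro in the xhci.h hunk below.
 */
#define HCC_MAX_PSA(p)	(1 << ((((p) >> 12) & 0xf) + 1))

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int hcc_params = 0x4 << 12;	/* assumed MaxPSASize field = 4 */
	unsigned int requested = 12;		/* usable stream IDs the driver wants */
	unsigned int num_streams = requested + 1;	/* account for stream 0 */
	unsigned int num_stream_ctxs = roundup_pow_of_two(num_streams);
	unsigned int max_streams = HCC_MAX_PSA(hcc_params);

	if (num_stream_ctxs > max_streams)
		num_stream_ctxs = num_streams = max_streams;

	/* 12 requested -> 13 with stream 0 -> 16 ctx entries (HCC_MAX_PSA = 32) */
	printf("ctx entries: %u, usable stream IDs: %u\n",
			num_stream_ctxs, num_streams - 1);
	return 0;
}
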
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a7c4e1122902..7a9447cb6ea9 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -117,7 +117,7 @@ struct xhci_cap_regs {
 /* true: no secondary Stream ID Support */
 #define HCC_NSS(p)		((p) & (1 << 7))
 /* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
-#define HCC_MAX_PSA		(1 << ((((p) >> 12) & 0xf) + 1))
+#define HCC_MAX_PSA(p)		(1 << ((((p) >> 12) & 0xf) + 1))
 /* Extended Capabilities pointer from PCI base - section 5.3.6 */
 #define HCC_EXT_CAPS(p)		XHCI_HCC_EXT_CAPS(p)
 
@@ -585,6 +585,10 @@ struct xhci_ep_ctx {
 /* Interval - period between requests to an endpoint - 125u increments. */
 #define EP_INTERVAL(p)		((p & 0xff) << 16)
 #define EP_INTERVAL_TO_UFRAMES(p)	(1 << (((p) >> 16) & 0xff))
+#define EP_MAXPSTREAMS_MASK	(0x1f << 10)
+#define EP_MAXPSTREAMS(p)	(((p) << 10) & EP_MAXPSTREAMS_MASK)
+/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
+#define EP_HAS_LSA		(1 << 15)
 
 /* ep_info2 bitmasks */
 /*
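
EP_MAXPSTREAMS(p) packs the MaxPStreams value into bits 14:10 of ep_info; xhci_setup_streams_ep_input_ctx() in the xhci-mem.c hunk derives that value as fls(num_stream_ctxs) - 2, inverting the spec's 2^(MaxPStreams + 1) encoding. A quick user-space check of the round trip; fls_() is a portable stand-in for the kernel's fls().

#include <stdio.h>

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int n;

	/* num_stream_ctxs is always a power of two, so fls(n) - 2 recovers
	 * MaxPStreams exactly, and 2^(MaxPStreams + 1) gets n back.
	 */
	for (n = 4; n <= 1024; n <<= 1) {
		int max_primary_streams = fls_(n) - 2;

		printf("%4u entries -> MaxPStreams = %d -> 2^(%d+1) = %u\n",
				n, max_primary_streams, max_primary_streams,
				1u << (max_primary_streams + 1));
	}
	return 0;
}
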
@@ -648,8 +652,50 @@ struct xhci_command {
 /* add context bitmasks */
 #define	ADD_EP(x)	(0x1 << x)
 
+struct xhci_stream_ctx {
+	/* 64-bit stream ring address, cycle state, and stream type */
+	u64	stream_ring;
+	/* offset 0x14 - 0x1f reserved for HC internal use */
+	u32	reserved[2];
+};
+
+/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
+#define SCT_FOR_CTX(p)		(((p) << 1) & 0x7)
+/* Secondary stream array type, dequeue pointer is to a transfer ring */
+#define SCT_SEC_TR		0
+/* Primary stream array type, dequeue pointer is to a transfer ring */
+#define SCT_PRI_TR		1
+/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */
+#define SCT_SSA_8		2
+#define SCT_SSA_16		3
+#define SCT_SSA_32		4
+#define SCT_SSA_64		5
+#define SCT_SSA_128		6
+#define SCT_SSA_256		7
+
+/* Assume no secondary streams for now */
+struct xhci_stream_info {
+	struct xhci_ring		**stream_rings;
+	/* Number of streams, including stream 0 (which drivers can't use) */
+	unsigned int			num_streams;
+	/* The stream context array may be bigger than
+	 * the number of streams the driver asked for
+	 */
+	struct xhci_stream_ctx		*stream_ctx_array;
+	unsigned int			num_stream_ctxs;
+	dma_addr_t			ctx_array_dma;
+	/* For mapping physical TRB addresses to segments in stream rings */
+	struct radix_tree_root		trb_address_map;
+	struct xhci_command		*free_streams_command;
+};
+
+#define	SMALL_STREAM_ARRAY_SIZE		256
+#define	MEDIUM_STREAM_ARRAY_SIZE	1024
+
 struct xhci_virt_ep {
 	struct xhci_ring		*ring;
+	/* Related to endpoints that are configured to use stream IDs only */
+	struct xhci_stream_info		*stream_info;
 	/* Temporary storage in case the configure endpoint command fails and we
 	 * have to restore the device state to the previous state
 	 */
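
SCT_FOR_CTX() above is what lets xhci_alloc_stream_info() build a single deq-pointer word per stream context: ring segments are at least 16-byte aligned, so the low bits of the DMA address are free to carry the cycle state (bit 0) and the stream context type (bits 3:1). A standalone sketch of that packing with an assumed segment address:

#include <stdint.h>
#include <stdio.h>

#define SCT_FOR_CTX(p)	(((p) << 1) & 0x7)	/* stream ctx type, bits 3:1 */
#define SCT_PRI_TR	1			/* primary transfer ring */

int main(void)
{
	uint64_t seg_dma = 0x10c91000;	/* assumed segment DMA address */
	unsigned int cycle_state = 1;	/* producer cycle bit */

	/* Same composition as xhci_alloc_stream_info():
	 * addr | SCT_FOR_CTX(SCT_PRI_TR) | cycle_state
	 */
	uint64_t stream_ring = seg_dma | SCT_FOR_CTX(SCT_PRI_TR) | cycle_state;

	printf("stream_ring = 0x%llx (type = %llu, cycle = %llu)\n",
			(unsigned long long)stream_ring,
			(unsigned long long)((stream_ring >> 1) & 0x7),
			(unsigned long long)(stream_ring & 1));
	return 0;
}
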
@@ -658,6 +704,11 @@ struct xhci_virt_ep {
 #define SET_DEQ_PENDING		(1 << 0)
 #define EP_HALTED		(1 << 1)	/* For stall handling */
 #define EP_HALT_PENDING		(1 << 2)	/* For URB cancellation */
+/* Transitioning the endpoint to using streams, don't enqueue URBs */
+#define EP_GETTING_STREAMS	(1 << 3)
+#define EP_HAS_STREAMS		(1 << 4)
+/* Transitioning the endpoint to not using streams, don't enqueue URBs */
+#define EP_GETTING_NO_STREAMS	(1 << 5)
 	/* ---- Related to URB cancellation ---- */
 	struct list_head	cancelled_td_list;
 	/* The TRB that was last reported in a stopped endpoint ring */
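
The two transition flags are what xhci_urb_enqueue() tests before queueing bulk URBs: while either is set, submissions fail with -EINVAL. Collapsed into one mask test it looks like the sketch below; the patch itself checks the flags separately so it can print distinct warnings.

#define EP_GETTING_STREAMS	(1 << 3)
#define EP_HAS_STREAMS		(1 << 4)
#define EP_GETTING_NO_STREAMS	(1 << 5)

/* URBs are refused only while the endpoint is mid-transition; steady
 * state with or without streams is fine.
 */
static int ep_accepts_urbs(unsigned int ep_state)
{
	return !(ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS));
}
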
@@ -710,14 +761,6 @@ struct xhci_device_context_array {
  */
 
 
-struct xhci_stream_ctx {
-	/* 64-bit stream ring address, cycle state, and stream type */
-	u64	stream_ring;
-	/* offset 0x14 - 0x1f reserved for HC internal use */
-	u32	reserved[2];
-};
-
-
 struct xhci_transfer_event {
 	/* 64-bit buffer address, or immediate data */
 	u64	buffer;
@@ -952,6 +995,10 @@ union xhci_trb {
 /* Allow two commands + a link TRB, along with any reserved command TRBs */
 #define MAX_RSVD_CMD_TRBS	(TRBS_PER_SEGMENT - 3)
 #define SEGMENT_SIZE		(TRBS_PER_SEGMENT*16)
+/* SEGMENT_SHIFT should be log2(SEGMENT_SIZE).
+ * Change this if you change TRBS_PER_SEGMENT!
+ */
+#define SEGMENT_SHIFT		10
 /* TRB buffer pointers can't cross 64KB boundaries */
 #define TRB_MAX_BUFF_SHIFT		16
 #define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
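
SEGMENT_SHIFT duplicates information already encoded in SEGMENT_SIZE, and the comment relies on developers keeping the two in sync. If that coupling ever worries you, a compile-time guard makes it explicit. A sketch, not part of the patch:

/* With TRBS_PER_SEGMENT = 64, SEGMENT_SIZE is 1024 and log2(1024) = 10. */
#define TRBS_PER_SEGMENT	64
#define SEGMENT_SIZE		(TRBS_PER_SEGMENT*16)
#define SEGMENT_SHIFT		10

/* Fails to compile if the shift and the size ever drift apart. */
typedef char segment_shift_matches_size
		[(1 << SEGMENT_SHIFT) == SEGMENT_SIZE ? 1 : -1];
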
@@ -1088,6 +1135,8 @@ struct xhci_hcd {
 	/* DMA pools */
 	struct dma_pool	*device_pool;
 	struct dma_pool	*segment_pool;
+	struct dma_pool	*small_streams_pool;
+	struct dma_pool	*medium_streams_pool;
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	/* Poll the rings - for debugging */
@@ -1242,6 +1291,17 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index);
+struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+		unsigned int num_stream_ctxs,
+		unsigned int num_streams, gfp_t flags);
+void xhci_free_stream_info(struct xhci_hcd *xhci,
+		struct xhci_stream_info *stream_info);
+void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
+		struct xhci_ep_ctx *ep_ctx,
+		struct xhci_stream_info *stream_info);
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
+		struct xhci_ep_ctx *ep_ctx,
+		struct xhci_virt_ep *ep);
 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 		bool allocate_in_ctx, bool allocate_completion,
 		gfp_t mem_flags);
@@ -1266,6 +1326,12 @@ int xhci_get_frame(struct usb_hcd *hcd);
 irqreturn_t xhci_irq(struct usb_hcd *hcd);
 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		unsigned int num_streams, gfp_t mem_flags);
+int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		gfp_t mem_flags);
 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
 		struct usb_tt *tt, gfp_t mem_flags);
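
Taken together, the new entry points give bulk drivers roughly the call pattern below. This is a hypothetical sketch: usb_alloc_streams()/usb_free_streams() are the USB-core wrappers referenced by the xhci-mem.c comment above, which route to xhci_alloc_streams()/xhci_free_streams(), and the endpoints and counts here are invented.

#include <linux/usb.h>

static int demo_use_streams(struct usb_interface *intf,
		struct usb_host_endpoint *bulk_in,
		struct usb_host_endpoint *bulk_out)
{
	struct usb_host_endpoint *eps[2] = { bulk_in, bulk_out };
	int streams;

	/* Ask for 16 usable stream IDs on both endpoints in one call; the
	 * return value may be smaller if the xHC or the endpoint companion
	 * descriptors can't support that many.  Stream 0 is never handed
	 * to drivers.
	 */
	streams = usb_alloc_streams(intf, eps, 2, 16, GFP_KERNEL);
	if (streams <= 0)
		return streams ? streams : -EINVAL;

	/* ... queue bulk URBs tagged with stream IDs 1..streams ... */

	/* Put the endpoints back into ordinary, stream-less operation. */
	usb_free_streams(intf, eps, 2, GFP_KERNEL);
	return 0;
}
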