author		Sarah Sharp <sarah.a.sharp@linux.intel.com>	2009-04-27 22:59:01 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-06-16 00:44:49 -0400
commit		e04748e3a87271fcf30d383e3780c5d3ee1c1618 (patch)
tree		1e1053837fcde0761673cbba6140514630a372ed /drivers/usb
parent		b10de142119a676552df3f0d2e3a9d647036c26a (diff)
USB: Push scatter gather lists down to host controller drivers.
This is the original patch I created before David Vrabel posted a better
patch (http://marc.info/?l=linux-usb&m=123377477209109&w=2) that does
basically the same thing. This patch will get replaced with his (modified)
patch later.

Allow USB device drivers that use usb_sg_init() and usb_sg_wait() to push
bulk endpoint scatter gather lists down to the host controller drivers.
This allows host controller drivers to more efficiently enqueue these
transfers, and allows the xHCI host controller to better take advantage
of USB 3.0 "bursts" for bulk endpoints.

This patch currently only enables scatter gather lists for bulk endpoints.
Other endpoint types that use the usb_sg_* functions will not have their
scatter gather lists pushed down to the host controller. For periodic
endpoints, we want each scatterlist entry to be a separate transfer.
Eventually, HCDs could parse these scatter-gather lists for periodic
endpoints also. For now, we use the old code and call usb_submit_urb()
for each scatterlist entry.

The caller of usb_sg_init() can request that all bytes in the scatter
gather list be transferred by passing in a length of zero. Handle that
request for a bulk endpoint under xHCI by walking the scatter gather list
and calculating the length. We could let the HCD handle a zero length in
this case, but I'm not sure if the core layers in between will get
confused by this.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
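[Editor's note] As background, here is a minimal sketch (not part of this
patch) of the driver-side interface the message refers to: build a
scatterlist, pass it to usb_sg_init() with a length of zero to request the
whole list, and block in usb_sg_wait(). The function name
example_send_pages_bulk, the endpoint number epnum, and the page array are
hypothetical; error handling is trimmed.

/*
 * Hypothetical example, not part of the patch: submit a set of pages as
 * one bulk-OUT scatter gather request via the usb_sg_* helpers.
 */
#include <linux/usb.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mm.h>

static int example_send_pages_bulk(struct usb_device *udev,
				   unsigned int epnum,
				   struct page **pages, int npages)
{
	struct usb_sg_request io;
	struct scatterlist *sg;
	int i, ret;

	sg = kmalloc(npages * sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, npages);
	for (i = 0; i < npages; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	/* period is 0 for bulk; a length of 0 asks the core to transfer
	 * every byte described by the scatterlist.
	 */
	ret = usb_sg_init(&io, udev, usb_sndbulkpipe(udev, epnum), 0,
			  sg, npages, 0, GFP_KERNEL);
	if (!ret) {
		usb_sg_wait(&io);	/* blocks until the request completes */
		ret = io.status;	/* 0 on success, else a negative errno */
	}

	kfree(sg);
	return ret;
}

With this patch, a request like the one above becomes a single URB carrying
the whole scatter gather list when the bus is driven by an xHCI (HCD_USB3)
host controller; on older hosts the core still submits one URB per
scatterlist entry, as the else branch in the message.c hunk below does.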
Diffstat (limited to 'drivers/usb')
-rw-r--r--	drivers/usb/core/hcd.c		3
-rw-r--r--	drivers/usb/core/message.c	139
2 files changed, 93 insertions, 49 deletions
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index b2da4753b12e..1609623ec829 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1239,7 +1239,8 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 
 	/* Map the URB's buffers for DMA access.
 	 * Lower level HCD code should use *_dma exclusively,
-	 * unless it uses pio or talks to another transport.
+	 * unless it uses pio or talks to another transport,
+	 * or uses the provided scatter gather list for bulk.
 	 */
 	if (is_root_hub(urb->dev))
 		return 0;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 3a2e69ec2f29..2bed83caacb1 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -365,6 +365,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 	int i;
 	int urb_flags;
 	int dma;
+	int use_sg;
 
 	if (!io || !dev || !sg
 			|| usb_pipecontrol(pipe)
@@ -392,7 +393,19 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 	if (io->entries <= 0)
 		return io->entries;
 
-	io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
+	/* If we're running on an xHCI host controller, queue the whole scatter
+	 * gather list with one call to urb_enqueue(). This is only for bulk,
+	 * as that endpoint type does not care how the data gets broken up
+	 * across frames.
+	 */
+	if (usb_pipebulk(pipe) &&
+			bus_to_hcd(dev->bus)->driver->flags & HCD_USB3) {
+		io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
+		use_sg = true;
+	} else {
+		io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
+		use_sg = false;
+	}
 	if (!io->urbs)
 		goto nomem;
 
@@ -402,62 +415,92 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 	if (usb_pipein(pipe))
 		urb_flags |= URB_SHORT_NOT_OK;
 
-	for_each_sg(sg, sg, io->entries, i) {
-		unsigned len;
-
-		io->urbs[i] = usb_alloc_urb(0, mem_flags);
-		if (!io->urbs[i]) {
-			io->entries = i;
+	if (use_sg) {
+		io->urbs[0] = usb_alloc_urb(0, mem_flags);
+		if (!io->urbs[0]) {
+			io->entries = 0;
 			goto nomem;
 		}
 
-		io->urbs[i]->dev = NULL;
-		io->urbs[i]->pipe = pipe;
-		io->urbs[i]->interval = period;
-		io->urbs[i]->transfer_flags = urb_flags;
-
-		io->urbs[i]->complete = sg_complete;
-		io->urbs[i]->context = io;
-
-		/*
-		 * Some systems need to revert to PIO when DMA is temporarily
-		 * unavailable. For their sakes, both transfer_buffer and
-		 * transfer_dma are set when possible. However this can only
-		 * work on systems without:
-		 *
-		 *  - HIGHMEM, since DMA buffers located in high memory are
-		 *    not directly addressable by the CPU for PIO;
-		 *
-		 *  - IOMMU, since dma_map_sg() is allowed to use an IOMMU to
-		 *    make virtually discontiguous buffers be "dma-contiguous"
-		 *    so that PIO and DMA need diferent numbers of URBs.
-		 *
-		 * So when HIGHMEM or IOMMU are in use, transfer_buffer is NULL
-		 * to prevent stale pointers and to help spot bugs.
-		 */
-		if (dma) {
-			io->urbs[i]->transfer_dma = sg_dma_address(sg);
-			len = sg_dma_len(sg);
+		io->urbs[0]->dev = NULL;
+		io->urbs[0]->pipe = pipe;
+		io->urbs[0]->interval = period;
+		io->urbs[0]->transfer_flags = urb_flags;
+
+		io->urbs[0]->complete = sg_complete;
+		io->urbs[0]->context = io;
+		/* A length of zero means transfer the whole sg list */
+		io->urbs[0]->transfer_buffer_length = length;
+		if (length == 0) {
+			for_each_sg(sg, sg, io->entries, i) {
+				io->urbs[0]->transfer_buffer_length +=
+					sg_dma_len(sg);
+			}
+		}
+		io->urbs[0]->sg = io;
+		io->urbs[0]->num_sgs = io->entries;
+		io->entries = 1;
+	} else {
+		for_each_sg(sg, sg, io->entries, i) {
+			unsigned len;
+
+			io->urbs[i] = usb_alloc_urb(0, mem_flags);
+			if (!io->urbs[i]) {
+				io->entries = i;
+				goto nomem;
+			}
+
+			io->urbs[i]->dev = NULL;
+			io->urbs[i]->pipe = pipe;
+			io->urbs[i]->interval = period;
+			io->urbs[i]->transfer_flags = urb_flags;
+
+			io->urbs[i]->complete = sg_complete;
+			io->urbs[i]->context = io;
+
+			/*
+			 * Some systems need to revert to PIO when DMA is
+			 * temporarily unavailable. For their sakes, both
+			 * transfer_buffer and transfer_dma are set when
+			 * possible. However this can only work on systems
+			 * without:
+			 *
+			 *  - HIGHMEM, since DMA buffers located in high memory
+			 *    are not directly addressable by the CPU for PIO;
+			 *
+			 *  - IOMMU, since dma_map_sg() is allowed to use an
+			 *    IOMMU to make virtually discontiguous buffers be
+			 *    "dma-contiguous" so that PIO and DMA need diferent
+			 *    numbers of URBs.
+			 *
+			 * So when HIGHMEM or IOMMU are in use, transfer_buffer
+			 * is NULL to prevent stale pointers and to help spot
+			 * bugs.
+			 */
+			if (dma) {
+				io->urbs[i]->transfer_dma = sg_dma_address(sg);
+				len = sg_dma_len(sg);
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_GART_IOMMU)
-			io->urbs[i]->transfer_buffer = NULL;
+				io->urbs[i]->transfer_buffer = NULL;
 #else
-			io->urbs[i]->transfer_buffer = sg_virt(sg);
+				io->urbs[i]->transfer_buffer = sg_virt(sg);
 #endif
-		} else {
-			/* hc may use _only_ transfer_buffer */
-			io->urbs[i]->transfer_buffer = sg_virt(sg);
-			len = sg->length;
-		}
+			} else {
+				/* hc may use _only_ transfer_buffer */
+				io->urbs[i]->transfer_buffer = sg_virt(sg);
+				len = sg->length;
+			}
 
-		if (length) {
-			len = min_t(unsigned, len, length);
-			length -= len;
-			if (length == 0)
-				io->entries = i + 1;
+			if (length) {
+				len = min_t(unsigned, len, length);
+				length -= len;
+				if (length == 0)
+					io->entries = i + 1;
+			}
+			io->urbs[i]->transfer_buffer_length = len;
 		}
-		io->urbs[i]->transfer_buffer_length = len;
+		io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
 	}
-	io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
 
 	/* transaction state */
 	io->count = io->entries;