diff options
author | Alan Stern <stern@rowland.harvard.edu> | 2010-04-02 13:27:28 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2010-05-20 16:21:37 -0400 |
commit | ff9c895f07d36193c75533bda8193bde8ca99d02 (patch) | |
tree | 386ca8e37734c4810e59a55eaba92e4e88275d14 /drivers/usb/core/message.c | |
parent | 0ff8d1b3c858ea7c8daa54f7577971a76d04d283 (diff) |
USB: fix usbmon and DMA mapping for scatter-gather URBs
This patch (as1368) fixes a rather obscure bug in usbmon: When tracing
URBs sent by the scatter-gather library, it accesses the data buffers
while they are still mapped for DMA.
The solution is to move the mapping and unmapping out of the s-g
library and into the usual place in hcd.c. This requires the addition
of new URB flag bits to describe the kind of mapping needed, since we
have to call dma_map_sg() if the HCD supports native scatter-gather
operation and dma_map_page() if it doesn't. The nice thing about
having the new flags is that they simplify the testing for unmapping.
The patch removes the only caller of usb_buffer_[un]map_sg(), so those
functions are #if'ed out. A later patch will remove them entirely.
As a result of this change, urb->sg will be set in situations where
it wasn't set previously. Hence the xhci and whci drivers are
adjusted to test urb->num_sgs instead, which retains its original
meaning and is nonzero only when the HCD has to handle a scatterlist.
Finally, even when a submission error occurs we don't want to hand
URBs to usbmon before they are unmapped. The submission path is
rearranged so that map_urb_for_dma() is called only for non-root-hub
URBs and unmap_urb_for_dma() is called immediately after a submission
error. This simplifies the error handling.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: <stable@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/core/message.c')
-rw-r--r-- | drivers/usb/core/message.c | 45 |
1 file changed, 10 insertions(+), 35 deletions(-)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 619c44fb8a9..79d1cdf4a63 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -259,9 +259,6 @@ static void sg_clean(struct usb_sg_request *io) | |||
259 | kfree(io->urbs); | 259 | kfree(io->urbs); |
260 | io->urbs = NULL; | 260 | io->urbs = NULL; |
261 | } | 261 | } |
262 | if (io->dev->dev.dma_mask != NULL) | ||
263 | usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe), | ||
264 | io->sg, io->nents); | ||
265 | io->dev = NULL; | 262 | io->dev = NULL; |
266 | } | 263 | } |
267 | 264 | ||
@@ -364,7 +361,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, | |||
364 | { | 361 | { |
365 | int i; | 362 | int i; |
366 | int urb_flags; | 363 | int urb_flags; |
367 | int dma; | ||
368 | int use_sg; | 364 | int use_sg; |
369 | 365 | ||
370 | if (!io || !dev || !sg | 366 | if (!io || !dev || !sg |
@@ -378,21 +374,9 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, | |||
378 | io->pipe = pipe; | 374 | io->pipe = pipe; |
379 | io->sg = sg; | 375 | io->sg = sg; |
380 | io->nents = nents; | 376 | io->nents = nents; |
381 | 377 | io->entries = nents; | |
382 | /* not all host controllers use DMA (like the mainstream pci ones); | ||
383 | * they can use PIO (sl811) or be software over another transport. | ||
384 | */ | ||
385 | dma = (dev->dev.dma_mask != NULL); | ||
386 | if (dma) | ||
387 | io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe), | ||
388 | sg, nents); | ||
389 | else | ||
390 | io->entries = nents; | ||
391 | 378 | ||
392 | /* initialize all the urbs we'll use */ | 379 | /* initialize all the urbs we'll use */ |
393 | if (io->entries <= 0) | ||
394 | return io->entries; | ||
395 | |||
396 | if (dev->bus->sg_tablesize > 0) { | 380 | if (dev->bus->sg_tablesize > 0) { |
397 | io->urbs = kmalloc(sizeof *io->urbs, mem_flags); | 381 | io->urbs = kmalloc(sizeof *io->urbs, mem_flags); |
398 | use_sg = true; | 382 | use_sg = true; |
@@ -404,8 +388,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, | |||
404 | goto nomem; | 388 | goto nomem; |
405 | 389 | ||
406 | urb_flags = 0; | 390 | urb_flags = 0; |
407 | if (dma) | ||
408 | urb_flags |= URB_NO_TRANSFER_DMA_MAP; | ||
409 | if (usb_pipein(pipe)) | 391 | if (usb_pipein(pipe)) |
410 | urb_flags |= URB_SHORT_NOT_OK; | 392 | urb_flags |= URB_SHORT_NOT_OK; |
411 | 393 | ||
@@ -423,12 +405,13 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, | |||
423 | 405 | ||
424 | io->urbs[0]->complete = sg_complete; | 406 | io->urbs[0]->complete = sg_complete; |
425 | io->urbs[0]->context = io; | 407 | io->urbs[0]->context = io; |
408 | |||
426 | /* A length of zero means transfer the whole sg list */ | 409 | /* A length of zero means transfer the whole sg list */ |
427 | io->urbs[0]->transfer_buffer_length = length; | 410 | io->urbs[0]->transfer_buffer_length = length; |
428 | if (length == 0) { | 411 | if (length == 0) { |
429 | for_each_sg(sg, sg, io->entries, i) { | 412 | for_each_sg(sg, sg, io->entries, i) { |
430 | io->urbs[0]->transfer_buffer_length += | 413 | io->urbs[0]->transfer_buffer_length += |
431 | sg_dma_len(sg); | 414 | sg->length; |
432 | } | 415 | } |
433 | } | 416 | } |
434 | io->urbs[0]->sg = io; | 417 | io->urbs[0]->sg = io; |
@@ -454,26 +437,16 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, | |||
454 | io->urbs[i]->context = io; | 437 | io->urbs[i]->context = io; |
455 | 438 | ||
456 | /* | 439 | /* |
457 | * Some systems need to revert to PIO when DMA is temporarily | 440 | * Some systems can't use DMA; they use PIO instead. |
458 | * unavailable. For their sakes, both transfer_buffer and | 441 | * For their sakes, transfer_buffer is set whenever |
459 | * transfer_dma are set when possible. | 442 | * possible. |
460 | * | ||
461 | * Note that if IOMMU coalescing occurred, we cannot | ||
462 | * trust sg_page anymore, so check if S/G list shrunk. | ||
463 | */ | 443 | */ |
464 | if (io->nents == io->entries && !PageHighMem(sg_page(sg))) | 444 | if (!PageHighMem(sg_page(sg))) |
465 | io->urbs[i]->transfer_buffer = sg_virt(sg); | 445 | io->urbs[i]->transfer_buffer = sg_virt(sg); |
466 | else | 446 | else |
467 | io->urbs[i]->transfer_buffer = NULL; | 447 | io->urbs[i]->transfer_buffer = NULL; |
468 | 448 | ||
469 | if (dma) { | 449 | len = sg->length; |
470 | io->urbs[i]->transfer_dma = sg_dma_address(sg); | ||
471 | len = sg_dma_len(sg); | ||
472 | } else { | ||
473 | /* hc may use _only_ transfer_buffer */ | ||
474 | len = sg->length; | ||
475 | } | ||
476 | |||
477 | if (length) { | 450 | if (length) { |
478 | len = min_t(unsigned, len, length); | 451 | len = min_t(unsigned, len, length); |
479 | length -= len; | 452 | length -= len; |
@@ -481,6 +454,8 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, | |||
481 | io->entries = i + 1; | 454 | io->entries = i + 1; |
482 | } | 455 | } |
483 | io->urbs[i]->transfer_buffer_length = len; | 456 | io->urbs[i]->transfer_buffer_length = len; |
457 | |||
458 | io->urbs[i]->sg = (struct usb_sg_request *) sg; | ||
484 | } | 459 | } |
485 | io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT; | 460 | io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT; |
486 | } | 461 | } |