diff options
author | David Vrabel <david.vrabel@csr.com> | 2009-08-24 10:02:27 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-12-11 14:55:14 -0500 |
commit | 294a39e7829dfd663e6c5c94cede0c6a0c13e37f (patch) | |
tree | c9157a84b52aa64f29ce445e1e86a4f4523d8d39 /drivers | |
parent | 4c1bd3d7a7d114dabd58f62f386ac4bfd268be1f (diff) |
USB: whci-hcd: support urbs with scatter-gather lists
Support urbs with scatter-gather lists by trying to fit sg list elements
into page lists in one or more qTDs. qTDs must end on a wMaxPacketSize
boundary so if this isn't possible the urb's sg list must be copied into
bounce buffers.
Signed-off-by: David Vrabel <david.vrabel@csr.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/usb/host/whci/hcd.c | 1 | ||||
-rw-r--r-- | drivers/usb/host/whci/qset.c | 348 | ||||
-rw-r--r-- | drivers/usb/host/whci/whcd.h | 9 | ||||
-rw-r--r-- | drivers/usb/host/whci/whci-hc.h | 5 |
4 files changed, 332 insertions, 31 deletions
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c index 687b622a1612..e0d3401285c8 100644 --- a/drivers/usb/host/whci/hcd.c +++ b/drivers/usb/host/whci/hcd.c | |||
@@ -250,6 +250,7 @@ static int whc_probe(struct umc_dev *umc) | |||
250 | } | 250 | } |
251 | 251 | ||
252 | usb_hcd->wireless = 1; | 252 | usb_hcd->wireless = 1; |
253 | usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */ | ||
253 | 254 | ||
254 | wusbhc = usb_hcd_to_wusbhc(usb_hcd); | 255 | wusbhc = usb_hcd_to_wusbhc(usb_hcd); |
255 | whc = wusbhc_to_whc(wusbhc); | 256 | whc = wusbhc_to_whc(wusbhc); |
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index 1b9dc1571570..88e51ea8620b 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c | |||
@@ -57,8 +57,9 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) | |||
57 | 57 | ||
58 | is_out = usb_pipeout(urb->pipe); | 58 | is_out = usb_pipeout(urb->pipe); |
59 | 59 | ||
60 | epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; | 60 | qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize); |
61 | 61 | ||
62 | epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; | ||
62 | if (epcd) { | 63 | if (epcd) { |
63 | qset->max_seq = epcd->bMaxSequence; | 64 | qset->max_seq = epcd->bMaxSequence; |
64 | qset->max_burst = epcd->bMaxBurst; | 65 | qset->max_burst = epcd->bMaxBurst; |
@@ -72,7 +73,7 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) | |||
72 | | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) | 73 | | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) |
73 | | usb_pipe_to_qh_type(urb->pipe) | 74 | | usb_pipe_to_qh_type(urb->pipe) |
74 | | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) | 75 | | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) |
75 | | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out)) | 76 | | QH_INFO1_MAX_PKT_LEN(qset->max_packet) |
76 | ); | 77 | ); |
77 | qset->qh.info2 = cpu_to_le32( | 78 | qset->qh.info2 = cpu_to_le32( |
78 | QH_INFO2_BURST(qset->max_burst) | 79 | QH_INFO2_BURST(qset->max_burst) |
@@ -241,6 +242,36 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) | |||
241 | qset->ntds--; | 242 | qset->ntds--; |
242 | } | 243 | } |
243 | 244 | ||
245 | static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std) | ||
246 | { | ||
247 | struct scatterlist *sg; | ||
248 | void *bounce; | ||
249 | size_t remaining, offset; | ||
250 | |||
251 | bounce = std->bounce_buf; | ||
252 | remaining = std->len; | ||
253 | |||
254 | sg = std->bounce_sg; | ||
255 | offset = std->bounce_offset; | ||
256 | |||
257 | while (remaining) { | ||
258 | size_t len; | ||
259 | |||
260 | len = min(sg->length - offset, remaining); | ||
261 | memcpy(sg_virt(sg) + offset, bounce, len); | ||
262 | |||
263 | bounce += len; | ||
264 | remaining -= len; | ||
265 | |||
266 | offset += len; | ||
267 | if (offset >= sg->length) { | ||
268 | sg = sg_next(sg); | ||
269 | offset = 0; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | } | ||
274 | |||
244 | /** | 275 | /** |
245 | * qset_free_std - remove an sTD and free it. | 276 | * qset_free_std - remove an sTD and free it. |
246 | * @whc: the WHCI host controller | 277 | * @whc: the WHCI host controller |
@@ -249,13 +280,29 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) | |||
249 | void qset_free_std(struct whc *whc, struct whc_std *std) | 280 | void qset_free_std(struct whc *whc, struct whc_std *std) |
250 | { | 281 | { |
251 | list_del(&std->list_node); | 282 | list_del(&std->list_node); |
252 | if (std->num_pointers) { | 283 | if (std->bounce_buf) { |
253 | dma_unmap_single(whc->wusbhc.dev, std->dma_addr, | 284 | bool is_out = usb_pipeout(std->urb->pipe); |
254 | std->num_pointers * sizeof(struct whc_page_list_entry), | 285 | dma_addr_t dma_addr; |
255 | DMA_TO_DEVICE); | 286 | |
287 | if (std->num_pointers) | ||
288 | dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr); | ||
289 | else | ||
290 | dma_addr = std->dma_addr; | ||
291 | |||
292 | dma_unmap_single(whc->wusbhc.dev, dma_addr, | ||
293 | std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | ||
294 | if (!is_out) | ||
295 | qset_copy_bounce_to_sg(whc, std); | ||
296 | kfree(std->bounce_buf); | ||
297 | } | ||
298 | if (std->pl_virt) { | ||
299 | if (std->dma_addr) | ||
300 | dma_unmap_single(whc->wusbhc.dev, std->dma_addr, | ||
301 | std->num_pointers * sizeof(struct whc_page_list_entry), | ||
302 | DMA_TO_DEVICE); | ||
256 | kfree(std->pl_virt); | 303 | kfree(std->pl_virt); |
304 | std->pl_virt = NULL; | ||
257 | } | 305 | } |
258 | |||
259 | kfree(std); | 306 | kfree(std); |
260 | } | 307 | } |
261 | 308 | ||
@@ -293,12 +340,17 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f | |||
293 | { | 340 | { |
294 | dma_addr_t dma_addr = std->dma_addr; | 341 | dma_addr_t dma_addr = std->dma_addr; |
295 | dma_addr_t sp, ep; | 342 | dma_addr_t sp, ep; |
296 | size_t std_len = std->len; | ||
297 | size_t pl_len; | 343 | size_t pl_len; |
298 | int p; | 344 | int p; |
299 | 345 | ||
300 | sp = ALIGN(dma_addr, WHCI_PAGE_SIZE); | 346 | /* Short buffers don't need a page list. */ |
301 | ep = dma_addr + std_len; | 347 | if (std->len <= WHCI_PAGE_SIZE) { |
348 | std->num_pointers = 0; | ||
349 | return 0; | ||
350 | } | ||
351 | |||
352 | sp = dma_addr & ~(WHCI_PAGE_SIZE-1); | ||
353 | ep = dma_addr + std->len; | ||
302 | std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); | 354 | std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); |
303 | 355 | ||
304 | pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); | 356 | pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); |
@@ -309,7 +361,7 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f | |||
309 | 361 | ||
310 | for (p = 0; p < std->num_pointers; p++) { | 362 | for (p = 0; p < std->num_pointers; p++) { |
311 | std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); | 363 | std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); |
312 | dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE); | 364 | dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); |
313 | } | 365 | } |
314 | 366 | ||
315 | return 0; | 367 | return 0; |
@@ -339,6 +391,238 @@ static void urb_dequeue_work(struct work_struct *work) | |||
339 | spin_unlock_irqrestore(&whc->lock, flags); | 391 | spin_unlock_irqrestore(&whc->lock, flags); |
340 | } | 392 | } |
341 | 393 | ||
394 | static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset, | ||
395 | struct urb *urb, gfp_t mem_flags) | ||
396 | { | ||
397 | struct whc_std *std; | ||
398 | |||
399 | std = kzalloc(sizeof(struct whc_std), mem_flags); | ||
400 | if (std == NULL) | ||
401 | return NULL; | ||
402 | |||
403 | std->urb = urb; | ||
404 | std->qtd = NULL; | ||
405 | |||
406 | INIT_LIST_HEAD(&std->list_node); | ||
407 | list_add_tail(&std->list_node, &qset->stds); | ||
408 | |||
409 | return std; | ||
410 | } | ||
411 | |||
412 | static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb, | ||
413 | gfp_t mem_flags) | ||
414 | { | ||
415 | size_t remaining; | ||
416 | struct scatterlist *sg; | ||
417 | int i; | ||
418 | int ntds = 0; | ||
419 | struct whc_std *std = NULL; | ||
420 | struct whc_page_list_entry *entry; | ||
421 | dma_addr_t prev_end = 0; | ||
422 | size_t pl_len; | ||
423 | int p = 0; | ||
424 | |||
425 | dev_dbg(&whc->umc->dev, "adding urb w/ sg of length %d\n", urb->transfer_buffer_length); | ||
426 | |||
427 | remaining = urb->transfer_buffer_length; | ||
428 | |||
429 | for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) { | ||
430 | dma_addr_t dma_addr; | ||
431 | size_t dma_remaining; | ||
432 | dma_addr_t sp, ep; | ||
433 | int num_pointers; | ||
434 | |||
435 | if (remaining == 0) { | ||
436 | break; | ||
437 | } | ||
438 | |||
439 | dma_addr = sg_dma_address(sg); | ||
440 | dma_remaining = min(sg_dma_len(sg), remaining); | ||
441 | |||
442 | dev_dbg(&whc->umc->dev, "adding sg[%d] %08x %d\n", i, (unsigned)dma_addr, | ||
443 | dma_remaining); | ||
444 | |||
445 | while (dma_remaining) { | ||
446 | size_t dma_len; | ||
447 | |||
448 | /* | ||
449 | * We can use the previous std (if it exists) provided that: | ||
450 | * - the previous one ended on a page boundary. | ||
451 | * - the current one begins on a page boundary. | ||
452 | * - the previous one isn't full. | ||
453 | * | ||
454 | * If a new std is needed but the previous one | ||
455 | * did not end on a wMaxPacketSize boundary | ||
456 | * then this sg list cannot be mapped onto | ||
457 | * multiple qTDs. Return an error and let the | ||
458 | * caller sort it out. | ||
459 | */ | ||
460 | if (!std | ||
461 | || (prev_end & (WHCI_PAGE_SIZE-1)) | ||
462 | || (dma_addr & (WHCI_PAGE_SIZE-1)) | ||
463 | || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) { | ||
464 | if (prev_end % qset->max_packet != 0) | ||
465 | return -EINVAL; | ||
466 | dev_dbg(&whc->umc->dev, "need new std\n"); | ||
467 | std = qset_new_std(whc, qset, urb, mem_flags); | ||
468 | if (std == NULL) { | ||
469 | return -ENOMEM; | ||
470 | } | ||
471 | ntds++; | ||
472 | p = 0; | ||
473 | } | ||
474 | |||
475 | dma_len = dma_remaining; | ||
476 | |||
477 | /* | ||
478 | * If the remainder in this element doesn't | ||
479 | * fit in a single qTD, end the qTD on a | ||
480 | * wMaxPacketSize boundary. | ||
481 | */ | ||
482 | if (std->len + dma_len > QTD_MAX_XFER_SIZE) { | ||
483 | dma_len = QTD_MAX_XFER_SIZE - std->len; | ||
484 | ep = ((dma_addr + dma_len) / qset->max_packet) * qset->max_packet; | ||
485 | dma_len = ep - dma_addr; | ||
486 | } | ||
487 | |||
488 | dev_dbg(&whc->umc->dev, "adding %d\n", dma_len); | ||
489 | |||
490 | std->len += dma_len; | ||
491 | std->ntds_remaining = -1; /* filled in later */ | ||
492 | |||
493 | sp = dma_addr & ~(WHCI_PAGE_SIZE-1); | ||
494 | ep = dma_addr + dma_len; | ||
495 | num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); | ||
496 | std->num_pointers += num_pointers; | ||
497 | |||
498 | dev_dbg(&whc->umc->dev, "need %d more (%d total) page pointers\n", | ||
499 | num_pointers, std->num_pointers); | ||
500 | |||
501 | pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); | ||
502 | |||
503 | std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags); | ||
504 | if (std->pl_virt == NULL) { | ||
505 | return -ENOMEM; | ||
506 | } | ||
507 | |||
508 | for (;p < std->num_pointers; p++, entry++) { | ||
509 | dev_dbg(&whc->umc->dev, "e[%d] %08x\n", p, dma_addr); | ||
510 | std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); | ||
511 | dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); | ||
512 | } | ||
513 | |||
514 | prev_end = dma_addr = ep; | ||
515 | dma_remaining -= dma_len; | ||
516 | remaining -= dma_len; | ||
517 | } | ||
518 | } | ||
519 | |||
520 | dev_dbg(&whc->umc->dev, "used %d tds\n", ntds); | ||
521 | |||
522 | /* Now the number of stds is known, go back and fill in | ||
523 | std->ntds_remaining. */ | ||
524 | list_for_each_entry(std, &qset->stds, list_node) { | ||
525 | if (std->ntds_remaining == -1) { | ||
526 | pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); | ||
527 | std->ntds_remaining = ntds--; | ||
528 | std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, | ||
529 | pl_len, DMA_TO_DEVICE); | ||
530 | } | ||
531 | } | ||
532 | return 0; | ||
533 | } | ||
534 | |||
535 | /** | ||
536 | * qset_add_urb_sg_linearize - add an urb with sg list, copying the data | ||
537 | * | ||
538 | * If the URB contains an sg list whose elements cannot be directly | ||
539 | * mapped to qTDs then the data must be transferred via bounce | ||
540 | * buffers. | ||
541 | */ | ||
542 | static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset, | ||
543 | struct urb *urb, gfp_t mem_flags) | ||
544 | { | ||
545 | bool is_out = usb_pipeout(urb->pipe); | ||
546 | size_t max_std_len; | ||
547 | size_t remaining; | ||
548 | int ntds = 0; | ||
549 | struct whc_std *std = NULL; | ||
550 | void *bounce = NULL; | ||
551 | struct scatterlist *sg; | ||
552 | int i; | ||
553 | |||
554 | /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */ | ||
555 | max_std_len = qset->max_burst * qset->max_packet; | ||
556 | |||
557 | remaining = urb->transfer_buffer_length; | ||
558 | |||
559 | for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) { | ||
560 | size_t len; | ||
561 | size_t sg_remaining; | ||
562 | void *orig; | ||
563 | |||
564 | if (remaining == 0) { | ||
565 | break; | ||
566 | } | ||
567 | |||
568 | sg_remaining = min(remaining, sg->length); | ||
569 | orig = sg_virt(sg); | ||
570 | |||
571 | dev_dbg(&whc->umc->dev, "adding sg[%d] %d\n", i, sg_remaining); | ||
572 | |||
573 | while (sg_remaining) { | ||
574 | if (!std || std->len == max_std_len) { | ||
575 | dev_dbg(&whc->umc->dev, "need new std\n"); | ||
576 | std = qset_new_std(whc, qset, urb, mem_flags); | ||
577 | if (std == NULL) | ||
578 | return -ENOMEM; | ||
579 | std->bounce_buf = kmalloc(max_std_len, mem_flags); | ||
580 | if (std->bounce_buf == NULL) | ||
581 | return -ENOMEM; | ||
582 | std->bounce_sg = sg; | ||
583 | std->bounce_offset = orig - sg_virt(sg); | ||
584 | bounce = std->bounce_buf; | ||
585 | ntds++; | ||
586 | } | ||
587 | |||
588 | len = min(sg_remaining, max_std_len - std->len); | ||
589 | |||
590 | dev_dbg(&whc->umc->dev, "added %d from sg[%d] @ offset %d\n", | ||
591 | len, i, orig - sg_virt(sg)); | ||
592 | |||
593 | if (is_out) | ||
594 | memcpy(bounce, orig, len); | ||
595 | |||
596 | std->len += len; | ||
597 | std->ntds_remaining = -1; /* filled in later */ | ||
598 | |||
599 | bounce += len; | ||
600 | orig += len; | ||
601 | sg_remaining -= len; | ||
602 | remaining -= len; | ||
603 | } | ||
604 | } | ||
605 | |||
606 | /* | ||
607 | * For each of the new sTDs, map the bounce buffers, create | ||
608 | * page lists (if necessary), and fill in std->ntds_remaining. | ||
609 | */ | ||
610 | list_for_each_entry(std, &qset->stds, list_node) { | ||
611 | if (std->ntds_remaining != -1) | ||
612 | continue; | ||
613 | |||
614 | std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len, | ||
615 | is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | ||
616 | |||
617 | if (qset_fill_page_list(whc, std, mem_flags) < 0) | ||
618 | return -ENOMEM; | ||
619 | |||
620 | std->ntds_remaining = ntds--; | ||
621 | } | ||
622 | |||
623 | return 0; | ||
624 | } | ||
625 | |||
342 | /** | 626 | /** |
343 | * qset_add_urb - add an urb to the qset's queue. | 627 | * qset_add_urb - add an urb to the qset's queue. |
344 | * | 628 | * |
@@ -353,10 +637,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, | |||
353 | int remaining = urb->transfer_buffer_length; | 637 | int remaining = urb->transfer_buffer_length; |
354 | u64 transfer_dma = urb->transfer_dma; | 638 | u64 transfer_dma = urb->transfer_dma; |
355 | int ntds_remaining; | 639 | int ntds_remaining; |
356 | 640 | int ret; | |
357 | ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); | ||
358 | if (ntds_remaining == 0) | ||
359 | ntds_remaining = 1; | ||
360 | 641 | ||
361 | wurb = kzalloc(sizeof(struct whc_urb), mem_flags); | 642 | wurb = kzalloc(sizeof(struct whc_urb), mem_flags); |
362 | if (wurb == NULL) | 643 | if (wurb == NULL) |
@@ -366,32 +647,41 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, | |||
366 | wurb->urb = urb; | 647 | wurb->urb = urb; |
367 | INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); | 648 | INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); |
368 | 649 | ||
650 | if (urb->sg) { | ||
651 | ret = qset_add_urb_sg(whc, qset, urb, mem_flags); | ||
652 | if (ret == -EINVAL) { | ||
653 | dev_dbg(&whc->umc->dev, "linearizing %d octet urb\n", | ||
654 | urb->transfer_buffer_length); | ||
655 | qset_free_stds(qset, urb); | ||
656 | ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags); | ||
657 | } | ||
658 | if (ret < 0) | ||
659 | goto err_no_mem; | ||
660 | return 0; | ||
661 | } | ||
662 | |||
663 | ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); | ||
664 | if (ntds_remaining == 0) | ||
665 | ntds_remaining = 1; | ||
666 | |||
369 | while (ntds_remaining) { | 667 | while (ntds_remaining) { |
370 | struct whc_std *std; | 668 | struct whc_std *std; |
371 | size_t std_len; | 669 | size_t std_len; |
372 | 670 | ||
373 | std = kmalloc(sizeof(struct whc_std), mem_flags); | ||
374 | if (std == NULL) | ||
375 | goto err_no_mem; | ||
376 | |||
377 | std_len = remaining; | 671 | std_len = remaining; |
378 | if (std_len > QTD_MAX_XFER_SIZE) | 672 | if (std_len > QTD_MAX_XFER_SIZE) |
379 | std_len = QTD_MAX_XFER_SIZE; | 673 | std_len = QTD_MAX_XFER_SIZE; |
380 | 674 | ||
381 | std->urb = urb; | 675 | std = qset_new_std(whc, qset, urb, mem_flags); |
676 | if (std == NULL) | ||
677 | goto err_no_mem; | ||
678 | |||
382 | std->dma_addr = transfer_dma; | 679 | std->dma_addr = transfer_dma; |
383 | std->len = std_len; | 680 | std->len = std_len; |
384 | std->ntds_remaining = ntds_remaining; | 681 | std->ntds_remaining = ntds_remaining; |
385 | std->qtd = NULL; | ||
386 | 682 | ||
387 | INIT_LIST_HEAD(&std->list_node); | 683 | if (qset_fill_page_list(whc, std, mem_flags) < 0) |
388 | list_add_tail(&std->list_node, &qset->stds); | 684 | goto err_no_mem; |
389 | |||
390 | if (std_len > WHCI_PAGE_SIZE) { | ||
391 | if (qset_fill_page_list(whc, std, mem_flags) < 0) | ||
392 | goto err_no_mem; | ||
393 | } else | ||
394 | std->num_pointers = 0; | ||
395 | 685 | ||
396 | ntds_remaining--; | 686 | ntds_remaining--; |
397 | remaining -= std_len; | 687 | remaining -= std_len; |
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h index 24e94d983c5e..c80c7d93bc4a 100644 --- a/drivers/usb/host/whci/whcd.h +++ b/drivers/usb/host/whci/whcd.h | |||
@@ -84,6 +84,11 @@ struct whc { | |||
84 | * @len: the length of data in the associated TD. | 84 | * @len: the length of data in the associated TD. |
85 | * @ntds_remaining: number of TDs (starting from this one) in this transfer. | 85 | * @ntds_remaining: number of TDs (starting from this one) in this transfer. |
86 | * | 86 | * |
87 | * @bounce_buf: a bounce buffer if the std was from an urb with a sg | ||
88 | * list that could not be mapped to qTDs directly. | ||
89 | * @bounce_sg: the first scatterlist element bounce_buf is for. | ||
90 | * @bounce_offset: the offset into bounce_sg for the start of bounce_buf. | ||
91 | * | ||
87 | * Queued URBs may require more TDs than are available in a qset so we | 92 | * Queued URBs may require more TDs than are available in a qset so we |
88 | * use a list of these "software TDs" (sTDs) to hold per-TD data. | 93 | * use a list of these "software TDs" (sTDs) to hold per-TD data. |
89 | */ | 94 | */ |
@@ -97,6 +102,10 @@ struct whc_std { | |||
97 | int num_pointers; | 102 | int num_pointers; |
98 | dma_addr_t dma_addr; | 103 | dma_addr_t dma_addr; |
99 | struct whc_page_list_entry *pl_virt; | 104 | struct whc_page_list_entry *pl_virt; |
105 | |||
106 | void *bounce_buf; | ||
107 | struct scatterlist *bounce_sg; | ||
108 | unsigned bounce_offset; | ||
100 | }; | 109 | }; |
101 | 110 | ||
102 | /** | 111 | /** |
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h index e8d0001605be..d5e5c3aacced 100644 --- a/drivers/usb/host/whci/whci-hc.h +++ b/drivers/usb/host/whci/whci-hc.h | |||
@@ -267,8 +267,9 @@ struct whc_qset { | |||
267 | unsigned reset:1; | 267 | unsigned reset:1; |
268 | struct urb *pause_after_urb; | 268 | struct urb *pause_after_urb; |
269 | struct completion remove_complete; | 269 | struct completion remove_complete; |
270 | int max_burst; | 270 | uint16_t max_packet; |
271 | int max_seq; | 271 | uint8_t max_burst; |
272 | uint8_t max_seq; | ||
272 | }; | 273 | }; |
273 | 274 | ||
274 | static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target) | 275 | static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target) |