author    Thomas Pugliese <thomas.pugliese@gmail.com>  2013-06-11 11:39:31 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-06-17 16:41:58 -0400
commit    2b81c083c62909dc2af0d8d2e8687b99d259954f (patch)
tree      e6cd643e09b056163e9393996231d7346e85cc74 /drivers/usb/wusbcore
parent    5b92f46c967706d43717aa666c50c6ea0b12da43 (diff)
usb: wire adapter: add scatter gather support
This patch adds support for scatter gather DMA to the wire adapter and
updates the HWA to advertise support for SG transfers.  This allows the
block layer to submit transfer requests to the HWA HC without first
breaking them up into PAGE_SIZE requests.

Signed-off-by: Thomas Pugliese <thomas.pugliese@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/usb/wusbcore')
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c | 181
1 file changed, 166 insertions(+), 15 deletions(-)
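
The HWA advertisement half of the change lands outside this
diffstat-limited view (in the HWA host controller driver rather than
drivers/usb/wusbcore).  As a hedged sketch of the usual mechanism: an
HCD advertises scatter-gather capability by setting self.sg_tablesize
before the HCD is registered, where ~0 means no limit on sg entries, so
upper layers stop pre-splitting transfers into PAGE_SIZE pieces.  The
helper name and placement below are illustrative assumptions, not the
patch's actual hunk:

	#include <linux/usb/hcd.h>

	/* illustrative only: claim unlimited sg entries per transfer so
	 * the block layer can pass multi-page requests through intact. */
	static void hwahc_advertise_sg(struct usb_hcd *usb_hcd)
	{
		usb_hcd->self.sg_tablesize = ~0;
	}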
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 6ef94bce8c0d..16968c899493 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -85,6 +85,7 @@
 #include <linux/hash.h>
 #include <linux/ratelimit.h>
 #include <linux/export.h>
+#include <linux/scatterlist.h>
 
 #include "wa-hc.h"
 #include "wusbhc.h"
@@ -442,8 +443,7 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
 		goto error;
 	}
 	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
-	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
-		/ xfer->seg_size;
+	xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
 	if (xfer->segs >= WA_SEGS_MAX) {
 		dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
 			(int)(urb->transfer_buffer_length / xfer->seg_size),
@@ -627,6 +627,86 @@ static void wa_seg_cb(struct urb *urb)
 	}
 }
 
+/* allocate an SG list to store bytes_to_transfer bytes and copy the
+ * subset of the in_sg that matches the buffer subset
+ * we are about to transfer. */
+static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
+	const unsigned int bytes_transferred,
+	const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
+{
+	struct scatterlist *out_sg;
+	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
+		nents;
+	struct scatterlist *current_xfer_sg = in_sg;
+	struct scatterlist *current_seg_sg, *last_seg_sg;
+
+	/* skip previously transferred pages. */
+	while ((current_xfer_sg) &&
+			(bytes_processed < bytes_transferred)) {
+		bytes_processed += current_xfer_sg->length;
+
+		/* advance the sg if current segment starts on or past the
+			next page. */
+		if (bytes_processed <= bytes_transferred)
+			current_xfer_sg = sg_next(current_xfer_sg);
+	}
+
+	/* the data for the current segment starts in current_xfer_sg.
+		calculate the offset. */
+	if (bytes_processed > bytes_transferred) {
+		offset_into_current_page_data = current_xfer_sg->length -
+			(bytes_processed - bytes_transferred);
+	}
+
+	/* calculate the number of pages needed by this segment. */
+	nents = DIV_ROUND_UP((bytes_to_transfer +
+		offset_into_current_page_data +
+		current_xfer_sg->offset),
+		PAGE_SIZE);
+
+	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
+	if (out_sg) {
+		sg_init_table(out_sg, nents);
+
+		/* copy the portion of the incoming SG that correlates to the
+		 * data to be transferred by this segment to the segment SG. */
+		last_seg_sg = current_seg_sg = out_sg;
+		bytes_processed = 0;
+
+		/* reset nents and calculate the actual number of sg entries
+			needed. */
+		nents = 0;
+		while ((bytes_processed < bytes_to_transfer) &&
+				current_seg_sg && current_xfer_sg) {
+			unsigned int page_len = min((current_xfer_sg->length -
+				offset_into_current_page_data),
+				(bytes_to_transfer - bytes_processed));
+
+			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
+				page_len,
+				current_xfer_sg->offset +
+				offset_into_current_page_data);
+
+			bytes_processed += page_len;
+
+			last_seg_sg = current_seg_sg;
+			current_seg_sg = sg_next(current_seg_sg);
+			current_xfer_sg = sg_next(current_xfer_sg);
+
+			/* only the first page may require additional offset. */
+			offset_into_current_page_data = 0;
+			nents++;
+		}
+
+		/* update num_sgs and terminate the list since we may have
+		 * concatenated pages. */
+		sg_mark_end(last_seg_sg);
+		*out_num_sgs = nents;
+	}
+
+	return out_sg;
+}
+
 /*
  * Allocate the segs array and initialize each of them
  *
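
To make the windowing arithmetic above concrete, here is a hedged,
standalone worked example (userspace C, not kernel code; the
page-sized, zero-offset sg entries and the DIV_ROUND_UP definition are
assumptions mirroring include/linux/kernel.h).  For a transfer whose
first 3584 bytes are already sent and whose next segment carries 4096
bytes, the skip loop stops inside entry 0, the intra-page offset works
out to 3584, and the subset list needs two entries: 512 bytes at
offset 3584 of page 0, then 3584 bytes at offset 0 of page 1.

	#include <stdio.h>

	#define PAGE_SZ 4096u
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* three page-sized sg entries, zero page offsets (assumed) */
		unsigned int sg_len[] = { PAGE_SZ, PAGE_SZ, PAGE_SZ };
		unsigned int bytes_transferred = 3584;	/* already sent */
		unsigned int bytes_to_transfer = 4096;	/* this segment */
		unsigned int i = 0, bytes_processed = 0, offset = 0, nents;

		/* skip previously transferred pages (first loop above) */
		while (i < 3 && bytes_processed < bytes_transferred) {
			bytes_processed += sg_len[i];
			if (bytes_processed <= bytes_transferred)
				i++;
		}
		/* offset of the segment's first byte within that entry */
		if (bytes_processed > bytes_transferred)
			offset = sg_len[i] -
				(bytes_processed - bytes_transferred);

		/* worst-case entry count, as in the nents calculation
		 * (sg offsets are 0 here by assumption) */
		nents = DIV_ROUND_UP(bytes_to_transfer + offset, PAGE_SZ);

		/* prints: start entry 0, offset 3584, nents 2 */
		printf("start entry %u, offset %u, nents %u\n",
		       i, offset, nents);
		return 0;
	}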
@@ -663,9 +743,9 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 					 dto_epd->bEndpointAddress),
 				  &seg->xfer_hdr, xfer_hdr_size,
 				  wa_seg_cb, seg);
-		buf_itr_size = buf_size > xfer->seg_size ?
-			xfer->seg_size : buf_size;
+		buf_itr_size = min(buf_size, xfer->seg_size);
 		if (xfer->is_inbound == 0 && buf_size > 0) {
+			/* outbound data. */
 			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
 			if (seg->dto_urb == NULL)
 				goto error_dto_alloc;
@@ -679,9 +759,42 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 					xfer->urb->transfer_dma + buf_itr;
 				seg->dto_urb->transfer_flags |=
 					URB_NO_TRANSFER_DMA_MAP;
-			} else
-				seg->dto_urb->transfer_buffer =
-					xfer->urb->transfer_buffer + buf_itr;
+				seg->dto_urb->transfer_buffer = NULL;
+				seg->dto_urb->sg = NULL;
+				seg->dto_urb->num_sgs = 0;
+			} else {
+				/* do buffer or SG processing. */
+				seg->dto_urb->transfer_flags &=
+					~URB_NO_TRANSFER_DMA_MAP;
+				/* this should always be 0 before a resubmit. */
+				seg->dto_urb->num_mapped_sgs = 0;
+
+				if (xfer->urb->transfer_buffer) {
+					seg->dto_urb->transfer_buffer =
+						xfer->urb->transfer_buffer +
+						buf_itr;
+					seg->dto_urb->sg = NULL;
+					seg->dto_urb->num_sgs = 0;
+				} else {
+					/* allocate an SG list to store seg_size
+					    bytes and copy the subset of the
+					    xfer->urb->sg that matches the
+					    buffer subset we are about to read.
+					*/
+					seg->dto_urb->sg =
+						wa_xfer_create_subset_sg(
+						xfer->urb->sg,
+						buf_itr, buf_itr_size,
+						&(seg->dto_urb->num_sgs));
+
+					if (!(seg->dto_urb->sg)) {
+						seg->dto_urb->num_sgs = 0;
+						goto error_sg_alloc;
+					}
+
+					seg->dto_urb->transfer_buffer = NULL;
+				}
+			}
 			seg->dto_urb->transfer_buffer_length = buf_itr_size;
 		}
 		seg->status = WA_SEG_READY;
@@ -690,6 +803,8 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 	}
 	return 0;
 
+error_sg_alloc:
+	kfree(seg->dto_urb);
 error_dto_alloc:
 	kfree(xfer->seg[cnt]);
 	cnt--;
@@ -1026,7 +1141,8 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
 	unsigned long my_flags;
 	unsigned cant_sleep = irqs_disabled() | in_atomic();
 
-	if (urb->transfer_buffer == NULL
+	if ((urb->transfer_buffer == NULL)
+	    && (urb->sg == NULL)
 	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 	    && urb->transfer_buffer_length != 0) {
 		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
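
For context, a hedged sketch of the caller's side that this relaxed
check now admits: an urb may arrive with urb->sg set and
transfer_buffer NULL.  The names my_udev, pipe, pages and
my_completion below are placeholders, not code from this patch:

	#include <linux/scatterlist.h>
	#include <linux/slab.h>
	#include <linux/usb.h>

	/* submit a bulk urb backed by a two-entry scatterlist */
	static int submit_sg_urb(struct usb_device *my_udev, unsigned int pipe,
				 struct page **pages, usb_complete_t my_completion)
	{
		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
		struct scatterlist *sg;

		if (!urb)
			return -ENOMEM;
		sg = kcalloc(2, sizeof(*sg), GFP_KERNEL);
		if (!sg) {
			usb_free_urb(urb);
			return -ENOMEM;
		}
		sg_init_table(sg, 2);
		sg_set_page(&sg[0], pages[0], PAGE_SIZE, 0);
		sg_set_page(&sg[1], pages[1], PAGE_SIZE, 0);

		/* no linear transfer_buffer: the sg list carries the data */
		usb_fill_bulk_urb(urb, my_udev, pipe, NULL, 2 * PAGE_SIZE,
				  my_completion, NULL);
		urb->sg = sg;
		urb->num_sgs = 2;
		return usb_submit_urb(urb, GFP_KERNEL);
	}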
@@ -1261,7 +1377,7 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
 	seg = xfer->seg[seg_idx];
 	rpipe = xfer->ep->hcpriv;
 	usb_status = xfer_result->bTransferStatus;
-	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
+	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
 		xfer, seg_idx, usb_status, seg->status);
 	if (seg->status == WA_SEG_ABORTED
 	    || seg->status == WA_SEG_ERROR)	/* already handled */
@@ -1276,8 +1392,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
 	}
 	if (usb_status & 0x80) {
 		seg->result = wa_xfer_status_to_errno(usb_status);
-		dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
-			xfer, seg->index, usb_status);
+		dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
+			xfer, xfer->id, seg->index, usb_status);
 		goto error_complete;
 	}
 	/* FIXME: we ignore warnings, tally them for stats */
@@ -1286,18 +1402,47 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
 	if (xfer->is_inbound) {	/* IN data phase: read to buffer */
 		seg->status = WA_SEG_DTI_PENDING;
 		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+		/* this should always be 0 before a resubmit. */
+		wa->buf_in_urb->num_mapped_sgs = 0;
+
 		if (xfer->is_dma) {
 			wa->buf_in_urb->transfer_dma =
 				xfer->urb->transfer_dma
-				+ seg_idx * xfer->seg_size;
+				+ (seg_idx * xfer->seg_size);
 			wa->buf_in_urb->transfer_flags
 				|= URB_NO_TRANSFER_DMA_MAP;
+			wa->buf_in_urb->transfer_buffer = NULL;
+			wa->buf_in_urb->sg = NULL;
+			wa->buf_in_urb->num_sgs = 0;
 		} else {
-			wa->buf_in_urb->transfer_buffer =
-				xfer->urb->transfer_buffer
-				+ seg_idx * xfer->seg_size;
+			/* do buffer or SG processing. */
 			wa->buf_in_urb->transfer_flags
 				&= ~URB_NO_TRANSFER_DMA_MAP;
+
+			if (xfer->urb->transfer_buffer) {
+				wa->buf_in_urb->transfer_buffer =
+					xfer->urb->transfer_buffer
+					+ (seg_idx * xfer->seg_size);
+				wa->buf_in_urb->sg = NULL;
+				wa->buf_in_urb->num_sgs = 0;
+			} else {
+				/* allocate an SG list to store seg_size bytes
+				    and copy the subset of the xfer->urb->sg
+				    that matches the buffer subset we are
+				    about to read. */
+				wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
+					xfer->urb->sg,
+					seg_idx * xfer->seg_size,
+					le32_to_cpu(
+						xfer_result->dwTransferLength),
+					&(wa->buf_in_urb->num_sgs));
+
+				if (!(wa->buf_in_urb->sg)) {
+					wa->buf_in_urb->num_sgs = 0;
+					goto error_sg_alloc;
+				}
+				wa->buf_in_urb->transfer_buffer = NULL;
+			}
 		}
 		wa->buf_in_urb->transfer_buffer_length =
 			le32_to_cpu(xfer_result->dwTransferLength);
@@ -1330,6 +1475,8 @@ error_submit_buf_in:
 	dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
 		xfer, seg_idx, result);
 	seg->result = result;
+	kfree(wa->buf_in_urb->sg);
+error_sg_alloc:
 error_complete:
 	seg->status = WA_SEG_ERROR;
 	xfer->segs_done++;
@@ -1381,6 +1528,10 @@ static void wa_buf_in_cb(struct urb *urb)
 	unsigned long flags;
 	u8 done = 0;
 
+	/* free the sg if it was used. */
+	kfree(urb->sg);
+	urb->sg = NULL;
+
 	switch (urb->status) {
 	case 0:
 		spin_lock_irqsave(&xfer->lock, flags);
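
A note on ownership, with a hedged sketch: the subset sg lists from
wa_xfer_create_subset_sg() are allocated with kmalloc(GFP_ATOMIC) at
submit time and every consuming path ends in a plain kfree(), either
in wa_buf_in_cb() above or in the error_sg_alloc unwind.  Because
kfree(NULL) is a no-op, the callback's unconditional free is also safe
for linear-buffer transfers that never attached an sg list.  The
skeleton below is illustrative, not code from the patch:

	#include <linux/slab.h>
	#include <linux/usb.h>

	/* illustrative only: the completion side frees whatever the
	 * submit side attached; correct for both SG-backed and
	 * linear-buffer transfers since kfree(NULL) does nothing. */
	static void my_data_phase_cb(struct urb *urb)
	{
		kfree(urb->sg);	/* subset list, if one was created */
		urb->sg = NULL;
		urb->num_sgs = 0;
		/* ... status handling as in wa_buf_in_cb() ... */
	}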