author	Felipe Balbi <balbi@ti.com>	2011-11-28 05:46:59 -0500
committer	Felipe Balbi <balbi@ti.com>	2011-12-21 06:24:46 -0500
commit	eeb720fb21d61dfc3aac780e721150998ef603af
tree	2d8877fa1c96c970d5aa4956e81b4622ea6c4e7b
parent	42f8eb7a1087442e9710ce75b355c0f28aadbf96
usb: dwc3: gadget: add support for SG lists
Add support for SG lists in the dwc3 driver. With this we can, for example, use the VFS layer's SG lists in storage gadgets, so that we can start bigger transfers and improve throughput.

Signed-off-by: Felipe Balbi <balbi@ti.com>
-rw-r--r--	drivers/usb/dwc3/gadget.c	115
1 file changed, 98 insertions(+), 17 deletions(-)
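What the new gadget.sg_supported flag buys function drivers, roughly: instead of one flat buffer, a request can carry a scatterlist through the existing usb_request sg/num_sgs fields, and dwc3 will map it with dma_map_sg() and chain one TRB per mapped entry. The sketch below only illustrates that calling convention; example_queue_sg(), its parameters and the page-array buffer are made up for the example, while usb_ep_queue(), sg_init_table(), sg_set_page() and the usb_request fields are existing kernel API.

#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/usb/gadget.h>

/*
 * Illustrative only: queue one request backed by an array of full pages
 * as a scatterlist, assuming the UDC advertised gadget->sg_supported and
 * total_len == num_pages * PAGE_SIZE.  The caller frees req->sg once the
 * request completes.
 */
static int example_queue_sg(struct usb_ep *ep, struct usb_request *req,
		struct page **pages, unsigned num_pages, unsigned total_len)
{
	struct scatterlist *sg;
	unsigned i;

	sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, num_pages);
	for (i = 0; i < num_pages; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	req->sg = sg;			/* mapped by the UDC via dma_map_sg() */
	req->num_sgs = num_pages;	/* entries in the list */
	req->length = total_len;	/* total transfer length */

	return usb_ep_queue(ep, req, GFP_KERNEL);
}

A storage gadget can point such a list straight at the pages the VFS already handed it, which is the bigger-transfer, higher-throughput case the commit message describes.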
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0292b0617d72..ddc7a43592c0 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -65,6 +65,22 @@ void dwc3_map_buffer_to_dma(struct dwc3_request *req)
 		return;
 	}
 
+	if (req->request.num_sgs) {
+		int mapped;
+
+		mapped = dma_map_sg(dwc->dev, req->request.sg,
+				req->request.num_sgs,
+				req->direction ? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		if (mapped < 0) {
+			dev_err(dwc->dev, "failed to map SGs\n");
+			return;
+		}
+
+		req->request.num_mapped_sgs = mapped;
+		return;
+	}
+
 	if (req->request.dma == DMA_ADDR_INVALID) {
 		req->request.dma = dma_map_single(dwc->dev, req->request.buf,
 				req->request.length, req->direction
@@ -82,6 +98,17 @@ void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
 		return;
 	}
 
+	if (req->request.num_mapped_sgs) {
+		req->request.dma = DMA_ADDR_INVALID;
+		dma_unmap_sg(dwc->dev, req->request.sg,
+				req->request.num_sgs,
+				req->direction ? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+
+		req->request.num_mapped_sgs = 0;
+		return;
+	}
+
 	if (req->mapped) {
 		dma_unmap_single(dwc->dev, req->request.dma,
 				req->request.length, req->direction
@@ -97,7 +124,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 	struct dwc3 *dwc = dep->dwc;
 
 	if (req->queued) {
-		dep->busy_slot++;
+		if (req->request.num_mapped_sgs)
+			dep->busy_slot += req->request.num_mapped_sgs;
+		else
+			dep->busy_slot++;
+
 		/*
 		 * Skip LINK TRB. We can't use req->trb and check for
 		 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
@@ -108,6 +139,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 			dep->busy_slot++;
 	}
 	list_del(&req->list);
+	req->trb = NULL;
 
 	if (req->request.status == -EINPROGRESS)
 		req->request.status = status;
@@ -545,13 +577,20 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
  * @req: dwc3_request pointer
  */
 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
-		struct dwc3_request *req, unsigned last)
+		struct dwc3_request *req, dma_addr_t dma,
+		unsigned length, unsigned last, unsigned chain)
 {
+	struct dwc3 *dwc = dep->dwc;
 	struct dwc3_trb_hw *trb_hw;
 	struct dwc3_trb trb;
 
 	unsigned int cur_slot;
 
+	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
+			dep->name, req, (unsigned long long) dma,
+			length, last ? " last" : "",
+			chain ? " chain" : "");
+
 	trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
 	cur_slot = dep->free_slot;
 	dep->free_slot++;
@@ -561,15 +600,18 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 			usb_endpoint_xfer_isoc(dep->desc))
 		return;
 
-	dwc3_gadget_move_request_queued(req);
 	memset(&trb, 0, sizeof(trb));
-
-	req->trb = trb_hw;
+	if (!req->trb) {
+		dwc3_gadget_move_request_queued(req);
+		req->trb = trb_hw;
+		req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
+	}
 
 	if (usb_endpoint_xfer_isoc(dep->desc)) {
 		trb.isp_imi = true;
 		trb.csp = true;
 	} else {
+		trb.chn = chain;
 		trb.lst = last;
 	}
 
@@ -601,12 +643,11 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 		BUG();
 	}
 
-	trb.length = req->request.length;
-	trb.bplh = req->request.dma;
+	trb.length = length;
+	trb.bplh = dma;
 	trb.hwo = true;
 
 	dwc3_trb_to_hw(&trb, trb_hw);
-	req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
 }
 
 /*
@@ -663,19 +704,58 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
 		return;
 
 	list_for_each_entry_safe(req, n, &dep->request_list, list) {
-		trbs_left--;
+		unsigned length;
+		dma_addr_t dma;
 
-		if (!trbs_left)
-			last_one = 1;
+		if (req->request.num_mapped_sgs > 0) {
+			struct usb_request *request = &req->request;
+			struct scatterlist *sg = request->sg;
+			struct scatterlist *s;
+			int i;
 
-		/* Is this the last request? */
-		if (list_empty(&dep->request_list))
-			last_one = 1;
+			for_each_sg(sg, s, request->num_mapped_sgs, i) {
+				unsigned chain = true;
 
-		dwc3_prepare_one_trb(dep, req, last_one);
+				length = sg_dma_len(s);
+				dma = sg_dma_address(s);
 
-		if (last_one)
-			break;
+				if (i == (request->num_mapped_sgs - 1)
+						|| sg_is_last(s)) {
+					last_one = true;
+					chain = false;
+				}
+
+				trbs_left--;
+				if (!trbs_left)
+					last_one = true;
+
+				if (last_one)
+					chain = false;
+
+				dwc3_prepare_one_trb(dep, req, dma, length,
+						last_one, chain);
+
+				if (last_one)
+					break;
+			}
+		} else {
+			dma = req->request.dma;
+			length = req->request.length;
+			trbs_left--;
+
+			if (!trbs_left)
+				last_one = 1;
+
+			/* Is this the last request? */
+			if (list_is_last(&req->list, &dep->request_list))
+				last_one = 1;
+
+			dwc3_prepare_one_trb(dep, req, dma, length,
+					last_one, false);
+
+			if (last_one)
+				break;
+		}
 	}
 }
 
@@ -1989,6 +2069,7 @@ int __devinit dwc3_gadget_init(struct dwc3 *dwc)
 	dwc->gadget.max_speed = USB_SPEED_SUPER;
 	dwc->gadget.speed = USB_SPEED_UNKNOWN;
 	dwc->gadget.dev.parent = dwc->dev;
+	dwc->gadget.sg_supported = true;
 
 	dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
 