aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorMichael Grzeschik <m.grzeschik@pengutronix.de>2013-06-13 10:59:53 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2013-06-17 16:45:47 -0400
commitcc9e6c495b0a37cc4f7003e470bcbb15ea760377 (patch)
tree620189fb514e382806ac61237b2812476c9abe07 /drivers
parent20a677fd63c57edd5b0c463baa44f133b2f2d4a0 (diff)
usb: chipidea: udc: manage dynamic amount of tds with a linked list
Instead of having a limited number of usable tds in the udc, we use a linked list to support a dynamic amount of needed tds for all special gadget types. This improves throughput.

Signed-off-by: Michael Grzeschik <m.grzeschik@pengutronix.de>
Reviewed-by: Felipe Balbi <balbi@ti.com>
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/usb/chipidea/debug.c19
-rw-r--r--drivers/usb/chipidea/udc.c161
-rw-r--r--drivers/usb/chipidea/udc.h11
3 files changed, 129 insertions(+), 62 deletions(-)
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 36a7063a6cba..64b8c32d4f33 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -162,6 +162,7 @@ static int ci_requests_show(struct seq_file *s, void *data)
162 unsigned long flags; 162 unsigned long flags;
163 struct list_head *ptr = NULL; 163 struct list_head *ptr = NULL;
164 struct ci13xxx_req *req = NULL; 164 struct ci13xxx_req *req = NULL;
165 struct td_node *node, *tmpnode;
165 unsigned i, j, qsize = sizeof(struct ci13xxx_td)/sizeof(u32); 166 unsigned i, j, qsize = sizeof(struct ci13xxx_td)/sizeof(u32);
166 167
167 if (ci->role != CI_ROLE_GADGET) { 168 if (ci->role != CI_ROLE_GADGET) {
@@ -174,13 +175,17 @@ static int ci_requests_show(struct seq_file *s, void *data)
174 list_for_each(ptr, &ci->ci13xxx_ep[i].qh.queue) { 175 list_for_each(ptr, &ci->ci13xxx_ep[i].qh.queue) {
175 req = list_entry(ptr, struct ci13xxx_req, queue); 176 req = list_entry(ptr, struct ci13xxx_req, queue);
176 177
177 seq_printf(s, "EP=%02i: TD=%08X %s\n", 178 list_for_each_entry_safe(node, tmpnode, &req->tds, td) {
178 i % (ci->hw_ep_max / 2), (u32)req->dma, 179 seq_printf(s, "EP=%02i: TD=%08X %s\n",
179 ((i < ci->hw_ep_max/2) ? "RX" : "TX")); 180 i % (ci->hw_ep_max / 2),
180 181 (u32)node->dma,
181 for (j = 0; j < qsize; j++) 182 ((i < ci->hw_ep_max/2) ?
182 seq_printf(s, " %04X: %08X\n", j, 183 "RX" : "TX"));
183 *((u32 *)req->ptr + j)); 184
185 for (j = 0; j < qsize; j++)
186 seq_printf(s, " %04X: %08X\n", j,
187 *((u32 *)node->ptr + j));
188 }
184 } 189 }
185 spin_unlock_irqrestore(&ci->lock, flags); 190 spin_unlock_irqrestore(&ci->lock, flags);
186 191
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 8aed28855c04..960814f4179d 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -368,6 +368,46 @@ static int hw_usb_reset(struct ci13xxx *ci)
368/****************************************************************************** 368/******************************************************************************
369 * UTIL block 369 * UTIL block
370 *****************************************************************************/ 370 *****************************************************************************/
371
372static void setup_td_bits(struct td_node *tdnode, unsigned length)
373{
374 memset(tdnode->ptr, 0, sizeof(*tdnode->ptr));
375 tdnode->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
376 tdnode->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
377 tdnode->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
378}
379
380static int add_td_to_list(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq,
381 unsigned length)
382{
383 struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
384 GFP_ATOMIC);
385
386 if (node == NULL)
387 return -ENOMEM;
388
389 node->ptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
390 &node->dma);
391 if (node->ptr == NULL) {
392 kfree(node);
393 return -ENOMEM;
394 }
395
396 setup_td_bits(node, length);
397
398 if (!list_empty(&mReq->tds)) {
399 /* get the last entry */
400 lastnode = list_entry(mReq->tds.prev,
401 struct td_node, td);
402 lastnode->ptr->next = cpu_to_le32(node->dma);
403 }
404
405 INIT_LIST_HEAD(&node->td);
406 list_add_tail(&node->td, &mReq->tds);
407
408 return 0;
409}
410
371/** 411/**
372 * _usb_addr: calculates endpoint address from direction & number 412 * _usb_addr: calculates endpoint address from direction & number
373 * @ep: endpoint 413 * @ep: endpoint
@@ -390,6 +430,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
390 unsigned i; 430 unsigned i;
391 int ret = 0; 431 int ret = 0;
392 unsigned length = mReq->req.length; 432 unsigned length = mReq->req.length;
433 struct td_node *firstnode, *lastnode;
393 434
394 /* don't queue twice */ 435 /* don't queue twice */
395 if (mReq->req.status == -EALREADY) 436 if (mReq->req.status == -EALREADY)
@@ -397,58 +438,46 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
397 438
398 mReq->req.status = -EALREADY; 439 mReq->req.status = -EALREADY;
399 440
400 if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
401 mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
402 &mReq->zdma);
403 if (mReq->zptr == NULL)
404 return -ENOMEM;
405
406 memset(mReq->zptr, 0, sizeof(*mReq->zptr));
407 mReq->zptr->next = cpu_to_le32(TD_TERMINATE);
408 mReq->zptr->token = cpu_to_le32(TD_STATUS_ACTIVE);
409 if (!mReq->req.no_interrupt)
410 mReq->zptr->token |= cpu_to_le32(TD_IOC);
411 }
412 ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir); 441 ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir);
413 if (ret) 442 if (ret)
414 return ret; 443 return ret;
415 444
416 /* 445 firstnode = list_first_entry(&mReq->tds,
417 * TD configuration 446 struct td_node, td);
418 * TODO - handle requests which spawns into several TDs 447
419 */ 448 setup_td_bits(firstnode, length);
420 memset(mReq->ptr, 0, sizeof(*mReq->ptr)); 449
421 mReq->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES)); 450 firstnode->ptr->page[0] = cpu_to_le32(mReq->req.dma);
422 mReq->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
423 mReq->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
424 if (mReq->zptr) {
425 mReq->ptr->next = cpu_to_le32(mReq->zdma);
426 } else {
427 mReq->ptr->next = cpu_to_le32(TD_TERMINATE);
428 if (!mReq->req.no_interrupt)
429 mReq->ptr->token |= cpu_to_le32(TD_IOC);
430 }
431 mReq->ptr->page[0] = cpu_to_le32(mReq->req.dma);
432 for (i = 1; i < TD_PAGE_COUNT; i++) { 451 for (i = 1; i < TD_PAGE_COUNT; i++) {
433 u32 page = mReq->req.dma + i * CI13XXX_PAGE_SIZE; 452 u32 page = mReq->req.dma + i * CI13XXX_PAGE_SIZE;
434 page &= ~TD_RESERVED_MASK; 453 page &= ~TD_RESERVED_MASK;
435 mReq->ptr->page[i] = cpu_to_le32(page); 454 firstnode->ptr->page[i] = cpu_to_le32(page);
436 } 455 }
437 456
457 if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0))
458 add_td_to_list(mEp, mReq, 0);
459
460 lastnode = list_entry(mReq->tds.prev,
461 struct td_node, td);
462
463 lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
464 if (!mReq->req.no_interrupt)
465 lastnode->ptr->token |= cpu_to_le32(TD_IOC);
438 wmb(); 466 wmb();
439 467
440 if (!list_empty(&mEp->qh.queue)) { 468 if (!list_empty(&mEp->qh.queue)) {
441 struct ci13xxx_req *mReqPrev; 469 struct ci13xxx_req *mReqPrev;
442 int n = hw_ep_bit(mEp->num, mEp->dir); 470 int n = hw_ep_bit(mEp->num, mEp->dir);
443 int tmp_stat; 471 int tmp_stat;
444 u32 next = mReq->dma & TD_ADDR_MASK; 472 struct td_node *prevlastnode;
473 u32 next = firstnode->dma & TD_ADDR_MASK;
445 474
446 mReqPrev = list_entry(mEp->qh.queue.prev, 475 mReqPrev = list_entry(mEp->qh.queue.prev,
447 struct ci13xxx_req, queue); 476 struct ci13xxx_req, queue);
448 if (mReqPrev->zptr) 477 prevlastnode = list_entry(mReqPrev->tds.prev,
449 mReqPrev->zptr->next = cpu_to_le32(next); 478 struct td_node, td);
450 else 479
451 mReqPrev->ptr->next = cpu_to_le32(next); 480 prevlastnode->ptr->next = cpu_to_le32(next);
452 wmb(); 481 wmb();
453 if (hw_read(ci, OP_ENDPTPRIME, BIT(n))) 482 if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
454 goto done; 483 goto done;
@@ -462,7 +491,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
462 } 491 }
463 492
464 /* QH configuration */ 493 /* QH configuration */
465 mEp->qh.ptr->td.next = cpu_to_le32(mReq->dma); /* TERMINATE = 0 */ 494 mEp->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
466 mEp->qh.ptr->td.token &= 495 mEp->qh.ptr->td.token &=
467 cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE)); 496 cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
468 497
@@ -491,19 +520,25 @@ done:
491 */ 520 */
492static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) 521static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
493{ 522{
494 u32 tmptoken = le32_to_cpu(mReq->ptr->token); 523 u32 tmptoken;
524 struct td_node *node, *tmpnode, *firstnode;
495 525
496 if (mReq->req.status != -EALREADY) 526 if (mReq->req.status != -EALREADY)
497 return -EINVAL; 527 return -EINVAL;
498 528
499 if ((TD_STATUS_ACTIVE & tmptoken) != 0) 529 firstnode = list_first_entry(&mReq->tds,
500 return -EBUSY; 530 struct td_node, td);
501 531
502 if (mReq->zptr) { 532 list_for_each_entry_safe(node, tmpnode, &mReq->tds, td) {
503 if ((cpu_to_le32(TD_STATUS_ACTIVE) & mReq->zptr->token) != 0) 533 tmptoken = le32_to_cpu(node->ptr->token);
534 if ((TD_STATUS_ACTIVE & tmptoken) != 0)
504 return -EBUSY; 535 return -EBUSY;
505 dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma); 536 if (node != firstnode) {
506 mReq->zptr = NULL; 537 dma_pool_free(mEp->td_pool, node->ptr, node->dma);
538 list_del_init(&node->td);
539 node->ptr = NULL;
540 kfree(node);
541 }
507 } 542 }
508 543
509 mReq->req.status = 0; 544 mReq->req.status = 0;
@@ -537,6 +572,7 @@ static int _ep_nuke(struct ci13xxx_ep *mEp)
537__releases(mEp->lock) 572__releases(mEp->lock)
538__acquires(mEp->lock) 573__acquires(mEp->lock)
539{ 574{
575 struct td_node *node, *tmpnode, *firstnode;
540 if (mEp == NULL) 576 if (mEp == NULL)
541 return -EINVAL; 577 return -EINVAL;
542 578
@@ -549,9 +585,17 @@ __acquires(mEp->lock)
549 list_entry(mEp->qh.queue.next, 585 list_entry(mEp->qh.queue.next,
550 struct ci13xxx_req, queue); 586 struct ci13xxx_req, queue);
551 587
552 if (mReq->zptr) { 588 firstnode = list_first_entry(&mReq->tds,
553 dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma); 589 struct td_node, td);
554 mReq->zptr = NULL; 590
591 list_for_each_entry_safe(node, tmpnode, &mReq->tds, td) {
592 if (node != firstnode) {
593 dma_pool_free(mEp->td_pool, node->ptr,
594 node->dma);
595 list_del_init(&node->td);
596 node->ptr = NULL;
597 kfree(node);
598 }
555 } 599 }
556 600
557 list_del_init(&mReq->queue); 601 list_del_init(&mReq->queue);
@@ -838,9 +882,13 @@ __acquires(mEp->lock)
838 struct ci13xxx_req *mReq, *mReqTemp; 882 struct ci13xxx_req *mReq, *mReqTemp;
839 struct ci13xxx_ep *mEpTemp = mEp; 883 struct ci13xxx_ep *mEpTemp = mEp;
840 int retval = 0; 884 int retval = 0;
885 struct td_node *firstnode;
841 886
842 list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue, 887 list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
843 queue) { 888 queue) {
889 firstnode = list_first_entry(&mReq->tds,
890 struct td_node, td);
891
844 retval = _hardware_dequeue(mEp, mReq); 892 retval = _hardware_dequeue(mEp, mReq);
845 if (retval < 0) 893 if (retval < 0)
846 break; 894 break;
@@ -1143,19 +1191,26 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1143{ 1191{
1144 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); 1192 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1145 struct ci13xxx_req *mReq = NULL; 1193 struct ci13xxx_req *mReq = NULL;
1194 struct td_node *node;
1146 1195
1147 if (ep == NULL) 1196 if (ep == NULL)
1148 return NULL; 1197 return NULL;
1149 1198
1150 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags); 1199 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
1151 if (mReq != NULL) { 1200 node = kzalloc(sizeof(struct td_node), gfp_flags);
1201 if (mReq != NULL && node != NULL) {
1152 INIT_LIST_HEAD(&mReq->queue); 1202 INIT_LIST_HEAD(&mReq->queue);
1203 INIT_LIST_HEAD(&mReq->tds);
1204 INIT_LIST_HEAD(&node->td);
1153 1205
1154 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags, 1206 node->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
1155 &mReq->dma); 1207 &node->dma);
1156 if (mReq->ptr == NULL) { 1208 if (node->ptr == NULL) {
1209 kfree(node);
1157 kfree(mReq); 1210 kfree(mReq);
1158 mReq = NULL; 1211 mReq = NULL;
1212 } else {
1213 list_add_tail(&node->td, &mReq->tds);
1159 } 1214 }
1160 } 1215 }
1161 1216
@@ -1171,6 +1226,7 @@ static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1171{ 1226{
1172 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); 1227 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1173 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req); 1228 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
1229 struct td_node *firstnode;
1174 unsigned long flags; 1230 unsigned long flags;
1175 1231
1176 if (ep == NULL || req == NULL) { 1232 if (ep == NULL || req == NULL) {
@@ -1182,8 +1238,11 @@ static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1182 1238
1183 spin_lock_irqsave(mEp->lock, flags); 1239 spin_lock_irqsave(mEp->lock, flags);
1184 1240
1185 if (mReq->ptr) 1241 firstnode = list_first_entry(&mReq->tds,
1186 dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma); 1242 struct td_node, td);
1243
1244 if (firstnode->ptr)
1245 dma_pool_free(mEp->td_pool, firstnode->ptr, firstnode->dma);
1187 kfree(mReq); 1246 kfree(mReq);
1188 1247
1189 spin_unlock_irqrestore(mEp->lock, flags); 1248 spin_unlock_irqrestore(mEp->lock, flags);
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index a75724a19e1a..0ecc0ad4f513 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -60,6 +60,12 @@ struct ci13xxx_qh {
60 struct usb_ctrlrequest setup; 60 struct usb_ctrlrequest setup;
61} __attribute__ ((packed, aligned(4))); 61} __attribute__ ((packed, aligned(4)));
62 62
63struct td_node {
64 struct list_head td;
65 dma_addr_t dma;
66 struct ci13xxx_td *ptr;
67};
68
63/** 69/**
64 * struct ci13xxx_req - usb request representation 70 * struct ci13xxx_req - usb request representation
65 * @req: request structure for gadget drivers 71 * @req: request structure for gadget drivers
@@ -72,10 +78,7 @@ struct ci13xxx_qh {
72struct ci13xxx_req { 78struct ci13xxx_req {
73 struct usb_request req; 79 struct usb_request req;
74 struct list_head queue; 80 struct list_head queue;
75 struct ci13xxx_td *ptr; 81 struct list_head tds;
76 dma_addr_t dma;
77 struct ci13xxx_td *zptr;
78 dma_addr_t zdma;
79}; 82};
80 83
81#ifdef CONFIG_USB_CHIPIDEA_UDC 84#ifdef CONFIG_USB_CHIPIDEA_UDC