author	Alexander Shishkin <alexander.shishkin@linux.intel.com>	2013-06-13 11:00:03 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-06-17 16:47:25 -0400
commit	2dbc5c4c831418eb88eb1770c567ff21c9957aca (patch)
tree	7990e62c08281a2ca34210c232d8a65925a208b2 /drivers/usb/chipidea
parent	8b2379b49a9b7476b8421a6a404de30cd304cca1 (diff)
usb: chipidea: get rid of camelcase names
Since someone has added camelcase detection to checkpatch.pl, chipidea udc
patches have been very noisy. To make everybody's life easier, this patch
changes camelcase names into something more appropriate to the coding style.
No functional changes.

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
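For context, the noise the changelog refers to comes from checkpatch's CamelCase test; an illustrative run against the pre-patch file (not output quoted from the changelog) would look roughly like:

    $ ./scripts/checkpatch.pl --strict -f drivers/usb/chipidea/udc.c
    CHECK: Avoid CamelCase: <mEp>
    CHECK: Avoid CamelCase: <mReq>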
Diffstat (limited to 'drivers/usb/chipidea')
-rw-r--r--	drivers/usb/chipidea/debug.c	10
-rw-r--r--	drivers/usb/chipidea/udc.c	503
2 files changed, 257 insertions(+), 256 deletions(-)
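The rename mapping applied throughout both files (all pairs taken from the diff below):

    mEpRx    -> hweprx      mEpTx    -> hweptx
    mEp      -> hwep        mEpTemp  -> hweptemp
    mReq     -> hwreq       mReqTemp -> hwreqtemp
    mReqPrev -> hwreqprev   mA       -> ma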
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 64b8c32d4f33..33566219f3bd 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -126,15 +126,15 @@ static int ci_qheads_show(struct seq_file *s, void *data)
 
         spin_lock_irqsave(&ci->lock, flags);
         for (i = 0; i < ci->hw_ep_max/2; i++) {
-                struct ci13xxx_ep *mEpRx = &ci->ci13xxx_ep[i];
-                struct ci13xxx_ep *mEpTx =
+                struct ci13xxx_ep *hweprx = &ci->ci13xxx_ep[i];
+                struct ci13xxx_ep *hweptx =
                         &ci->ci13xxx_ep[i + ci->hw_ep_max/2];
                 seq_printf(s, "EP=%02i: RX=%08X TX=%08X\n",
-                           i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
+                           i, (u32)hweprx->qh.dma, (u32)hweptx->qh.dma);
                 for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++)
                         seq_printf(s, " %04X: %08X %08X\n", j,
-                                   *((u32 *)mEpRx->qh.ptr + j),
-                                   *((u32 *)mEpTx->qh.ptr + j));
+                                   *((u32 *)hweprx->qh.ptr + j),
+                                   *((u32 *)hweptx->qh.ptr + j));
         }
         spin_unlock_irqrestore(&ci->lock, flags);
 
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 9ace071a188d..0a9dcc9a82aa 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -369,7 +369,7 @@ static int hw_usb_reset(struct ci13xxx *ci)
  * UTIL block
  *****************************************************************************/
 
-static int add_td_to_list(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq,
+static int add_td_to_list(struct ci13xxx_ep *hwep, struct ci13xxx_req *hwreq,
                           unsigned length)
 {
         int i;
@@ -380,7 +380,7 @@ static int add_td_to_list(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq,
         if (node == NULL)
                 return -ENOMEM;
 
-        node->ptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
+        node->ptr = dma_pool_alloc(hwep->td_pool, GFP_ATOMIC,
                                    &node->dma);
         if (node->ptr == NULL) {
                 kfree(node);
@@ -392,7 +392,7 @@ static int add_td_to_list(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq,
         node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
         node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
 
-        temp = (u32) (mReq->req.dma + mReq->req.actual);
+        temp = (u32) (hwreq->req.dma + hwreq->req.actual);
         if (length) {
                 node->ptr->page[0] = cpu_to_le32(temp);
                 for (i = 1; i < TD_PAGE_COUNT; i++) {
@@ -402,17 +402,17 @@ static int add_td_to_list(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq,
                 }
         }
 
-        mReq->req.actual += length;
+        hwreq->req.actual += length;
 
-        if (!list_empty(&mReq->tds)) {
+        if (!list_empty(&hwreq->tds)) {
                 /* get the last entry */
-                lastnode = list_entry(mReq->tds.prev,
+                lastnode = list_entry(hwreq->tds.prev,
                                       struct td_node, td);
                 lastnode->ptr->next = cpu_to_le32(node->dma);
         }
 
         INIT_LIST_HEAD(&node->td);
-        list_add_tail(&node->td, &mReq->tds);
+        list_add_tail(&node->td, &hwreq->tds);
 
         return 0;
 }
@@ -429,25 +429,25 @@ static inline u8 _usb_addr(struct ci13xxx_ep *ep)
 /**
  * _hardware_queue: configures a request at hardware level
  * @gadget: gadget
- * @mEp: endpoint
+ * @hwep: endpoint
  *
  * This function returns an error code
  */
-static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+static int _hardware_enqueue(struct ci13xxx_ep *hwep, struct ci13xxx_req *hwreq)
 {
-        struct ci13xxx *ci = mEp->ci;
+        struct ci13xxx *ci = hwep->ci;
         int ret = 0;
-        unsigned rest = mReq->req.length;
+        unsigned rest = hwreq->req.length;
         int pages = TD_PAGE_COUNT;
         struct td_node *firstnode, *lastnode;
 
         /* don't queue twice */
-        if (mReq->req.status == -EALREADY)
+        if (hwreq->req.status == -EALREADY)
                 return -EALREADY;
 
-        mReq->req.status = -EALREADY;
+        hwreq->req.status = -EALREADY;
 
-        ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir);
+        ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir);
         if (ret)
                 return ret;
 
@@ -455,44 +455,44 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
          * The first buffer could be not page aligned.
          * In that case we have to span into one extra td.
          */
-        if (mReq->req.dma % PAGE_SIZE)
+        if (hwreq->req.dma % PAGE_SIZE)
                 pages--;
 
         if (rest == 0)
-                add_td_to_list(mEp, mReq, 0);
+                add_td_to_list(hwep, hwreq, 0);
 
         while (rest > 0) {
-                unsigned count = min(mReq->req.length - mReq->req.actual,
+                unsigned count = min(hwreq->req.length - hwreq->req.actual,
                                      (unsigned)(pages * CI13XXX_PAGE_SIZE));
-                add_td_to_list(mEp, mReq, count);
+                add_td_to_list(hwep, hwreq, count);
                 rest -= count;
         }
 
-        if (mReq->req.zero && mReq->req.length
-            && (mReq->req.length % mEp->ep.maxpacket == 0))
-                add_td_to_list(mEp, mReq, 0);
+        if (hwreq->req.zero && hwreq->req.length
+            && (hwreq->req.length % hwep->ep.maxpacket == 0))
+                add_td_to_list(hwep, hwreq, 0);
 
-        firstnode = list_first_entry(&mReq->tds, struct td_node, td);
+        firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
 
-        lastnode = list_entry(mReq->tds.prev,
+        lastnode = list_entry(hwreq->tds.prev,
                               struct td_node, td);
 
         lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
-        if (!mReq->req.no_interrupt)
+        if (!hwreq->req.no_interrupt)
                 lastnode->ptr->token |= cpu_to_le32(TD_IOC);
         wmb();
 
-        mReq->req.actual = 0;
-        if (!list_empty(&mEp->qh.queue)) {
-                struct ci13xxx_req *mReqPrev;
-                int n = hw_ep_bit(mEp->num, mEp->dir);
+        hwreq->req.actual = 0;
+        if (!list_empty(&hwep->qh.queue)) {
+                struct ci13xxx_req *hwreqprev;
+                int n = hw_ep_bit(hwep->num, hwep->dir);
                 int tmp_stat;
                 struct td_node *prevlastnode;
                 u32 next = firstnode->dma & TD_ADDR_MASK;
 
-                mReqPrev = list_entry(mEp->qh.queue.prev,
+                hwreqprev = list_entry(hwep->qh.queue.prev,
                                       struct ci13xxx_req, queue);
-                prevlastnode = list_entry(mReqPrev->tds.prev,
+                prevlastnode = list_entry(hwreqprev->tds.prev,
                                           struct td_node, td);
 
                 prevlastnode->ptr->next = cpu_to_le32(next);
@@ -509,62 +509,62 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
         }
 
         /* QH configuration */
-        mEp->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
-        mEp->qh.ptr->td.token &=
+        hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
+        hwep->qh.ptr->td.token &=
                 cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
 
-        if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
-                u32 mul = mReq->req.length / mEp->ep.maxpacket;
+        if (hwep->type == USB_ENDPOINT_XFER_ISOC) {
+                u32 mul = hwreq->req.length / hwep->ep.maxpacket;
 
-                if (mReq->req.length % mEp->ep.maxpacket)
+                if (hwreq->req.length % hwep->ep.maxpacket)
                         mul++;
-                mEp->qh.ptr->cap |= mul << __ffs(QH_MULT);
+                hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
         }
 
         wmb(); /* synchronize before ep prime */
 
-        ret = hw_ep_prime(ci, mEp->num, mEp->dir,
-                          mEp->type == USB_ENDPOINT_XFER_CONTROL);
+        ret = hw_ep_prime(ci, hwep->num, hwep->dir,
+                          hwep->type == USB_ENDPOINT_XFER_CONTROL);
 done:
         return ret;
 }
 
 /*
  * free_pending_td: remove a pending request for the endpoint
- * @mEp: endpoint
+ * @hwep: endpoint
  */
-static void free_pending_td(struct ci13xxx_ep *mEp)
+static void free_pending_td(struct ci13xxx_ep *hwep)
 {
-        struct td_node *pending = mEp->pending_td;
+        struct td_node *pending = hwep->pending_td;
 
-        dma_pool_free(mEp->td_pool, pending->ptr, pending->dma);
-        mEp->pending_td = NULL;
+        dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
+        hwep->pending_td = NULL;
         kfree(pending);
 }
 
 /**
  * _hardware_dequeue: handles a request at hardware level
  * @gadget: gadget
- * @mEp: endpoint
+ * @hwep: endpoint
  *
  * This function returns an error code
  */
-static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+static int _hardware_dequeue(struct ci13xxx_ep *hwep, struct ci13xxx_req *hwreq)
 {
         u32 tmptoken;
         struct td_node *node, *tmpnode;
         unsigned remaining_length;
-        unsigned actual = mReq->req.length;
+        unsigned actual = hwreq->req.length;
 
-        if (mReq->req.status != -EALREADY)
+        if (hwreq->req.status != -EALREADY)
                 return -EINVAL;
 
-        mReq->req.status = 0;
+        hwreq->req.status = 0;
 
-        list_for_each_entry_safe(node, tmpnode, &mReq->tds, td) {
+        list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
                 tmptoken = le32_to_cpu(node->ptr->token);
                 if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
-                        mReq->req.status = -EALREADY;
+                        hwreq->req.status = -EALREADY;
                         return -EBUSY;
                 }
 
@@ -572,21 +572,21 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
                 remaining_length >>= __ffs(TD_TOTAL_BYTES);
                 actual -= remaining_length;
 
-                mReq->req.status = tmptoken & TD_STATUS;
-                if ((TD_STATUS_HALTED & mReq->req.status)) {
-                        mReq->req.status = -EPIPE;
+                hwreq->req.status = tmptoken & TD_STATUS;
+                if ((TD_STATUS_HALTED & hwreq->req.status)) {
+                        hwreq->req.status = -EPIPE;
                         break;
-                } else if ((TD_STATUS_DT_ERR & mReq->req.status)) {
-                        mReq->req.status = -EPROTO;
+                } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
+                        hwreq->req.status = -EPROTO;
                         break;
-                } else if ((TD_STATUS_TR_ERR & mReq->req.status)) {
-                        mReq->req.status = -EILSEQ;
+                } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
+                        hwreq->req.status = -EILSEQ;
                         break;
                 }
 
                 if (remaining_length) {
-                        if (mEp->dir) {
-                                mReq->req.status = -EPROTO;
+                        if (hwep->dir) {
+                                hwreq->req.status = -EPROTO;
                                 break;
                         }
                 }
@@ -595,66 +595,66 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
                  * which will run the udc unusable, the cleanup of the
                  * td has to be delayed by one.
                  */
-                if (mEp->pending_td)
-                        free_pending_td(mEp);
+                if (hwep->pending_td)
+                        free_pending_td(hwep);
 
-                mEp->pending_td = node;
+                hwep->pending_td = node;
                 list_del_init(&node->td);
         }
 
-        usb_gadget_unmap_request(&mEp->ci->gadget, &mReq->req, mEp->dir);
+        usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir);
 
-        mReq->req.actual += actual;
+        hwreq->req.actual += actual;
 
-        if (mReq->req.status)
-                return mReq->req.status;
+        if (hwreq->req.status)
+                return hwreq->req.status;
 
-        return mReq->req.actual;
+        return hwreq->req.actual;
 }
 
 /**
  * _ep_nuke: dequeues all endpoint requests
- * @mEp: endpoint
+ * @hwep: endpoint
  *
  * This function returns an error code
  * Caller must hold lock
  */
-static int _ep_nuke(struct ci13xxx_ep *mEp)
-__releases(mEp->lock)
-__acquires(mEp->lock)
+static int _ep_nuke(struct ci13xxx_ep *hwep)
+__releases(hwep->lock)
+__acquires(hwep->lock)
 {
         struct td_node *node, *tmpnode;
-        if (mEp == NULL)
+        if (hwep == NULL)
                 return -EINVAL;
 
-        hw_ep_flush(mEp->ci, mEp->num, mEp->dir);
+        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
-        while (!list_empty(&mEp->qh.queue)) {
+        while (!list_empty(&hwep->qh.queue)) {
 
                 /* pop oldest request */
-                struct ci13xxx_req *mReq = \
-                        list_entry(mEp->qh.queue.next,
-                                   struct ci13xxx_req, queue);
+                struct ci13xxx_req *hwreq = list_entry(hwep->qh.queue.next,
+                                                       struct ci13xxx_req,
+                                                       queue);
 
-                list_for_each_entry_safe(node, tmpnode, &mReq->tds, td) {
-                        dma_pool_free(mEp->td_pool, node->ptr, node->dma);
+                list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+                        dma_pool_free(hwep->td_pool, node->ptr, node->dma);
                         list_del_init(&node->td);
                         node->ptr = NULL;
                         kfree(node);
                 }
 
-                list_del_init(&mReq->queue);
-                mReq->req.status = -ESHUTDOWN;
+                list_del_init(&hwreq->queue);
+                hwreq->req.status = -ESHUTDOWN;
 
-                if (mReq->req.complete != NULL) {
-                        spin_unlock(mEp->lock);
-                        mReq->req.complete(&mEp->ep, &mReq->req);
-                        spin_lock(mEp->lock);
+                if (hwreq->req.complete != NULL) {
+                        spin_unlock(hwep->lock);
+                        hwreq->req.complete(&hwep->ep, &hwreq->req);
+                        spin_lock(hwep->lock);
                 }
         }
 
-        if (mEp->pending_td)
-                free_pending_td(mEp);
+        if (hwep->pending_td)
+                free_pending_td(hwep);
 
         return 0;
 }
@@ -759,48 +759,48 @@ static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
 static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
                      gfp_t __maybe_unused gfp_flags)
 {
-        struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-        struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
-        struct ci13xxx *ci = mEp->ci;
+        struct ci13xxx_ep *hwep = container_of(ep, struct ci13xxx_ep, ep);
+        struct ci13xxx_req *hwreq = container_of(req, struct ci13xxx_req, req);
+        struct ci13xxx *ci = hwep->ci;
         int retval = 0;
 
-        if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
+        if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
                 return -EINVAL;
 
-        if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+        if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
                 if (req->length)
-                        mEp = (ci->ep0_dir == RX) ?
+                        hwep = (ci->ep0_dir == RX) ?
                                 ci->ep0out : ci->ep0in;
-                if (!list_empty(&mEp->qh.queue)) {
-                        _ep_nuke(mEp);
+                if (!list_empty(&hwep->qh.queue)) {
+                        _ep_nuke(hwep);
                         retval = -EOVERFLOW;
-                        dev_warn(mEp->ci->dev, "endpoint ctrl %X nuked\n",
-                                 _usb_addr(mEp));
+                        dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
+                                 _usb_addr(hwep));
                 }
         }
 
-        if (usb_endpoint_xfer_isoc(mEp->ep.desc) &&
-            mReq->req.length > (1 + mEp->ep.mult) * mEp->ep.maxpacket) {
-                dev_err(mEp->ci->dev, "request length too big for isochronous\n");
+        if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
+            hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
+                dev_err(hwep->ci->dev, "request length too big for isochronous\n");
                 return -EMSGSIZE;
         }
 
         /* first nuke then test link, e.g. previous status has not sent */
-        if (!list_empty(&mReq->queue)) {
-                dev_err(mEp->ci->dev, "request already in queue\n");
+        if (!list_empty(&hwreq->queue)) {
+                dev_err(hwep->ci->dev, "request already in queue\n");
                 return -EBUSY;
         }
 
         /* push request */
-        mReq->req.status = -EINPROGRESS;
-        mReq->req.actual = 0;
+        hwreq->req.status = -EINPROGRESS;
+        hwreq->req.actual = 0;
 
-        retval = _hardware_enqueue(mEp, mReq);
+        retval = _hardware_enqueue(hwep, hwreq);
 
         if (retval == -EALREADY)
                 retval = 0;
         if (!retval)
-                list_add_tail(&mReq->queue, &mEp->qh.queue);
+                list_add_tail(&hwreq->queue, &hwep->qh.queue);
 
         return retval;
 }
@@ -814,20 +814,20 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
  */
 static int isr_get_status_response(struct ci13xxx *ci,
                                    struct usb_ctrlrequest *setup)
-__releases(mEp->lock)
-__acquires(mEp->lock)
+__releases(hwep->lock)
+__acquires(hwep->lock)
 {
-        struct ci13xxx_ep *mEp = ci->ep0in;
+        struct ci13xxx_ep *hwep = ci->ep0in;
         struct usb_request *req = NULL;
         gfp_t gfp_flags = GFP_ATOMIC;
         int dir, num, retval;
 
-        if (mEp == NULL || setup == NULL)
+        if (hwep == NULL || setup == NULL)
                 return -EINVAL;
 
-        spin_unlock(mEp->lock);
-        req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
-        spin_lock(mEp->lock);
+        spin_unlock(hwep->lock);
+        req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
+        spin_lock(hwep->lock);
         if (req == NULL)
                 return -ENOMEM;
 
@@ -852,7 +852,7 @@ __acquires(mEp->lock)
         }
         /* else do nothing; reserved for future use */
 
-        retval = _ep_queue(&mEp->ep, req, gfp_flags);
+        retval = _ep_queue(&hwep->ep, req, gfp_flags);
         if (retval)
                 goto err_free_buf;
 
@@ -861,9 +861,9 @@ __acquires(mEp->lock)
 err_free_buf:
         kfree(req->buf);
 err_free_req:
-        spin_unlock(mEp->lock);
-        usb_ep_free_request(&mEp->ep, req);
-        spin_lock(mEp->lock);
+        spin_unlock(hwep->lock);
+        usb_ep_free_request(&hwep->ep, req);
+        spin_lock(hwep->lock);
         return retval;
 }
 
@@ -901,45 +901,45 @@ isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
 static int isr_setup_status_phase(struct ci13xxx *ci)
 {
         int retval;
-        struct ci13xxx_ep *mEp;
+        struct ci13xxx_ep *hwep;
 
-        mEp = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
+        hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
         ci->status->context = ci;
         ci->status->complete = isr_setup_status_complete;
 
-        retval = _ep_queue(&mEp->ep, ci->status, GFP_ATOMIC);
+        retval = _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
 
         return retval;
 }
 
 /**
  * isr_tr_complete_low: transaction complete low level handler
- * @mEp: endpoint
+ * @hwep: endpoint
  *
  * This function returns an error code
  * Caller must hold lock
  */
-static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
-__releases(mEp->lock)
-__acquires(mEp->lock)
+static int isr_tr_complete_low(struct ci13xxx_ep *hwep)
+__releases(hwep->lock)
+__acquires(hwep->lock)
 {
-        struct ci13xxx_req *mReq, *mReqTemp;
-        struct ci13xxx_ep *mEpTemp = mEp;
+        struct ci13xxx_req *hwreq, *hwreqtemp;
+        struct ci13xxx_ep *hweptemp = hwep;
         int retval = 0;
 
-        list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
+        list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
                                  queue) {
-                retval = _hardware_dequeue(mEp, mReq);
+                retval = _hardware_dequeue(hwep, hwreq);
                 if (retval < 0)
                         break;
-                list_del_init(&mReq->queue);
-                if (mReq->req.complete != NULL) {
-                        spin_unlock(mEp->lock);
-                        if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
-                            mReq->req.length)
-                                mEpTemp = mEp->ci->ep0in;
-                        mReq->req.complete(&mEpTemp->ep, &mReq->req);
-                        spin_lock(mEp->lock);
+                list_del_init(&hwreq->queue);
+                if (hwreq->req.complete != NULL) {
+                        spin_unlock(hwep->lock);
+                        if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
+                            hwreq->req.length)
+                                hweptemp = hwep->ci->ep0in;
+                        hwreq->req.complete(&hweptemp->ep, &hwreq->req);
+                        spin_lock(hwep->lock);
                 }
         }
 
@@ -963,21 +963,21 @@ __acquires(ci->lock)
         u8 tmode = 0;
 
         for (i = 0; i < ci->hw_ep_max; i++) {
-                struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];
+                struct ci13xxx_ep *hwep = &ci->ci13xxx_ep[i];
                 int type, num, dir, err = -EINVAL;
                 struct usb_ctrlrequest req;
 
-                if (mEp->ep.desc == NULL)
+                if (hwep->ep.desc == NULL)
                         continue; /* not configured */
 
                 if (hw_test_and_clear_complete(ci, i)) {
-                        err = isr_tr_complete_low(mEp);
-                        if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+                        err = isr_tr_complete_low(hwep);
+                        if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
                                 if (err > 0) /* needs status phase */
                                         err = isr_setup_status_phase(ci);
                                 if (err < 0) {
                                         spin_unlock(&ci->lock);
-                                        if (usb_ep_set_halt(&mEp->ep))
+                                        if (usb_ep_set_halt(&hwep->ep))
                                                 dev_err(ci->dev,
                                                         "error: ep_set_halt\n");
                                         spin_lock(&ci->lock);
@@ -985,7 +985,7 @@ __acquires(ci->lock)
                         }
                 }
 
-                if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
+                if (hwep->type != USB_ENDPOINT_XFER_CONTROL ||
                     !hw_test_and_clear_setup_status(ci, i))
                         continue;
 
@@ -1004,7 +1004,7 @@ __acquires(ci->lock)
                 /* read_setup_packet */
                 do {
                         hw_test_and_set_setup_guard(ci);
-                        memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
+                        memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
                 } while (!hw_test_and_clear_setup_guard(ci));
 
                 type = req.bRequestType;
@@ -1123,7 +1123,7 @@ delegate:
 
                 if (err < 0) {
                         spin_unlock(&ci->lock);
-                        if (usb_ep_set_halt(&mEp->ep))
+                        if (usb_ep_set_halt(&hwep->ep))
                                 dev_err(ci->dev, "error: ep_set_halt\n");
                         spin_lock(&ci->lock);
                 }
@@ -1141,7 +1141,7 @@ delegate:
 static int ep_enable(struct usb_ep *ep,
                      const struct usb_endpoint_descriptor *desc)
 {
-        struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+        struct ci13xxx_ep *hwep = container_of(ep, struct ci13xxx_ep, ep);
         int retval = 0;
         unsigned long flags;
         u32 cap = 0;
@@ -1149,40 +1149,41 @@ static int ep_enable(struct usb_ep *ep,
         if (ep == NULL || desc == NULL)
                 return -EINVAL;
 
-        spin_lock_irqsave(mEp->lock, flags);
+        spin_lock_irqsave(hwep->lock, flags);
 
         /* only internal SW should enable ctrl endpts */
 
-        mEp->ep.desc = desc;
+        hwep->ep.desc = desc;
 
-        if (!list_empty(&mEp->qh.queue))
-                dev_warn(mEp->ci->dev, "enabling a non-empty endpoint!\n");
+        if (!list_empty(&hwep->qh.queue))
+                dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
 
-        mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
-        mEp->num = usb_endpoint_num(desc);
-        mEp->type = usb_endpoint_type(desc);
+        hwep->dir = usb_endpoint_dir_in(desc) ? TX : RX;
+        hwep->num = usb_endpoint_num(desc);
+        hwep->type = usb_endpoint_type(desc);
 
-        mEp->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
-        mEp->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
+        hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
+        hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
 
-        if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+        if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
                 cap |= QH_IOS;
-        if (mEp->num)
+        if (hwep->num)
                 cap |= QH_ZLT;
-        cap |= (mEp->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
+        cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
 
-        mEp->qh.ptr->cap = cpu_to_le32(cap);
+        hwep->qh.ptr->cap = cpu_to_le32(cap);
 
-        mEp->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */
+        hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */
 
         /*
          * Enable endpoints in the HW other than ep0 as ep0
          * is always enabled
          */
-        if (mEp->num)
-                retval |= hw_ep_enable(mEp->ci, mEp->num, mEp->dir, mEp->type);
+        if (hwep->num)
+                retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
+                                       hwep->type);
 
-        spin_unlock_irqrestore(mEp->lock, flags);
+        spin_unlock_irqrestore(hwep->lock, flags);
         return retval;
 }
 
@@ -1193,32 +1194,32 @@ static int ep_enable(struct usb_ep *ep,
  */
 static int ep_disable(struct usb_ep *ep)
 {
-        struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+        struct ci13xxx_ep *hwep = container_of(ep, struct ci13xxx_ep, ep);
         int direction, retval = 0;
         unsigned long flags;
 
         if (ep == NULL)
                 return -EINVAL;
-        else if (mEp->ep.desc == NULL)
+        else if (hwep->ep.desc == NULL)
                 return -EBUSY;
 
-        spin_lock_irqsave(mEp->lock, flags);
+        spin_lock_irqsave(hwep->lock, flags);
 
         /* only internal SW should disable ctrl endpts */
 
-        direction = mEp->dir;
+        direction = hwep->dir;
         do {
-                retval |= _ep_nuke(mEp);
-                retval |= hw_ep_disable(mEp->ci, mEp->num, mEp->dir);
+                retval |= _ep_nuke(hwep);
+                retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
 
-                if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-                        mEp->dir = (mEp->dir == TX) ? RX : TX;
+                if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+                        hwep->dir = (hwep->dir == TX) ? RX : TX;
 
-        } while (mEp->dir != direction);
+        } while (hwep->dir != direction);
 
-        mEp->ep.desc = NULL;
+        hwep->ep.desc = NULL;
 
-        spin_unlock_irqrestore(mEp->lock, flags);
+        spin_unlock_irqrestore(hwep->lock, flags);
         return retval;
 }
 
@@ -1229,18 +1230,18 @@ static int ep_disable(struct usb_ep *ep)
  */
 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
 {
-        struct ci13xxx_req *mReq = NULL;
+        struct ci13xxx_req *hwreq = NULL;
 
         if (ep == NULL)
                 return NULL;
 
-        mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
-        if (mReq != NULL) {
-                INIT_LIST_HEAD(&mReq->queue);
-                INIT_LIST_HEAD(&mReq->tds);
+        hwreq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
+        if (hwreq != NULL) {
+                INIT_LIST_HEAD(&hwreq->queue);
+                INIT_LIST_HEAD(&hwreq->tds);
         }
 
-        return (mReq == NULL) ? NULL : &mReq->req;
+        return (hwreq == NULL) ? NULL : &hwreq->req;
 }
 
 /**
@@ -1250,30 +1251,30 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
  */
 static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
 {
-        struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-        struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+        struct ci13xxx_ep *hwep = container_of(ep, struct ci13xxx_ep, ep);
+        struct ci13xxx_req *hwreq = container_of(req, struct ci13xxx_req, req);
         struct td_node *node, *tmpnode;
         unsigned long flags;
 
         if (ep == NULL || req == NULL) {
                 return;
-        } else if (!list_empty(&mReq->queue)) {
-                dev_err(mEp->ci->dev, "freeing queued request\n");
+        } else if (!list_empty(&hwreq->queue)) {
+                dev_err(hwep->ci->dev, "freeing queued request\n");
                 return;
         }
 
-        spin_lock_irqsave(mEp->lock, flags);
+        spin_lock_irqsave(hwep->lock, flags);
 
-        list_for_each_entry_safe(node, tmpnode, &mReq->tds, td) {
-                dma_pool_free(mEp->td_pool, node->ptr, node->dma);
+        list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+                dma_pool_free(hwep->td_pool, node->ptr, node->dma);
                 list_del_init(&node->td);
                 node->ptr = NULL;
                 kfree(node);
         }
 
-        kfree(mReq);
+        kfree(hwreq);
 
-        spin_unlock_irqrestore(mEp->lock, flags);
+        spin_unlock_irqrestore(hwep->lock, flags);
 }
 
 /**
@@ -1284,16 +1285,16 @@ static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
 static int ep_queue(struct usb_ep *ep, struct usb_request *req,
                     gfp_t __maybe_unused gfp_flags)
 {
-        struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+        struct ci13xxx_ep *hwep = container_of(ep, struct ci13xxx_ep, ep);
         int retval = 0;
         unsigned long flags;
 
-        if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
+        if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
                 return -EINVAL;
 
-        spin_lock_irqsave(mEp->lock, flags);
+        spin_lock_irqsave(hwep->lock, flags);
         retval = _ep_queue(ep, req, gfp_flags);
-        spin_unlock_irqrestore(mEp->lock, flags);
+        spin_unlock_irqrestore(hwep->lock, flags);
         return retval;
 }
 
@@ -1304,33 +1305,33 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
  */
 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 {
-        struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-        struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+        struct ci13xxx_ep *hwep = container_of(ep, struct ci13xxx_ep, ep);
+        struct ci13xxx_req *hwreq = container_of(req, struct ci13xxx_req, req);
         unsigned long flags;
 
-        if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
-                mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
-                list_empty(&mEp->qh.queue))
+        if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
+                hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
+                list_empty(&hwep->qh.queue))
                 return -EINVAL;
 
-        spin_lock_irqsave(mEp->lock, flags);
+        spin_lock_irqsave(hwep->lock, flags);
 
-        hw_ep_flush(mEp->ci, mEp->num, mEp->dir);
+        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
         /* pop request */
-        list_del_init(&mReq->queue);
+        list_del_init(&hwreq->queue);
 
-        usb_gadget_unmap_request(&mEp->ci->gadget, req, mEp->dir);
+        usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
 
         req->status = -ECONNRESET;
 
-        if (mReq->req.complete != NULL) {
-                spin_unlock(mEp->lock);
-                mReq->req.complete(&mEp->ep, &mReq->req);
-                spin_lock(mEp->lock);
+        if (hwreq->req.complete != NULL) {
+                spin_unlock(hwep->lock);
+                hwreq->req.complete(&hwep->ep, &hwreq->req);
+                spin_lock(hwep->lock);
         }
 
-        spin_unlock_irqrestore(mEp->lock, flags);
+        spin_unlock_irqrestore(hwep->lock, flags);
         return 0;
 }
 
@@ -1341,40 +1342,40 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
  */
 static int ep_set_halt(struct usb_ep *ep, int value)
 {
-        struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+        struct ci13xxx_ep *hwep = container_of(ep, struct ci13xxx_ep, ep);
         int direction, retval = 0;
         unsigned long flags;
 
-        if (ep == NULL || mEp->ep.desc == NULL)
+        if (ep == NULL || hwep->ep.desc == NULL)
                 return -EINVAL;
 
-        if (usb_endpoint_xfer_isoc(mEp->ep.desc))
+        if (usb_endpoint_xfer_isoc(hwep->ep.desc))
                 return -EOPNOTSUPP;
 
-        spin_lock_irqsave(mEp->lock, flags);
+        spin_lock_irqsave(hwep->lock, flags);
 
 #ifndef STALL_IN
         /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
-        if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
-            !list_empty(&mEp->qh.queue)) {
-                spin_unlock_irqrestore(mEp->lock, flags);
+        if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
+            !list_empty(&hwep->qh.queue)) {
+                spin_unlock_irqrestore(hwep->lock, flags);
                 return -EAGAIN;
         }
 #endif
 
-        direction = mEp->dir;
+        direction = hwep->dir;
         do {
-                retval |= hw_ep_set_halt(mEp->ci, mEp->num, mEp->dir, value);
+                retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
 
                 if (!value)
-                        mEp->wedge = 0;
+                        hwep->wedge = 0;
 
-                if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-                        mEp->dir = (mEp->dir == TX) ? RX : TX;
+                if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+                        hwep->dir = (hwep->dir == TX) ? RX : TX;
 
-        } while (mEp->dir != direction);
+        } while (hwep->dir != direction);
 
-        spin_unlock_irqrestore(mEp->lock, flags);
+        spin_unlock_irqrestore(hwep->lock, flags);
         return retval;
 }
 
@@ -1385,15 +1386,15 @@ static int ep_set_halt(struct usb_ep *ep, int value)
  */
 static int ep_set_wedge(struct usb_ep *ep)
 {
-        struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+        struct ci13xxx_ep *hwep = container_of(ep, struct ci13xxx_ep, ep);
         unsigned long flags;
 
-        if (ep == NULL || mEp->ep.desc == NULL)
+        if (ep == NULL || hwep->ep.desc == NULL)
                 return -EINVAL;
 
-        spin_lock_irqsave(mEp->lock, flags);
-        mEp->wedge = 1;
-        spin_unlock_irqrestore(mEp->lock, flags);
+        spin_lock_irqsave(hwep->lock, flags);
+        hwep->wedge = 1;
+        spin_unlock_irqrestore(hwep->lock, flags);
 
         return usb_ep_set_halt(ep);
 }
@@ -1405,19 +1406,19 @@ static int ep_set_wedge(struct usb_ep *ep)
  */
 static void ep_fifo_flush(struct usb_ep *ep)
 {
-        struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+        struct ci13xxx_ep *hwep = container_of(ep, struct ci13xxx_ep, ep);
         unsigned long flags;
 
         if (ep == NULL) {
-                dev_err(mEp->ci->dev, "%02X: -EINVAL\n", _usb_addr(mEp));
+                dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
                 return;
         }
 
-        spin_lock_irqsave(mEp->lock, flags);
+        spin_lock_irqsave(hwep->lock, flags);
 
-        hw_ep_flush(mEp->ci, mEp->num, mEp->dir);
+        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
-        spin_unlock_irqrestore(mEp->lock, flags);
+        spin_unlock_irqrestore(hwep->lock, flags);
 }
 
 /**
@@ -1493,12 +1494,12 @@ out:
         return ret;
 }
 
-static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
 {
         struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
 
         if (ci->transceiver)
-                return usb_phy_set_power(ci->transceiver, mA);
+                return usb_phy_set_power(ci->transceiver, ma);
         return -ENOTSUPP;
 }
 
@@ -1542,31 +1543,31 @@ static int init_eps(struct ci13xxx *ci)
         for (i = 0; i < ci->hw_ep_max/2; i++)
                 for (j = RX; j <= TX; j++) {
                         int k = i + j * ci->hw_ep_max/2;
-                        struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[k];
+                        struct ci13xxx_ep *hwep = &ci->ci13xxx_ep[k];
 
-                        scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+                        scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
                                   (j == TX) ? "in" : "out");
 
-                        mEp->ci = ci;
-                        mEp->lock = &ci->lock;
-                        mEp->td_pool = ci->td_pool;
+                        hwep->ci = ci;
+                        hwep->lock = &ci->lock;
+                        hwep->td_pool = ci->td_pool;
 
-                        mEp->ep.name = mEp->name;
-                        mEp->ep.ops = &usb_ep_ops;
+                        hwep->ep.name = hwep->name;
+                        hwep->ep.ops = &usb_ep_ops;
                         /*
                          * for ep0: maxP defined in desc, for other
                          * eps, maxP is set by epautoconfig() called
                          * by gadget layer
                          */
-                        mEp->ep.maxpacket = (unsigned short)~0;
+                        hwep->ep.maxpacket = (unsigned short)~0;
 
-                        INIT_LIST_HEAD(&mEp->qh.queue);
-                        mEp->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
-                                                     &mEp->qh.dma);
-                        if (mEp->qh.ptr == NULL)
+                        INIT_LIST_HEAD(&hwep->qh.queue);
+                        hwep->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
+                                                      &hwep->qh.dma);
+                        if (hwep->qh.ptr == NULL)
                                 retval = -ENOMEM;
                         else
-                                memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+                                memset(hwep->qh.ptr, 0, sizeof(*hwep->qh.ptr));
 
                         /*
                          * set up shorthands for ep0 out and in endpoints,
@@ -1574,15 +1575,15 @@ static int init_eps(struct ci13xxx *ci)
                          */
                         if (i == 0) {
                                 if (j == RX)
-                                        ci->ep0out = mEp;
+                                        ci->ep0out = hwep;
                                 else
-                                        ci->ep0in = mEp;
+                                        ci->ep0in = hwep;
 
-                                mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+                                hwep->ep.maxpacket = CTRL_PAYLOAD_MAX;
                                 continue;
                         }
 
-                        list_add_tail(&mEp->ep.ep_list, &ci->gadget.ep_list);
+                        list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
                 }
 
         return retval;
@@ -1593,9 +1594,9 @@ static void destroy_eps(struct ci13xxx *ci)
         int i;
 
         for (i = 0; i < ci->hw_ep_max; i++) {
-                struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];
+                struct ci13xxx_ep *hwep = &ci->ci13xxx_ep[i];
 
-                dma_pool_free(ci->qh_pool, mEp->qh.ptr, mEp->qh.dma);
+                dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
         }
 }
 
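Since the changelog claims "No functional changes", a pure rename like this can be sanity-checked by confirming that the generated object code is identical on either side of the commit. A sketch of such a check (assuming a configured kernel tree; the exact commands are illustrative, not taken from this page):

    $ git checkout 8b2379b49a9b && make drivers/usb/chipidea/udc.o
    $ objdump -d drivers/usb/chipidea/udc.o > before.asm
    $ git checkout 2dbc5c4c8314 && make drivers/usb/chipidea/udc.o
    $ objdump -d drivers/usb/chipidea/udc.o > after.asm
    $ diff before.asm after.asm    # expect no output for a rename-only patch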