aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb/host/isp1760-hcd.c
diff options
context:
space:
mode:
authorArvid Brodin <arvid.brodin@enea.com>2011-04-26 15:48:30 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2011-05-02 20:03:50 -0400
commit71a9f9d268a5c2b0a80ae606cf8e502f3410a5df (patch)
treefd8c3512ff67b64e8a487eecabdfa7ff5131bcdb /drivers/usb/host/isp1760-hcd.c
parent22bea9cef810ec54abdb057de46cea04c972dc64 (diff)
usb/isp1760: Improve urb queueing, get rid of BUG():s in normal code paths
This patch replaces the code that handles qtds. Instead of directly allocating chip mem and chip slot, enqueue the transfer in a list of queue heads. Use a centralized function enqueue_qtds() to prioritize and enqueue transfers. This removes all of the interrupt context BUG() calls when out of chip mem or transfer slots. It also makes it possible to efficiently use the dual-port mem on the chip for double-buffered transfers, which improves transfer times to/from/between usb sticks by about 40 % on my HW. With this patch it should also be possible to handle qtd scheduling outside of the interrupt handler, for significantly improved kernel latency. I have not implemented this since there are some locking issues which I haven't had time to look at. Signed-off-by: Arvid Brodin <arvid.brodin@enea.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/isp1760-hcd.c')
-rw-r--r--drivers/usb/host/isp1760-hcd.c1312
1 files changed, 647 insertions, 665 deletions
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index b38cfe98f226..dd98a966b58b 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -8,6 +8,8 @@
8 * 8 *
9 * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de> 9 * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
10 * 10 *
11 * (c) 2011 Arvid Brodin <arvid.brodin@enea.com>
12 *
11 */ 13 */
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -26,14 +28,16 @@
26 28
27static struct kmem_cache *qtd_cachep; 29static struct kmem_cache *qtd_cachep;
28static struct kmem_cache *qh_cachep; 30static struct kmem_cache *qh_cachep;
31static struct kmem_cache *urb_listitem_cachep;
29 32
30struct isp1760_hcd { 33struct isp1760_hcd {
31 u32 hcs_params; 34 u32 hcs_params;
32 spinlock_t lock; 35 spinlock_t lock;
33 struct inter_packet_info atl_ints[32]; 36 struct slotinfo atl_slots[32];
34 struct inter_packet_info int_ints[32]; 37 struct slotinfo int_slots[32];
35 struct memory_chunk memory_pool[BLOCKS]; 38 struct memory_chunk memory_pool[BLOCKS];
36 u32 atl_queued; 39 struct list_head controlqhs, bulkqhs, interruptqhs;
40 int active_ptds;
37 41
38 /* periodic schedule support */ 42 /* periodic schedule support */
39#define DEFAULT_I_TDPS 1024 43#define DEFAULT_I_TDPS 1024
@@ -85,18 +89,34 @@ struct isp1760_qtd {
85 struct list_head qtd_list; 89 struct list_head qtd_list;
86 struct urb *urb; 90 struct urb *urb;
87 size_t length; 91 size_t length;
88 92 size_t actual_length;
89 /* isp special*/ 93
94 /* QTD_ENQUEUED: waiting for transfer (inactive) */
95 /* QTD_PAYLOAD_ALLOC: chip mem has been allocated for payload */
96 /* QTD_XFER_STARTED: valid ptd has been written to isp176x - only
97 interrupt handler may touch this qtd! */
98 /* QTD_XFER_COMPLETE: payload has been transferred successfully */
99 /* QTD_RETIRE: transfer error/abort qtd */
100#define QTD_ENQUEUED 0
101#define QTD_PAYLOAD_ALLOC 1
102#define QTD_XFER_STARTED 2
103#define QTD_XFER_COMPLETE 3
104#define QTD_RETIRE 4
90 u32 status; 105 u32 status;
91#define URB_ENQUEUED (1 << 1)
92}; 106};
93 107
108/* Queue head, one for each active endpoint */
94struct isp1760_qh { 109struct isp1760_qh {
95 /* first part defined by EHCI spec */ 110 struct list_head qh_list;
96 struct list_head qtd_list; 111 struct list_head qtd_list;
97
98 u32 toggle; 112 u32 toggle;
99 u32 ping; 113 u32 ping;
114 int slot;
115};
116
117struct urb_listitem {
118 struct list_head urb_list;
119 struct urb *urb;
100}; 120};
101 121
102/* 122/*
@@ -293,19 +313,6 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
293 return; 313 return;
294 } 314 }
295 } 315 }
296
297 dev_err(hcd->self.controller,
298 "%s: Cannot allocate %zu bytes of memory\n"
299 "Current memory map:\n",
300 __func__, qtd->length);
301 for (i = 0; i < BLOCKS; i++) {
302 dev_err(hcd->self.controller, "Pool %2d size %4d status: %d\n",
303 i, priv->memory_pool[i].size,
304 priv->memory_pool[i].free);
305 }
306 /* XXX maybe -ENOMEM could be possible */
307 BUG();
308 return;
309} 316}
310 317
311static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) 318static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
@@ -327,15 +334,8 @@ static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
327 334
328 dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n", 335 dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n",
329 __func__, qtd->payload_addr); 336 __func__, qtd->payload_addr);
330 BUG(); 337 WARN_ON(1);
331} 338 qtd->payload_addr = 0;
332
333static void isp1760_init_regs(struct usb_hcd *hcd)
334{
335 reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
336 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
337 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
338 reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
339} 339}
340 340
341static int handshake(struct usb_hcd *hcd, u32 reg, 341static int handshake(struct usb_hcd *hcd, u32 reg,
@@ -373,31 +373,27 @@ static int ehci_reset(struct usb_hcd *hcd)
373 return retval; 373 return retval;
374} 374}
375 375
376static void qh_destroy(struct isp1760_qh *qh) 376static struct isp1760_qh *qh_alloc(gfp_t flags)
377{
378 WARN_ON(!list_empty(&qh->qtd_list));
379 kmem_cache_free(qh_cachep, qh);
380}
381
382static struct isp1760_qh *isp1760_qh_alloc(gfp_t flags)
383{ 377{
384 struct isp1760_qh *qh; 378 struct isp1760_qh *qh;
385 379
386 qh = kmem_cache_zalloc(qh_cachep, flags); 380 qh = kmem_cache_zalloc(qh_cachep, flags);
387 if (!qh) 381 if (!qh)
388 return qh; 382 return NULL;
389 383
384 INIT_LIST_HEAD(&qh->qh_list);
390 INIT_LIST_HEAD(&qh->qtd_list); 385 INIT_LIST_HEAD(&qh->qtd_list);
386 qh->slot = -1;
387
391 return qh; 388 return qh;
392} 389}
393 390
394/* magic numbers that can affect system performance */ 391static void qh_free(struct isp1760_qh *qh)
395#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ 392{
396#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ 393 WARN_ON(!list_empty(&qh->qtd_list));
397#define EHCI_TUNE_RL_TT 0 394 WARN_ON(qh->slot > -1);
398#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */ 395 kmem_cache_free(qh_cachep, qh);
399#define EHCI_TUNE_MULT_TT 1 396}
400#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
401 397
402/* one-time init, only for memory state */ 398/* one-time init, only for memory state */
403static int priv_init(struct usb_hcd *hcd) 399static int priv_init(struct usb_hcd *hcd)
@@ -407,6 +403,10 @@ static int priv_init(struct usb_hcd *hcd)
407 403
408 spin_lock_init(&priv->lock); 404 spin_lock_init(&priv->lock);
409 405
406 INIT_LIST_HEAD(&priv->interruptqhs);
407 INIT_LIST_HEAD(&priv->controlqhs);
408 INIT_LIST_HEAD(&priv->bulkqhs);
409
410 /* 410 /*
411 * hw default: 1K periodic list heads, one per frame. 411 * hw default: 1K periodic list heads, one per frame.
412 * periodic_size can shrink by USBCMD update if hcc_params allows. 412 * periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -464,7 +464,10 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
464 } 464 }
465 465
466 /* pre reset */ 466 /* pre reset */
467 isp1760_init_regs(hcd); 467 reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
468 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
469 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
470 reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
468 471
469 /* reset */ 472 /* reset */
470 reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL); 473 reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL);
@@ -484,12 +487,15 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
484 16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ? 487 16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ?
485 "analog" : "digital"); 488 "analog" : "digital");
486 489
490 /* This is weird: at the first plug-in of a device there seems to be
491 one packet queued that never gets returned? */
492 priv->active_ptds = -1;
493
487 /* ATL reset */ 494 /* ATL reset */
488 reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET); 495 reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
489 mdelay(10); 496 mdelay(10);
490 reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode); 497 reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
491 498
492 reg_write32(hcd->regs, HC_INTERRUPT_REG, INTERRUPT_ENABLE_MASK);
493 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK); 499 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);
494 500
495 /* 501 /*
@@ -513,6 +519,10 @@ static void isp1760_init_maps(struct usb_hcd *hcd)
513 reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000); 519 reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
514 reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001); 520 reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
515 521
522 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0);
523 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0);
524 reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0);
525
516 reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 526 reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
517 ATL_BUF_FILL | INT_BUF_FILL); 527 ATL_BUF_FILL | INT_BUF_FILL);
518} 528}
@@ -547,8 +557,7 @@ static int isp1760_run(struct usb_hcd *hcd)
547 command |= CMD_RUN; 557 command |= CMD_RUN;
548 reg_write32(hcd->regs, HC_USBCMD, command); 558 reg_write32(hcd->regs, HC_USBCMD, command);
549 559
550 retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 560 retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
551 250 * 1000);
552 if (retval) 561 if (retval)
553 return retval; 562 return retval;
554 563
@@ -597,12 +606,19 @@ static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh)
597 return (qtd->urb != urb); 606 return (qtd->urb != urb);
598} 607}
599 608
600static void transform_into_atl(struct isp1760_qh *qh, 609/* magic numbers that can affect system performance */
610#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
611#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
612#define EHCI_TUNE_RL_TT 0
613#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
614#define EHCI_TUNE_MULT_TT 1
615#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
616
617static void create_ptd_atl(struct isp1760_qh *qh,
601 struct isp1760_qtd *qtd, struct ptd *ptd) 618 struct isp1760_qtd *qtd, struct ptd *ptd)
602{ 619{
603 u32 maxpacket; 620 u32 maxpacket;
604 u32 multi; 621 u32 multi;
605 u32 pid_code;
606 u32 rl = RL_COUNTER; 622 u32 rl = RL_COUNTER;
607 u32 nak = NAK_COUNTER; 623 u32 nak = NAK_COUNTER;
608 624
@@ -615,67 +631,62 @@ static void transform_into_atl(struct isp1760_qh *qh,
615 maxpacket &= 0x7ff; 631 maxpacket &= 0x7ff;
616 632
617 /* DW0 */ 633 /* DW0 */
618 ptd->dw0 = PTD_VALID; 634 ptd->dw0 = DW0_VALID_BIT;
619 ptd->dw0 |= PTD_LENGTH(qtd->length); 635 ptd->dw0 |= TO_DW0_LENGTH(qtd->length);
620 ptd->dw0 |= PTD_MAXPACKET(maxpacket); 636 ptd->dw0 |= TO_DW0_MAXPACKET(maxpacket);
621 ptd->dw0 |= PTD_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe)); 637 ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));
622 638
623 /* DW1 */ 639 /* DW1 */
624 ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1; 640 ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1;
625 ptd->dw1 |= PTD_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe)); 641 ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
626 642 ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type);
627 pid_code = qtd->packet_type;
628 ptd->dw1 |= PTD_PID_TOKEN(pid_code);
629 643
630 if (usb_pipebulk(qtd->urb->pipe)) 644 if (usb_pipebulk(qtd->urb->pipe))
631 ptd->dw1 |= PTD_TRANS_BULK; 645 ptd->dw1 |= DW1_TRANS_BULK;
632 else if (usb_pipeint(qtd->urb->pipe)) 646 else if (usb_pipeint(qtd->urb->pipe))
633 ptd->dw1 |= PTD_TRANS_INT; 647 ptd->dw1 |= DW1_TRANS_INT;
634 648
635 if (qtd->urb->dev->speed != USB_SPEED_HIGH) { 649 if (qtd->urb->dev->speed != USB_SPEED_HIGH) {
636 /* split transaction */ 650 /* split transaction */
637 651
638 ptd->dw1 |= PTD_TRANS_SPLIT; 652 ptd->dw1 |= DW1_TRANS_SPLIT;
639 if (qtd->urb->dev->speed == USB_SPEED_LOW) 653 if (qtd->urb->dev->speed == USB_SPEED_LOW)
640 ptd->dw1 |= PTD_SE_USB_LOSPEED; 654 ptd->dw1 |= DW1_SE_USB_LOSPEED;
641 655
642 ptd->dw1 |= PTD_PORT_NUM(qtd->urb->dev->ttport); 656 ptd->dw1 |= TO_DW1_PORT_NUM(qtd->urb->dev->ttport);
643 ptd->dw1 |= PTD_HUB_NUM(qtd->urb->dev->tt->hub->devnum); 657 ptd->dw1 |= TO_DW1_HUB_NUM(qtd->urb->dev->tt->hub->devnum);
644 658
645 /* SE bit for Split INT transfers */ 659 /* SE bit for Split INT transfers */
646 if (usb_pipeint(qtd->urb->pipe) && 660 if (usb_pipeint(qtd->urb->pipe) &&
647 (qtd->urb->dev->speed == USB_SPEED_LOW)) 661 (qtd->urb->dev->speed == USB_SPEED_LOW))
648 ptd->dw1 |= 2 << 16; 662 ptd->dw1 |= 2 << 16;
649 663
650 ptd->dw3 = 0;
651 rl = 0; 664 rl = 0;
652 nak = 0; 665 nak = 0;
653 } else { 666 } else {
654 ptd->dw0 |= PTD_MULTI(multi); 667 ptd->dw0 |= TO_DW0_MULTI(multi);
655 if (usb_pipecontrol(qtd->urb->pipe) || 668 if (usb_pipecontrol(qtd->urb->pipe) ||
656 usb_pipebulk(qtd->urb->pipe)) 669 usb_pipebulk(qtd->urb->pipe))
657 ptd->dw3 = qh->ping; 670 ptd->dw3 |= TO_DW3_PING(qh->ping);
658 else
659 ptd->dw3 = 0;
660 } 671 }
661 /* DW2 */ 672 /* DW2 */
662 ptd->dw2 = 0; 673 ptd->dw2 = 0;
663 ptd->dw2 |= PTD_DATA_START_ADDR(base_to_chip(qtd->payload_addr)); 674 ptd->dw2 |= TO_DW2_DATA_START_ADDR(base_to_chip(qtd->payload_addr));
664 ptd->dw2 |= PTD_RL_CNT(rl); 675 ptd->dw2 |= TO_DW2_RL(rl);
665 ptd->dw3 |= PTD_NAC_CNT(nak);
666 676
667 /* DW3 */ 677 /* DW3 */
668 ptd->dw3 |= qh->toggle; 678 ptd->dw3 |= TO_DW3_NAKCOUNT(nak);
679 ptd->dw3 |= TO_DW3_DATA_TOGGLE(qh->toggle);
669 if (usb_pipecontrol(qtd->urb->pipe)) { 680 if (usb_pipecontrol(qtd->urb->pipe)) {
670 if (qtd->data_buffer == qtd->urb->setup_packet) 681 if (qtd->data_buffer == qtd->urb->setup_packet)
671 ptd->dw3 &= ~PTD_DATA_TOGGLE(1); 682 ptd->dw3 &= ~TO_DW3_DATA_TOGGLE(1);
672 else if (last_qtd_of_urb(qtd, qh)) 683 else if (last_qtd_of_urb(qtd, qh))
673 ptd->dw3 |= PTD_DATA_TOGGLE(1); 684 ptd->dw3 |= TO_DW3_DATA_TOGGLE(1);
674 } 685 }
675 686
676 ptd->dw3 |= PTD_ACTIVE; 687 ptd->dw3 |= DW3_ACTIVE_BIT;
677 /* Cerr */ 688 /* Cerr */
678 ptd->dw3 |= PTD_CERR(ERR_COUNTER); 689 ptd->dw3 |= TO_DW3_CERR(ERR_COUNTER);
679} 690}
680 691
681static void transform_add_int(struct isp1760_qh *qh, 692static void transform_add_int(struct isp1760_qh *qh,
@@ -730,162 +741,13 @@ static void transform_add_int(struct isp1760_qh *qh,
730 ptd->dw4 = usof; 741 ptd->dw4 = usof;
731} 742}
732 743
733static void transform_into_int(struct isp1760_qh *qh, 744static void create_ptd_int(struct isp1760_qh *qh,
734 struct isp1760_qtd *qtd, struct ptd *ptd) 745 struct isp1760_qtd *qtd, struct ptd *ptd)
735{ 746{
736 transform_into_atl(qh, qtd, ptd); 747 create_ptd_atl(qh, qtd, ptd);
737 transform_add_int(qh, qtd, ptd); 748 transform_add_int(qh, qtd, ptd);
738} 749}
739 750
740static int check_error(struct usb_hcd *hcd, struct ptd *ptd)
741{
742 int error = 0;
743
744 if (ptd->dw3 & DW3_HALT_BIT) {
745 error = -EPIPE;
746
747 if (ptd->dw3 & DW3_ERROR_BIT)
748 pr_err("error bit is set in DW3\n");
749 }
750
751 if (ptd->dw3 & DW3_QTD_ACTIVE) {
752 dev_err(hcd->self.controller, "Transfer active bit is set DW3\n"
753 "nak counter: %d, rl: %d\n",
754 (ptd->dw3 >> 19) & 0xf, (ptd->dw2 >> 25) & 0xf);
755 }
756
757 return error;
758}
759
760static void check_int_err_status(struct usb_hcd *hcd, u32 dw4)
761{
762 u32 i;
763
764 dw4 >>= 8;
765
766 for (i = 0; i < 8; i++) {
767 switch (dw4 & 0x7) {
768 case INT_UNDERRUN:
769 dev_err(hcd->self.controller, "Underrun (%d)\n", i);
770 break;
771
772 case INT_EXACT:
773 dev_err(hcd->self.controller,
774 "Transaction error (%d)\n", i);
775 break;
776
777 case INT_BABBLE:
778 dev_err(hcd->self.controller, "Babble error (%d)\n", i);
779 break;
780 }
781 dw4 >>= 3;
782 }
783}
784
785static void enqueue_one_qtd(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
786{
787 if (qtd->length && (qtd->length <= MAX_PAYLOAD_SIZE)) {
788 switch (qtd->packet_type) {
789 case IN_PID:
790 break;
791 case OUT_PID:
792 case SETUP_PID:
793 mem_writes8(hcd->regs, qtd->payload_addr,
794 qtd->data_buffer, qtd->length);
795 }
796 }
797}
798
799static void enqueue_one_atl_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
800 u32 slot, struct isp1760_qtd *qtd)
801{
802 struct isp1760_hcd *priv = hcd_to_priv(hcd);
803 struct ptd ptd;
804
805 alloc_mem(hcd, qtd);
806 transform_into_atl(qh, qtd, &ptd);
807 ptd_write(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
808 enqueue_one_qtd(hcd, qtd);
809
810 priv->atl_ints[slot].qh = qh;
811 priv->atl_ints[slot].qtd = qtd;
812 qtd->status |= URB_ENQUEUED;
813 qtd->status |= slot << 16;
814}
815
816static void enqueue_one_int_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
817 u32 slot, struct isp1760_qtd *qtd)
818{
819 struct isp1760_hcd *priv = hcd_to_priv(hcd);
820 struct ptd ptd;
821
822 alloc_mem(hcd, qtd);
823 transform_into_int(qh, qtd, &ptd);
824 ptd_write(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
825 enqueue_one_qtd(hcd, qtd);
826
827 priv->int_ints[slot].qh = qh;
828 priv->int_ints[slot].qtd = qtd;
829 qtd->status |= URB_ENQUEUED;
830 qtd->status |= slot << 16;
831}
832
833static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
834 struct isp1760_qtd *qtd)
835{
836 struct isp1760_hcd *priv = hcd_to_priv(hcd);
837 u32 skip_map;
838 u32 slot;
839
840 /*
841 * When this function is called from the interrupt handler to enqueue
842 * a follow-up packet, the SKIP register gets written and read back
843 * almost immediately. With ISP1761, this register requires a delay of
844 * 195ns between a write and subsequent read (see section 15.1.1.3).
845 */
846 mmiowb();
847 ndelay(195);
848 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
849
850 BUG_ON(!skip_map);
851 slot = __ffs(skip_map);
852
853 enqueue_one_atl_qtd(hcd, qh, slot, qtd);
854
855 skip_map &= ~(1 << slot);
856 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
857
858 priv->atl_queued++;
859 if (priv->atl_queued == 2)
860 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
861 INTERRUPT_ENABLE_SOT_MASK);
862}
863
864static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
865 struct isp1760_qtd *qtd)
866{
867 u32 skip_map;
868 u32 slot;
869
870 /*
871 * When this function is called from the interrupt handler to enqueue
872 * a follow-up packet, the SKIP register gets written and read back
873 * almost immediately. With ISP1761, this register requires a delay of
874 * 195ns between a write and subsequent read (see section 15.1.1.3).
875 */
876 mmiowb();
877 ndelay(195);
878 skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
879
880 BUG_ON(!skip_map);
881 slot = __ffs(skip_map);
882
883 enqueue_one_int_qtd(hcd, qh, slot, qtd);
884
885 skip_map &= ~(1 << slot);
886 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
887}
888
889static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb) 751static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb)
890__releases(priv->lock) 752__releases(priv->lock)
891__acquires(priv->lock) 753__acquires(priv->lock)
@@ -924,6 +786,8 @@ static struct isp1760_qtd *qtd_alloc(gfp_t flags, struct urb *urb,
924 INIT_LIST_HEAD(&qtd->qtd_list); 786 INIT_LIST_HEAD(&qtd->qtd_list);
925 qtd->urb = urb; 787 qtd->urb = urb;
926 qtd->packet_type = packet_type; 788 qtd->packet_type = packet_type;
789 qtd->status = QTD_ENQUEUED;
790 qtd->actual_length = 0;
927 791
928 return qtd; 792 return qtd;
929} 793}
@@ -934,323 +798,505 @@ static void qtd_free(struct isp1760_qtd *qtd)
934 kmem_cache_free(qtd_cachep, qtd); 798 kmem_cache_free(qtd_cachep, qtd);
935} 799}
936 800
937static struct isp1760_qtd *clean_this_qtd(struct isp1760_qtd *qtd, 801static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
938 struct isp1760_qh *qh) 802 struct slotinfo *slots, struct isp1760_qtd *qtd,
803 struct isp1760_qh *qh, struct ptd *ptd)
939{ 804{
940 struct isp1760_qtd *tmp_qtd; 805 struct isp1760_hcd *priv = hcd_to_priv(hcd);
941 806 WARN_ON((slot < 0) || (slot > 31));
942 if (list_is_last(&qtd->qtd_list, &qh->qtd_list)) 807 WARN_ON(qtd->length && !qtd->payload_addr);
943 tmp_qtd = NULL; 808 WARN_ON(slots[slot].qtd);
944 else 809 WARN_ON(slots[slot].qh);
945 tmp_qtd = list_entry(qtd->qtd_list.next, struct isp1760_qtd, 810 WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);
946 qtd_list); 811
947 list_del(&qtd->qtd_list); 812 slots[slot].qtd = qtd;
948 qtd_free(qtd); 813 slots[slot].qh = qh;
949 return tmp_qtd; 814 qh->slot = slot;
815 qtd->status = QTD_XFER_STARTED; /* Set this before writing ptd, since
816 interrupt routine may preempt and expects this value. */
817 ptd_write(hcd->regs, ptd_offset, slot, ptd);
818 priv->active_ptds++;
950} 819}
951 820
952/* 821static int is_short_bulk(struct isp1760_qtd *qtd)
953 * Remove this QTD from the QH list and free its memory. If this QTD
954 * isn't the last one than remove also his successor(s).
955 * Returns the QTD which is part of an new URB and should be enqueued.
956 */
957static struct isp1760_qtd *clean_up_qtdlist(struct isp1760_qtd *qtd,
958 struct isp1760_qh *qh)
959{ 822{
960 struct urb *urb; 823 return (usb_pipebulk(qtd->urb->pipe) &&
961 824 (qtd->actual_length < qtd->length));
962 urb = qtd->urb;
963 do {
964 qtd = clean_this_qtd(qtd, qh);
965 } while (qtd && (qtd->urb == urb));
966
967 return qtd;
968} 825}
969 826
970static void do_atl_int(struct usb_hcd *hcd) 827static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
828 struct list_head *urb_list)
971{ 829{
972 struct isp1760_hcd *priv = hcd_to_priv(hcd); 830 int last_qtd;
973 u32 done_map, skip_map; 831 struct isp1760_qtd *qtd, *qtd_next;
974 struct ptd ptd; 832 struct urb_listitem *urb_listitem;
975 struct urb *urb;
976 u32 slot;
977 u32 length;
978 u32 status = -EINVAL;
979 int error;
980 struct isp1760_qtd *qtd;
981 struct isp1760_qh *qh;
982 u32 rl;
983 u32 nakcount;
984 833
985 done_map = reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG); 834 list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) {
986 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); 835 if (qtd->status < QTD_XFER_COMPLETE)
836 break;
987 837
988 while (done_map) { 838 if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
989 status = 0; 839 last_qtd = 1;
990 priv->atl_queued--; 840 else
841 last_qtd = qtd->urb != qtd_next->urb;
842
843 if ((!last_qtd) && (qtd->status == QTD_RETIRE))
844 qtd_next->status = QTD_RETIRE;
845
846 if (qtd->status == QTD_XFER_COMPLETE) {
847 if (qtd->actual_length) {
848 switch (qtd->packet_type) {
849 case IN_PID:
850 mem_reads8(hcd->regs, qtd->payload_addr,
851 qtd->data_buffer,
852 qtd->actual_length);
853 /* Fall through (?) */
854 case OUT_PID:
855 qtd->urb->actual_length +=
856 qtd->actual_length;
857 /* Fall through ... */
858 case SETUP_PID:
859 break;
860 }
861 }
991 862
992 slot = __ffs(done_map); 863 if (is_short_bulk(qtd)) {
993 done_map &= ~(1 << slot); 864 if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK)
994 skip_map |= (1 << slot); 865 qtd->urb->status = -EREMOTEIO;
866 if (!last_qtd)
867 qtd_next->status = QTD_RETIRE;
868 }
869 }
995 870
996 qtd = priv->atl_ints[slot].qtd; 871 if (qtd->payload_addr)
997 qh = priv->atl_ints[slot].qh; 872 free_mem(hcd, qtd);
998 873
999 /* urb unlinked? */ 874 if (last_qtd) {
1000 if (!qh) 875 if ((qtd->status == QTD_RETIRE) &&
1001 continue; 876 (qtd->urb->status == -EINPROGRESS))
877 qtd->urb->status = -EPIPE;
878 /* Defer calling of urb_done() since it releases lock */
879 urb_listitem = kmem_cache_zalloc(urb_listitem_cachep,
880 GFP_ATOMIC);
881 if (unlikely(!urb_listitem))
882 break;
883 urb_listitem->urb = qtd->urb;
884 list_add_tail(&urb_listitem->urb_list, urb_list);
885 }
1002 886
1003 ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); 887 list_del(&qtd->qtd_list);
888 qtd_free(qtd);
889 }
890}
1004 891
1005 rl = (ptd.dw2 >> 25) & 0x0f; 892#define ENQUEUE_DEPTH 2
1006 nakcount = (ptd.dw3 >> 19) & 0xf; 893static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
894{
895 struct isp1760_hcd *priv = hcd_to_priv(hcd);
896 int ptd_offset;
897 struct slotinfo *slots;
898 int curr_slot, free_slot;
899 int n;
900 struct ptd ptd;
901 struct isp1760_qtd *qtd;
1007 902
1008 /* Transfer Error, *but* active and no HALT -> reload */ 903 if (unlikely(list_empty(&qh->qtd_list))) {
1009 if ((ptd.dw3 & DW3_ERROR_BIT) && (ptd.dw3 & DW3_QTD_ACTIVE) && 904 WARN_ON(1);
1010 !(ptd.dw3 & DW3_HALT_BIT)) { 905 return;
1011 906 }
1012 /* according to ppriv code, we have to
1013 * reload this one if trasfered bytes != requested bytes
1014 * else act like everything went smooth..
1015 * XXX This just doesn't feel right and hasn't
1016 * triggered so far.
1017 */
1018 907
1019 length = PTD_XFERRED_LENGTH(ptd.dw3); 908 if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd,
1020 dev_err(hcd->self.controller, 909 qtd_list)->urb->pipe)) {
1021 "Should reload now... transferred %d " 910 ptd_offset = INT_PTD_OFFSET;
1022 "of %zu\n", length, qtd->length); 911 slots = priv->int_slots;
1023 BUG(); 912 } else {
1024 } 913 ptd_offset = ATL_PTD_OFFSET;
914 slots = priv->atl_slots;
915 }
1025 916
1026 if (!nakcount && (ptd.dw3 & DW3_QTD_ACTIVE)) { 917 free_slot = -1;
1027 /* 918 for (curr_slot = 0; curr_slot < 32; curr_slot++) {
1028 * NAKs are handled in HW by the chip. Usually if the 919 if ((free_slot == -1) && (slots[curr_slot].qtd == NULL))
1029 * device is not able to send data fast enough. 920 free_slot = curr_slot;
1030 * This happens mostly on slower hardware. 921 if (slots[curr_slot].qh == qh)
1031 */ 922 break;
923 }
1032 924
1033 /* RL counter = ERR counter */ 925 n = 0;
1034 ptd.dw3 &= ~(0xf << 19); 926 list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
1035 ptd.dw3 |= rl << 19; 927 if (qtd->status == QTD_ENQUEUED) {
1036 ptd.dw3 &= ~(3 << (55 - 32)); 928 WARN_ON(qtd->payload_addr);
1037 ptd.dw3 |= ERR_COUNTER << (55 - 32); 929 alloc_mem(hcd, qtd);
1038 930 if ((qtd->length) && (!qtd->payload_addr))
1039 /* 931 break;
1040 * It is not needed to write skip map back because it
1041 * is unchanged. Just make sure that this entry is
1042 * unskipped once it gets written to the HW.
1043 */
1044 skip_map &= ~(1 << slot);
1045 932
1046 ptd.dw0 |= PTD_VALID; 933 if ((qtd->length) &&
1047 ptd_write(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); 934 ((qtd->packet_type == SETUP_PID) ||
935 (qtd->packet_type == OUT_PID))) {
936 mem_writes8(hcd->regs, qtd->payload_addr,
937 qtd->data_buffer, qtd->length);
938 }
1048 939
1049 priv->atl_queued++; 940 qtd->status = QTD_PAYLOAD_ALLOC;
1050 if (priv->atl_queued == 2)
1051 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
1052 INTERRUPT_ENABLE_SOT_MASK);
1053 continue;
1054 } 941 }
1055 942
1056 error = check_error(hcd, &ptd); 943 if (qtd->status == QTD_PAYLOAD_ALLOC) {
1057 if (error) { 944/*
1058 status = error; 945 if ((curr_slot > 31) && (free_slot == -1))
1059 priv->atl_ints[slot].qh->toggle = 0; 946 dev_dbg(hcd->self.controller, "%s: No slot "
1060 priv->atl_ints[slot].qh->ping = 0; 947 "available for transfer\n", __func__);
1061 qtd->urb->status = -EPIPE; 948*/
1062 949 /* Start xfer for this endpoint if not already done */
1063#if 0 950 if ((curr_slot > 31) && (free_slot > -1)) {
1064 printk(KERN_ERR "Error in %s().\n", __func__); 951 if (usb_pipeint(qtd->urb->pipe))
1065 printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x " 952 create_ptd_int(qh, qtd, &ptd);
1066 "dw3: %08x dw4: %08x dw5: %08x dw6: " 953 else
1067 "%08x dw7: %08x\n", 954 create_ptd_atl(qh, qtd, &ptd);
1068 ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3, 955
1069 ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7); 956 start_bus_transfer(hcd, ptd_offset, free_slot,
1070#endif 957 slots, qtd, qh, &ptd);
1071 } else { 958 curr_slot = free_slot;
1072 priv->atl_ints[slot].qh->toggle = ptd.dw3 & (1 << 25); 959 }
1073 priv->atl_ints[slot].qh->ping = ptd.dw3 & (1 << 26); 960
961 n++;
962 if (n >= ENQUEUE_DEPTH)
963 break;
1074 } 964 }
965 }
966}
1075 967
1076 length = PTD_XFERRED_LENGTH(ptd.dw3); 968void schedule_ptds(struct usb_hcd *hcd)
1077 if (length) { 969{
1078 switch (DW1_GET_PID(ptd.dw1)) { 970 struct isp1760_hcd *priv;
1079 case IN_PID: 971 struct isp1760_qh *qh, *qh_next;
1080 mem_reads8(hcd->regs, qtd->payload_addr, 972 struct list_head *ep_queue;
1081 qtd->data_buffer, length); 973 struct usb_host_endpoint *ep;
974 LIST_HEAD(urb_list);
975 struct urb_listitem *urb_listitem, *urb_listitem_next;
1082 976
1083 case OUT_PID: 977 if (!hcd) {
978 WARN_ON(1);
979 return;
980 }
1084 981
1085 qtd->urb->actual_length += length; 982 priv = hcd_to_priv(hcd);
1086 983
1087 case SETUP_PID: 984 /*
1088 break; 985 * check finished/retired xfers, transfer payloads, call urb_done()
986 */
987 ep_queue = &priv->interruptqhs;
988 while (ep_queue) {
989 list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
990 ep = list_entry(qh->qtd_list.next, struct isp1760_qtd,
991 qtd_list)->urb->ep;
992 collect_qtds(hcd, qh, &urb_list);
993 if (list_empty(&qh->qtd_list)) {
994 list_del(&qh->qh_list);
995 if (ep->hcpriv == NULL) {
996 /* Endpoint has been disabled, so we
997 can free the associated queue head. */
998 qh_free(qh);
999 }
1089 } 1000 }
1090 } 1001 }
1091 1002
1092 priv->atl_ints[slot].qtd = NULL; 1003 if (ep_queue == &priv->interruptqhs)
1093 priv->atl_ints[slot].qh = NULL; 1004 ep_queue = &priv->controlqhs;
1094 1005 else if (ep_queue == &priv->controlqhs)
1095 free_mem(hcd, qtd); 1006 ep_queue = &priv->bulkqhs;
1007 else
1008 ep_queue = NULL;
1009 }
1096 1010
1097 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map); 1011 list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
1012 urb_list) {
1013 isp1760_urb_done(hcd, urb_listitem->urb);
1014 kmem_cache_free(urb_listitem_cachep, urb_listitem);
1015 }
1098 1016
1099 if (qtd->urb->status == -EPIPE) { 1017 /*
1100 /* HALT was received */ 1018 * Schedule packets for transfer.
1019 *
1020 * According to USB2.0 specification:
1021 *
1022 * 1st prio: interrupt xfers, up to 80 % of bandwidth
1023 * 2nd prio: control xfers
1024 * 3rd prio: bulk xfers
1025 *
1026 * ... but let's use a simpler scheme here (mostly because ISP1761 doc
1027 * is very unclear on how to prioritize traffic):
1028 *
1029 * 1) Enqueue any queued control transfers, as long as payload chip mem
1030 * and PTD ATL slots are available.
1031 * 2) Enqueue any queued INT transfers, as long as payload chip mem
1032 * and PTD INT slots are available.
1033 * 3) Enqueue any queued bulk transfers, as long as payload chip mem
1034 * and PTD ATL slots are available.
1035 *
1036 * Use double buffering (ENQUEUE_DEPTH==2) as a compromise between
1037 * conservation of chip mem and performance.
1038 *
1039 * I'm sure this scheme could be improved upon!
1040 */
1041 ep_queue = &priv->controlqhs;
1042 while (ep_queue) {
1043 list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
1044 enqueue_qtds(hcd, qh);
1045
1046 if (ep_queue == &priv->controlqhs)
1047 ep_queue = &priv->interruptqhs;
1048 else if (ep_queue == &priv->interruptqhs)
1049 ep_queue = &priv->bulkqhs;
1050 else
1051 ep_queue = NULL;
1052 }
1053}
1101 1054
1102 urb = qtd->urb; 1055#define PTD_STATE_QTD_DONE 1
1103 qtd = clean_up_qtdlist(qtd, qh); 1056#define PTD_STATE_QTD_RELOAD 2
1104 isp1760_urb_done(hcd, urb); 1057#define PTD_STATE_URB_RETIRE 3
1105 1058
1106 } else if (usb_pipebulk(qtd->urb->pipe) && 1059static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd,
1107 (length < qtd->length)) { 1060 struct urb *urb)
1108 /* short BULK received */ 1061{
1062 __dw dw4;
1063 int i;
1109 1064
1110 if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK) { 1065 dw4 = ptd->dw4;
1111 qtd->urb->status = -EREMOTEIO; 1066 dw4 >>= 8;
1112 dev_dbg(hcd->self.controller,
1113 "short bulk, %d instead %zu "
1114 "with URB_SHORT_NOT_OK flag.\n",
1115 length, qtd->length);
1116 }
1117 1067
1118 if (qtd->urb->status == -EINPROGRESS) 1068 /* FIXME: ISP1761 datasheet does not say what to do with these. Do we
1119 qtd->urb->status = 0; 1069 need to handle these errors? Is it done in hardware? */
1120 1070
1121 urb = qtd->urb; 1071 if (ptd->dw3 & DW3_HALT_BIT) {
1122 qtd = clean_up_qtdlist(qtd, qh);
1123 isp1760_urb_done(hcd, urb);
1124 1072
1125 } else if (last_qtd_of_urb(qtd, qh)) { 1073 urb->status = -EPROTO; /* Default unknown error */
1126 /* that was the last qtd of that URB */
1127 1074
1128 if (qtd->urb->status == -EINPROGRESS) 1075 for (i = 0; i < 8; i++) {
1129 qtd->urb->status = 0; 1076 switch (dw4 & 0x7) {
1077 case INT_UNDERRUN:
1078 dev_dbg(hcd->self.controller, "%s: underrun "
1079 "during uFrame %d\n",
1080 __func__, i);
1081 urb->status = -ECOMM; /* Could not write data */
1082 break;
1083 case INT_EXACT:
1084 dev_dbg(hcd->self.controller, "%s: transaction "
1085 "error during uFrame %d\n",
1086 __func__, i);
1087 urb->status = -EPROTO; /* timeout, bad CRC, PID
1088 error etc. */
1089 break;
1090 case INT_BABBLE:
1091 dev_dbg(hcd->self.controller, "%s: babble "
1092 "error during uFrame %d\n",
1093 __func__, i);
1094 urb->status = -EOVERFLOW;
1095 break;
1096 }
1097 dw4 >>= 3;
1098 }
1130 1099
1131 urb = qtd->urb; 1100 return PTD_STATE_URB_RETIRE;
1132 qtd = clean_up_qtdlist(qtd, qh); 1101 }
1133 isp1760_urb_done(hcd, urb);
1134 1102
1135 } else { 1103 return PTD_STATE_QTD_DONE;
1136 /* next QTD of this URB */ 1104}
1137 1105
1138 qtd = clean_this_qtd(qtd, qh); 1106static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
1139 BUG_ON(!qtd); 1107 struct urb *urb)
1140 } 1108{
1109 WARN_ON(!ptd);
1110 if (ptd->dw3 & DW3_HALT_BIT) {
1111 if (ptd->dw3 & DW3_BABBLE_BIT)
1112 urb->status = -EOVERFLOW;
1113 else if (FROM_DW3_CERR(ptd->dw3))
1114 urb->status = -EPIPE; /* Stall */
1115 else if (ptd->dw3 & DW3_ERROR_BIT)
1116 urb->status = -EPROTO; /* XactErr */
1117 else
1118 urb->status = -EPROTO; /* Unknown */
1119/*
1120 dev_dbg(hcd->self.controller, "%s: ptd error:\n"
1121 " dw0: %08x dw1: %08x dw2: %08x dw3: %08x\n"
1122 " dw4: %08x dw5: %08x dw6: %08x dw7: %08x\n",
1123 __func__,
1124 ptd->dw0, ptd->dw1, ptd->dw2, ptd->dw3,
1125 ptd->dw4, ptd->dw5, ptd->dw6, ptd->dw7);
1126*/
1127 return PTD_STATE_URB_RETIRE;
1128 }
1141 1129
1142 if (qtd) 1130 if ((ptd->dw3 & DW3_ERROR_BIT) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
1143 enqueue_an_ATL_packet(hcd, qh, qtd); 1131 /* Transfer Error, *but* active and no HALT -> reload */
1132 dev_dbg(hcd->self.controller, "PID error; reloading ptd\n");
1133 return PTD_STATE_QTD_RELOAD;
1134 }
1144 1135
1145 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); 1136 if (!FROM_DW3_NAKCOUNT(ptd->dw3) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
1137 /*
1138 * NAKs are handled in HW by the chip. Usually if the
1139 * device is not able to send data fast enough.
1140 * This happens mostly on slower hardware.
1141 */
1142 return PTD_STATE_QTD_RELOAD;
1146 } 1143 }
1147 if (priv->atl_queued <= 1) 1144
1148 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, 1145 return PTD_STATE_QTD_DONE;
1149 INTERRUPT_ENABLE_MASK);
1150} 1146}
1151 1147
1152static void do_intl_int(struct usb_hcd *hcd) 1148static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
1153{ 1149{
1154 struct isp1760_hcd *priv = hcd_to_priv(hcd); 1150 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1155 u32 done_map, skip_map; 1151 u32 imask;
1152 irqreturn_t irqret = IRQ_NONE;
1156 struct ptd ptd; 1153 struct ptd ptd;
1157 struct urb *urb;
1158 u32 length;
1159 int error;
1160 u32 slot;
1161 struct isp1760_qtd *qtd;
1162 struct isp1760_qh *qh; 1154 struct isp1760_qh *qh;
1155 int int_done_map, atl_done_map;
1156 int slot;
1157 int state;
1158 struct slotinfo *slots;
1159 u32 ptd_offset;
1160 struct isp1760_qtd *qtd;
1161 int modified;
1162 static int last_active_ptds;
1163 1163
1164 done_map = reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG); 1164 spin_lock(&priv->lock);
1165 skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
1166
1167 while (done_map) {
1168 slot = __ffs(done_map);
1169 done_map &= ~(1 << slot);
1170 skip_map |= (1 << slot);
1171
1172 qtd = priv->int_ints[slot].qtd;
1173 qh = priv->int_ints[slot].qh;
1174
1175 /* urb unlinked? */
1176 if (!qh)
1177 continue;
1178 1165
1179 ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd); 1166 if (!(hcd->state & HC_STATE_RUNNING))
1180 check_int_err_status(hcd, ptd.dw4); 1167 goto leave;
1181
1182 error = check_error(hcd, &ptd);
1183 if (error) {
1184#if 0
1185 printk(KERN_ERR "Error in %s().\n", __func__);
1186 printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x "
1187 "dw3: %08x dw4: %08x dw5: %08x dw6: "
1188 "%08x dw7: %08x\n",
1189 ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3,
1190 ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7);
1191#endif
1192 qtd->urb->status = -EPIPE;
1193 priv->int_ints[slot].qh->toggle = 0;
1194 priv->int_ints[slot].qh->ping = 0;
1195 1168
1169 imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
1170 if (unlikely(!imask))
1171 goto leave;
1172 reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
1173
1174 int_done_map = reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
1175 atl_done_map = reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
1176 modified = int_done_map | atl_done_map;
1177
1178 while (int_done_map || atl_done_map) {
1179 if (int_done_map) {
1180 /* INT ptd */
1181 slot = __ffs(int_done_map);
1182 int_done_map &= ~(1 << slot);
1183 slots = priv->int_slots;
1184 if (!slots[slot].qh)
1185 continue;
1186 ptd_offset = INT_PTD_OFFSET;
1187 ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
1188 state = check_int_transfer(hcd, &ptd,
1189 slots[slot].qtd->urb);
1196 } else { 1190 } else {
1197 priv->int_ints[slot].qh->toggle = ptd.dw3 & (1 << 25); 1191 /* ATL ptd */
1198 priv->int_ints[slot].qh->ping = ptd.dw3 & (1 << 26); 1192 slot = __ffs(atl_done_map);
1193 atl_done_map &= ~(1 << slot);
1194 slots = priv->atl_slots;
1195 if (!slots[slot].qh)
1196 continue;
1197 ptd_offset = ATL_PTD_OFFSET;
1198 ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
1199 state = check_atl_transfer(hcd, &ptd,
1200 slots[slot].qtd->urb);
1199 } 1201 }
1200 1202
1201 if (qtd->urb->dev->speed != USB_SPEED_HIGH) 1203 qtd = slots[slot].qtd;
1202 length = PTD_XFERRED_LENGTH_LO(ptd.dw3); 1204 slots[slot].qtd = NULL;
1203 else 1205 qh = slots[slot].qh;
1204 length = PTD_XFERRED_LENGTH(ptd.dw3); 1206 slots[slot].qh = NULL;
1205 1207 priv->active_ptds--;
1206 if (length) { 1208 qh->slot = -1;
1207 switch (DW1_GET_PID(ptd.dw1)) { 1209
1208 case IN_PID: 1210 WARN_ON(qtd->status != QTD_XFER_STARTED);
1209 mem_reads8(hcd->regs, qtd->payload_addr, 1211
1210 qtd->data_buffer, length); 1212 switch (state) {
1211 case OUT_PID: 1213 case PTD_STATE_QTD_DONE:
1212 1214 if ((usb_pipeint(qtd->urb->pipe)) &&
1213 qtd->urb->actual_length += length; 1215 (qtd->urb->dev->speed != USB_SPEED_HIGH))
1216 qtd->actual_length =
1217 FROM_DW3_SCS_NRBYTESTRANSFERRED(ptd.dw3);
1218 else
1219 qtd->actual_length =
1220 FROM_DW3_NRBYTESTRANSFERRED(ptd.dw3);
1214 1221
1215 case SETUP_PID: 1222 qtd->status = QTD_XFER_COMPLETE;
1216 break; 1223 if (list_is_last(&qtd->qtd_list, &qh->qtd_list) ||
1217 } 1224 is_short_bulk(qtd))
1218 } 1225 qtd = NULL;
1226 else
1227 qtd = list_entry(qtd->qtd_list.next,
1228 typeof(*qtd), qtd_list);
1219 1229
1220 priv->int_ints[slot].qtd = NULL; 1230 qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
1221 priv->int_ints[slot].qh = NULL; 1231 qh->ping = FROM_DW3_PING(ptd.dw3);
1232 break;
1222 1233
1223 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map); 1234 case PTD_STATE_QTD_RELOAD: /* QTD_RETRY, for atls only */
1224 free_mem(hcd, qtd); 1235 qtd->status = QTD_PAYLOAD_ALLOC;
1236 ptd.dw0 |= DW0_VALID_BIT;
1237 /* RL counter = ERR counter */
1238 ptd.dw3 &= ~TO_DW3_NAKCOUNT(0xf);
1239 ptd.dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(ptd.dw2));
1240 ptd.dw3 &= ~TO_DW3_CERR(3);
1241 ptd.dw3 |= TO_DW3_CERR(ERR_COUNTER);
1242 qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
1243 qh->ping = FROM_DW3_PING(ptd.dw3);
1244 break;
1225 1245
1226 if (qtd->urb->status == -EPIPE) { 1246 case PTD_STATE_URB_RETIRE:
1227 /* HALT received */ 1247 qtd->status = QTD_RETIRE;
1248 qtd = NULL;
1249 qh->toggle = 0;
1250 qh->ping = 0;
1251 break;
1228 1252
1229 urb = qtd->urb; 1253 default:
1230 qtd = clean_up_qtdlist(qtd, qh); 1254 WARN_ON(1);
1231 isp1760_urb_done(hcd, urb); 1255 continue;
1256 }
1232 1257
1233 } else if (last_qtd_of_urb(qtd, qh)) { 1258 if (qtd && (qtd->status == QTD_PAYLOAD_ALLOC)) {
1259 if (slots == priv->int_slots) {
1260 if (state == PTD_STATE_QTD_RELOAD)
1261 dev_err(hcd->self.controller,
1262 "%s: PTD_STATE_QTD_RELOAD on "
1263 "interrupt packet\n", __func__);
1264 if (state != PTD_STATE_QTD_RELOAD)
1265 create_ptd_int(qh, qtd, &ptd);
1266 } else {
1267 if (state != PTD_STATE_QTD_RELOAD)
1268 create_ptd_atl(qh, qtd, &ptd);
1269 }
1234 1270
1235 if (qtd->urb->status == -EINPROGRESS) 1271 start_bus_transfer(hcd, ptd_offset, slot, slots, qtd,
1236 qtd->urb->status = 0; 1272 qh, &ptd);
1273 }
1274 }
1237 1275
1238 urb = qtd->urb; 1276 if (modified)
1239 qtd = clean_up_qtdlist(qtd, qh); 1277 schedule_ptds(hcd);
1240 isp1760_urb_done(hcd, urb);
1241 1278
1242 } else { 1279 /* ISP1760 Errata 2 explains that interrupts may be missed (or not
1243 /* next QTD of this URB */ 1280 happen?) if two USB devices are running simultaneously. Perhaps
1281 this happens when a PTD is finished during interrupt handling;
1282 enable SOF interrupts if PTDs are still scheduled when exiting this
1283 interrupt handler, just to be safe. */
1244 1284
1245 qtd = clean_this_qtd(qtd, qh); 1285 if (priv->active_ptds != last_active_ptds) {
1246 BUG_ON(!qtd); 1286 if (priv->active_ptds > 0)
1247 } 1287 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
1288 INTERRUPT_ENABLE_SOT_MASK);
1289 else
1290 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
1291 INTERRUPT_ENABLE_MASK);
1292 last_active_ptds = priv->active_ptds;
1293 }
1248 1294
1249 if (qtd) 1295 irqret = IRQ_HANDLED;
1250 enqueue_an_INT_packet(hcd, qh, qtd); 1296leave:
1297 spin_unlock(&priv->lock);
1251 1298
1252 skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG); 1299 return irqret;
1253 }
1254} 1300}
1255 1301
1256static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len) 1302static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
@@ -1380,197 +1426,136 @@ cleanup:
1380 qtd_list_free(head); 1426 qtd_list_free(head);
1381} 1427}
1382 1428
1383static int enqueue_qtdlist(struct usb_hcd *hcd, struct urb *urb,
1384 struct list_head *qtd_list, gfp_t mem_flags, packet_enqueue *p)
1385{
1386 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1387 struct isp1760_qtd *qtd;
1388 struct isp1760_qh *qh = NULL;
1389 unsigned long flags;
1390 int qh_empty;
1391 int rc;
1392
1393 spin_lock_irqsave(&priv->lock, flags);
1394 if (!HCD_HW_ACCESSIBLE(hcd)) {
1395 rc = -ESHUTDOWN;
1396 goto done;
1397 }
1398 rc = usb_hcd_link_urb_to_ep(hcd, urb);
1399 if (rc)
1400 goto done;
1401
1402 qh = urb->ep->hcpriv;
1403 if (!qh) {
1404 qh = isp1760_qh_alloc(GFP_ATOMIC);
1405 if (!qh) {
1406 usb_hcd_unlink_urb_from_ep(hcd, urb);
1407 rc = -ENOMEM;
1408 goto done;
1409 }
1410 if (!usb_pipecontrol(urb->pipe))
1411 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1412 !usb_pipein(urb->pipe), 1);
1413 urb->ep->hcpriv = qh;
1414 }
1415
1416 qh_empty = list_empty(&qh->qtd_list);
1417 list_splice_tail(qtd_list, &qh->qtd_list);
1418 if (qh_empty) {
1419 qtd = list_entry(qtd_list->next, struct isp1760_qtd, qtd_list);
1420 p(hcd, qh, qtd);
1421 }
1422
1423done:
1424 spin_unlock_irqrestore(&priv->lock, flags);
1425 if (!qh)
1426 qtd_list_free(qtd_list);
1427 return rc;
1428}
1429
1430static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, 1429static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1431 gfp_t mem_flags) 1430 gfp_t mem_flags)
1432{ 1431{
1433 struct list_head qtd_list; 1432 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1434 packet_enqueue *pe; 1433 struct list_head *ep_queue;
1435 1434 struct isp1760_qh *qh, *qhit;
1436 INIT_LIST_HEAD(&qtd_list); 1435 unsigned long spinflags;
1436 LIST_HEAD(new_qtds);
1437 int retval;
1438 int qh_in_queue;
1437 1439
1438 switch (usb_pipetype(urb->pipe)) { 1440 switch (usb_pipetype(urb->pipe)) {
1439 case PIPE_CONTROL: 1441 case PIPE_CONTROL:
1442 ep_queue = &priv->controlqhs;
1443 break;
1440 case PIPE_BULK: 1444 case PIPE_BULK:
1441 pe = enqueue_an_ATL_packet; 1445 ep_queue = &priv->bulkqhs;
1442 break; 1446 break;
1443
1444 case PIPE_INTERRUPT: 1447 case PIPE_INTERRUPT:
1445 pe = enqueue_an_INT_packet; 1448 if (urb->interval < 0)
1449 return -EINVAL;
1450 /* FIXME: Check bandwidth */
1451 ep_queue = &priv->interruptqhs;
1446 break; 1452 break;
1447
1448 case PIPE_ISOCHRONOUS: 1453 case PIPE_ISOCHRONOUS:
1449 dev_err(hcd->self.controller, "PIPE_ISOCHRONOUS ain't supported\n"); 1454 dev_err(hcd->self.controller, "%s: isochronous USB packets "
1455 "not yet supported\n",
1456 __func__);
1457 return -EPIPE;
1450 default: 1458 default:
1459 dev_err(hcd->self.controller, "%s: unknown pipe type\n",
1460 __func__);
1451 return -EPIPE; 1461 return -EPIPE;
1452 } 1462 }
1453 1463
1454 packetize_urb(hcd, urb, &qtd_list, mem_flags); 1464 if (usb_pipein(urb->pipe))
1455 if (list_empty(&qtd_list)) 1465 urb->actual_length = 0;
1456 return -ENOMEM;
1457
1458 return enqueue_qtdlist(hcd, urb, &qtd_list, mem_flags, pe);
1459}
1460
1461static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1462{
1463 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1464 struct inter_packet_info *ints;
1465 u32 i;
1466 u32 reg_base, skip_reg;
1467 unsigned long flags;
1468 struct ptd ptd;
1469 packet_enqueue *pe;
1470 1466
1471 switch (usb_pipetype(urb->pipe)) { 1467 packetize_urb(hcd, urb, &new_qtds, mem_flags);
1472 case PIPE_ISOCHRONOUS: 1468 if (list_empty(&new_qtds))
1473 return -EPIPE; 1469 return -ENOMEM;
1474 break; 1470 urb->hcpriv = NULL; /* Used to signal unlink to interrupt handler */
1475 1471
1476 case PIPE_INTERRUPT: 1472 retval = 0;
1477 ints = priv->int_ints; 1473 spin_lock_irqsave(&priv->lock, spinflags);
1478 reg_base = INT_PTD_OFFSET;
1479 skip_reg = HC_INT_PTD_SKIPMAP_REG;
1480 pe = enqueue_an_INT_packet;
1481 break;
1482 1474
1483 default: 1475 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
1484 ints = priv->atl_ints; 1476 retval = -ESHUTDOWN;
1485 reg_base = ATL_PTD_OFFSET; 1477 goto out;
1486 skip_reg = HC_ATL_PTD_SKIPMAP_REG;
1487 pe = enqueue_an_ATL_packet;
1488 break;
1489 } 1478 }
1479 retval = usb_hcd_link_urb_to_ep(hcd, urb);
1480 if (retval)
1481 goto out;
1490 1482
1491 memset(&ptd, 0, sizeof(ptd)); 1483 qh = urb->ep->hcpriv;
1492 spin_lock_irqsave(&priv->lock, flags); 1484 if (qh) {
1493 1485 qh_in_queue = 0;
1494 for (i = 0; i < 32; i++) { 1486 list_for_each_entry(qhit, ep_queue, qh_list) {
1495 if (!ints[i].qh) 1487 if (qhit == qh) {
1496 continue; 1488 qh_in_queue = 1;
1497 WARN_ON(!ints[i].qtd);
1498
1499 if (ints[i].qtd->urb == urb) {
1500 u32 skip_map;
1501 struct isp1760_qtd *qtd;
1502 struct isp1760_qh *qh;
1503
1504 skip_map = reg_read32(hcd->regs, skip_reg);
1505 skip_map |= 1 << i;
1506 reg_write32(hcd->regs, skip_reg, skip_map);
1507
1508 ptd_write(hcd->regs, reg_base, i, &ptd);
1509
1510 qtd = ints[i].qtd;
1511 qh = ints[i].qh;
1512
1513 free_mem(hcd, qtd);
1514 qtd = clean_up_qtdlist(qtd, qh);
1515
1516 ints[i].qh = NULL;
1517 ints[i].qtd = NULL;
1518
1519 urb->status = status;
1520 isp1760_urb_done(hcd, urb);
1521 if (qtd)
1522 pe(hcd, qh, qtd);
1523 break;
1524
1525 } else {
1526 struct isp1760_qtd *qtd;
1527
1528 list_for_each_entry(qtd, &ints[i].qtd->qtd_list,
1529 qtd_list) {
1530 if (qtd->urb == urb) {
1531 clean_up_qtdlist(qtd, ints[i].qh);
1532 isp1760_urb_done(hcd, urb);
1533 qtd = NULL;
1534 break;
1535 }
1536 }
1537
1538 /* We found the urb before the last slot */
1539 if (!qtd)
1540 break; 1489 break;
1490 }
1491 }
1492 if (!qh_in_queue)
1493 list_add_tail(&qh->qh_list, ep_queue);
1494 } else {
1495 qh = qh_alloc(GFP_ATOMIC);
1496 if (!qh) {
1497 retval = -ENOMEM;
1498 goto out;
1541 } 1499 }
1500 list_add_tail(&qh->qh_list, ep_queue);
1501 urb->ep->hcpriv = qh;
1542 } 1502 }
1543 1503
1544 spin_unlock_irqrestore(&priv->lock, flags); 1504 list_splice_tail(&new_qtds, &qh->qtd_list);
1545 return 0; 1505 schedule_ptds(hcd);
1506
1507out:
1508 spin_unlock_irqrestore(&priv->lock, spinflags);
1509 return retval;
1546} 1510}
1547 1511
1548static irqreturn_t isp1760_irq(struct usb_hcd *hcd) 1512static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1513 int status)
1549{ 1514{
1550 struct isp1760_hcd *priv = hcd_to_priv(hcd); 1515 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1551 u32 imask; 1516 struct isp1760_qh *qh;
1552 irqreturn_t irqret = IRQ_NONE; 1517 struct isp1760_qtd *qtd;
1518 struct ptd ptd;
1519 unsigned long spinflags;
1520 int retval = 0;
1553 1521
1554 spin_lock(&priv->lock); 1522 spin_lock_irqsave(&priv->lock, spinflags);
1555 1523
1556 if (!(hcd->state & HC_STATE_RUNNING)) 1524 qh = urb->ep->hcpriv;
1557 goto leave; 1525 if (!qh) {
1526 retval = -EINVAL;
1527 goto out;
1528 }
1558 1529
1559 imask = reg_read32(hcd->regs, HC_INTERRUPT_REG); 1530 /* We need to forcefully reclaim the slot since some transfers never
1560 if (unlikely(!imask)) 1531 return, e.g. interrupt transfers and NAKed bulk transfers. */
1561 goto leave; 1532 if (qh->slot > -1) {
1533 memset(&ptd, 0, sizeof(ptd));
1534 if (usb_pipebulk(urb->pipe)) {
1535 priv->atl_slots[qh->slot].qh = NULL;
1536 priv->atl_slots[qh->slot].qtd = NULL;
1537 ptd_write(hcd->regs, ATL_PTD_OFFSET, qh->slot, &ptd);
1538 } else {
1539 priv->int_slots[qh->slot].qh = NULL;
1540 priv->int_slots[qh->slot].qtd = NULL;
1541 ptd_write(hcd->regs, INT_PTD_OFFSET, qh->slot, &ptd);
1542 }
1543 priv->active_ptds--;
1544 qh->slot = -1;
1545 }
1562 1546
1563 reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); 1547 list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
1564 if (imask & (HC_ATL_INT | HC_SOT_INT)) 1548 if (qtd->urb == urb)
1565 do_atl_int(hcd); 1549 qtd->status = QTD_RETIRE;
1550 }
1566 1551
1567 if (imask & HC_INTL_INT) 1552 urb->status = status;
1568 do_intl_int(hcd); 1553 schedule_ptds(hcd);
1569 1554
1570 irqret = IRQ_HANDLED; 1555out:
1571leave: 1556 spin_unlock_irqrestore(&priv->lock, spinflags);
1572 spin_unlock(&priv->lock); 1557
1573 return irqret; 1558 return retval;
1574} 1559}
1575 1560
1576static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf) 1561static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
@@ -1661,7 +1646,7 @@ static int check_reset_complete(struct usb_hcd *hcd, int index,
1661 /* if reset finished and it's still not enabled -- handoff */ 1646 /* if reset finished and it's still not enabled -- handoff */
1662 if (!(port_status & PORT_PE)) { 1647 if (!(port_status & PORT_PE)) {
1663 1648
1664 dev_err(hcd->self.controller, 1649 dev_info(hcd->self.controller,
1665 "port %d full speed --> companion\n", 1650 "port %d full speed --> companion\n",
1666 index + 1); 1651 index + 1);
1667 1652
@@ -1670,7 +1655,7 @@ static int check_reset_complete(struct usb_hcd *hcd, int index,
1670 reg_write32(hcd->regs, HC_PORTSC1, port_status); 1655 reg_write32(hcd->regs, HC_PORTSC1, port_status);
1671 1656
1672 } else 1657 } else
1673 dev_err(hcd->self.controller, "port %d high speed\n", 1658 dev_info(hcd->self.controller, "port %d high speed\n",
1674 index + 1); 1659 index + 1);
1675 1660
1676 return port_status; 1661 return port_status;
@@ -1948,43 +1933,32 @@ static void isp1760_endpoint_disable(struct usb_hcd *hcd,
1948 struct isp1760_hcd *priv = hcd_to_priv(hcd); 1933 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1949 struct isp1760_qh *qh; 1934 struct isp1760_qh *qh;
1950 struct isp1760_qtd *qtd; 1935 struct isp1760_qtd *qtd;
1951 unsigned long flags; 1936 unsigned long spinflags;
1937 int do_iter;
1952 1938
1953 spin_lock_irqsave(&priv->lock, flags); 1939 spin_lock_irqsave(&priv->lock, spinflags);
1954 qh = ep->hcpriv; 1940 qh = ep->hcpriv;
1955 if (!qh) 1941 if (!qh)
1956 goto out; 1942 goto out;
1957 1943
1958 ep->hcpriv = NULL; 1944 do_iter = !list_empty(&qh->qtd_list);
1959 do { 1945 while (do_iter) {
1960 /* more than entry might get removed */ 1946 do_iter = 0;
1961 if (list_empty(&qh->qtd_list)) 1947 list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
1962 break; 1948 if (qtd->urb->ep == ep) {
1963 1949 spin_unlock_irqrestore(&priv->lock, spinflags);
1964 qtd = list_first_entry(&qh->qtd_list, struct isp1760_qtd, 1950 isp1760_urb_dequeue(hcd, qtd->urb, -ECONNRESET);
1965 qtd_list); 1951 spin_lock_irqsave(&priv->lock, spinflags);
1966 1952 do_iter = 1;
1967 if (qtd->status & URB_ENQUEUED) { 1953 break; /* Restart iteration */
1968 spin_unlock_irqrestore(&priv->lock, flags); 1954 }
1969 isp1760_urb_dequeue(hcd, qtd->urb, -ECONNRESET);
1970 spin_lock_irqsave(&priv->lock, flags);
1971 } else {
1972 struct urb *urb;
1973
1974 urb = qtd->urb;
1975 clean_up_qtdlist(qtd, qh);
1976 urb->status = -ECONNRESET;
1977 isp1760_urb_done(hcd, urb);
1978 } 1955 }
1979 } while (1); 1956 }
1957 ep->hcpriv = NULL;
1958 /* Cannot free qh here since it will be parsed by schedule_ptds() */
1980 1959
1981 qh_destroy(qh);
1982 /* remove requests and leak them.
1983 * ATL are pretty fast done, INT could take a while...
1984 * The latter shoule be removed
1985 */
1986out: 1960out:
1987 spin_unlock_irqrestore(&priv->lock, flags); 1961 spin_unlock_irqrestore(&priv->lock, spinflags);
1988} 1962}
1989 1963
1990static int isp1760_get_frame(struct usb_hcd *hcd) 1964static int isp1760_get_frame(struct usb_hcd *hcd)
@@ -2048,6 +2022,13 @@ static const struct hc_driver isp1760_hc_driver = {
2048 2022
2049int __init init_kmem_once(void) 2023int __init init_kmem_once(void)
2050{ 2024{
2025 urb_listitem_cachep = kmem_cache_create("isp1760 urb_listitem",
2026 sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
2027 SLAB_MEM_SPREAD, NULL);
2028
2029 if (!urb_listitem_cachep)
2030 return -ENOMEM;
2031
2051 qtd_cachep = kmem_cache_create("isp1760_qtd", 2032 qtd_cachep = kmem_cache_create("isp1760_qtd",
2052 sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY | 2033 sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
2053 SLAB_MEM_SPREAD, NULL); 2034 SLAB_MEM_SPREAD, NULL);
@@ -2070,6 +2051,7 @@ void deinit_kmem_cache(void)
2070{ 2051{
2071 kmem_cache_destroy(qtd_cachep); 2052 kmem_cache_destroy(qtd_cachep);
2072 kmem_cache_destroy(qh_cachep); 2053 kmem_cache_destroy(qh_cachep);
2054 kmem_cache_destroy(urb_listitem_cachep);
2073} 2055}
2074 2056
2075struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len, 2057struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,