Diffstat (limited to 'drivers/usb/host/isp1760-hcd.c')
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 1638
1 files changed, 773 insertions, 865 deletions
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 7b2e69aa2e98..c9e6e454c625 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -8,6 +8,8 @@
8 * 8 *
9 * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de> 9 * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
10 * 10 *
11 * (c) 2011 Arvid Brodin <arvid.brodin@enea.com>
12 *
11 */ 13 */
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -26,14 +28,18 @@
26 28
27static struct kmem_cache *qtd_cachep; 29static struct kmem_cache *qtd_cachep;
28static struct kmem_cache *qh_cachep; 30static struct kmem_cache *qh_cachep;
31static struct kmem_cache *urb_listitem_cachep;
29 32
30struct isp1760_hcd { 33struct isp1760_hcd {
31 u32 hcs_params; 34 u32 hcs_params;
32 spinlock_t lock; 35 spinlock_t lock;
33 struct inter_packet_info atl_ints[32]; 36 struct slotinfo atl_slots[32];
34 struct inter_packet_info int_ints[32]; 37 int atl_done_map;
38 struct slotinfo int_slots[32];
39 int int_done_map;
35 struct memory_chunk memory_pool[BLOCKS]; 40 struct memory_chunk memory_pool[BLOCKS];
36 u32 atl_queued; 41 struct list_head controlqhs, bulkqhs, interruptqhs;
42 int active_ptds;
37 43
38 /* periodic schedule support */ 44 /* periodic schedule support */
39#define DEFAULT_I_TDPS 1024 45#define DEFAULT_I_TDPS 1024
@@ -85,18 +91,34 @@ struct isp1760_qtd {
85 struct list_head qtd_list; 91 struct list_head qtd_list;
86 struct urb *urb; 92 struct urb *urb;
87 size_t length; 93 size_t length;
88 94 size_t actual_length;
89 /* isp special*/ 95
96 /* QTD_ENQUEUED: waiting for transfer (inactive) */
97 /* QTD_PAYLOAD_ALLOC: chip mem has been allocated for payload */
98 /* QTD_XFER_STARTED: valid ptd has been written to isp176x - only
99 interrupt handler may touch this qtd! */
100 /* QTD_XFER_COMPLETE: payload has been transferred successfully */
101 /* QTD_RETIRE: transfer error/abort qtd */
102#define QTD_ENQUEUED 0
103#define QTD_PAYLOAD_ALLOC 1
104#define QTD_XFER_STARTED 2
105#define QTD_XFER_COMPLETE 3
106#define QTD_RETIRE 4
90 u32 status; 107 u32 status;
91#define URB_ENQUEUED (1 << 1)
92}; 108};
93 109
110/* Queue head, one for each active endpoint */
94struct isp1760_qh { 111struct isp1760_qh {
95 /* first part defined by EHCI spec */ 112 struct list_head qh_list;
96 struct list_head qtd_list; 113 struct list_head qtd_list;
97
98 u32 toggle; 114 u32 toggle;
99 u32 ping; 115 u32 ping;
116 int slot;
117};
118
119struct urb_listitem {
120 struct list_head urb_list;
121 struct urb *urb;
100}; 122};
101 123
102/* 124/*
@@ -272,7 +294,7 @@ static void init_memory(struct isp1760_hcd *priv)
272 payload_addr += priv->memory_pool[curr + i].size; 294 payload_addr += priv->memory_pool[curr + i].size;
273 } 295 }
274 296
275 BUG_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE); 297 WARN_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE);
276} 298}
277 299
278static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) 300static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
@@ -280,7 +302,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
280 struct isp1760_hcd *priv = hcd_to_priv(hcd); 302 struct isp1760_hcd *priv = hcd_to_priv(hcd);
281 int i; 303 int i;
282 304
283 BUG_ON(qtd->payload_addr); 305 WARN_ON(qtd->payload_addr);
284 306
285 if (!qtd->length) 307 if (!qtd->length)
286 return; 308 return;
@@ -293,19 +315,6 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
293 return; 315 return;
294 } 316 }
295 } 317 }
296
297 dev_err(hcd->self.controller,
298 "%s: Cannot allocate %zu bytes of memory\n"
299 "Current memory map:\n",
300 __func__, qtd->length);
301 for (i = 0; i < BLOCKS; i++) {
302 dev_err(hcd->self.controller, "Pool %2d size %4d status: %d\n",
303 i, priv->memory_pool[i].size,
304 priv->memory_pool[i].free);
305 }
306 /* XXX maybe -ENOMEM could be possible */
307 BUG();
308 return;
309} 318}
310 319
311static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) 320static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
@@ -318,7 +327,7 @@ static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
318 327
319 for (i = 0; i < BLOCKS; i++) { 328 for (i = 0; i < BLOCKS; i++) {
320 if (priv->memory_pool[i].start == qtd->payload_addr) { 329 if (priv->memory_pool[i].start == qtd->payload_addr) {
321 BUG_ON(priv->memory_pool[i].free); 330 WARN_ON(priv->memory_pool[i].free);
322 priv->memory_pool[i].free = 1; 331 priv->memory_pool[i].free = 1;
323 qtd->payload_addr = 0; 332 qtd->payload_addr = 0;
324 return; 333 return;
@@ -327,19 +336,8 @@ static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
327 336
328 dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n", 337 dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n",
329 __func__, qtd->payload_addr); 338 __func__, qtd->payload_addr);
330 BUG(); 339 WARN_ON(1);
331} 340 qtd->payload_addr = 0;
332
333static void isp1760_init_regs(struct usb_hcd *hcd)
334{
335 reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
336 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
337 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
338 reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
339
340 reg_write32(hcd->regs, HC_ATL_PTD_DONEMAP_REG, ~NO_TRANSFER_ACTIVE);
341 reg_write32(hcd->regs, HC_INT_PTD_DONEMAP_REG, ~NO_TRANSFER_ACTIVE);
342 reg_write32(hcd->regs, HC_ISO_PTD_DONEMAP_REG, ~NO_TRANSFER_ACTIVE);
343} 341}
344 342
345static int handshake(struct usb_hcd *hcd, u32 reg, 343static int handshake(struct usb_hcd *hcd, u32 reg,
@@ -377,31 +375,27 @@ static int ehci_reset(struct usb_hcd *hcd)
377 return retval; 375 return retval;
378} 376}
379 377
380static void qh_destroy(struct isp1760_qh *qh) 378static struct isp1760_qh *qh_alloc(gfp_t flags)
381{
382 BUG_ON(!list_empty(&qh->qtd_list));
383 kmem_cache_free(qh_cachep, qh);
384}
385
386static struct isp1760_qh *isp1760_qh_alloc(gfp_t flags)
387{ 379{
388 struct isp1760_qh *qh; 380 struct isp1760_qh *qh;
389 381
390 qh = kmem_cache_zalloc(qh_cachep, flags); 382 qh = kmem_cache_zalloc(qh_cachep, flags);
391 if (!qh) 383 if (!qh)
392 return qh; 384 return NULL;
393 385
386 INIT_LIST_HEAD(&qh->qh_list);
394 INIT_LIST_HEAD(&qh->qtd_list); 387 INIT_LIST_HEAD(&qh->qtd_list);
388 qh->slot = -1;
389
395 return qh; 390 return qh;
396} 391}
397 392
398/* magic numbers that can affect system performance */ 393static void qh_free(struct isp1760_qh *qh)
399#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ 394{
400#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ 395 WARN_ON(!list_empty(&qh->qtd_list));
401#define EHCI_TUNE_RL_TT 0 396 WARN_ON(qh->slot > -1);
402#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */ 397 kmem_cache_free(qh_cachep, qh);
403#define EHCI_TUNE_MULT_TT 1 398}
404#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
405 399
406/* one-time init, only for memory state */ 400/* one-time init, only for memory state */
407static int priv_init(struct usb_hcd *hcd) 401static int priv_init(struct usb_hcd *hcd)
@@ -411,6 +405,10 @@ static int priv_init(struct usb_hcd *hcd)
411 405
412 spin_lock_init(&priv->lock); 406 spin_lock_init(&priv->lock);
413 407
408 INIT_LIST_HEAD(&priv->interruptqhs);
409 INIT_LIST_HEAD(&priv->controlqhs);
410 INIT_LIST_HEAD(&priv->bulkqhs);
411
414 /* 412 /*
415 * hw default: 1K periodic list heads, one per frame. 413 * hw default: 1K periodic list heads, one per frame.
416 * periodic_size can shrink by USBCMD update if hcc_params allows. 414 * periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -468,7 +466,10 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
468 } 466 }
469 467
470 /* pre reset */ 468 /* pre reset */
471 isp1760_init_regs(hcd); 469 reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
470 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
471 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
472 reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
472 473
473 /* reset */ 474 /* reset */
474 reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL); 475 reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL);
@@ -488,12 +489,15 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
488 16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ? 489 16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ?
489 "analog" : "digital"); 490 "analog" : "digital");
490 491
492 /* This is weird: at the first plug-in of a device there seems to be
493 one packet queued that never gets returned? */
494 priv->active_ptds = -1;
495
491 /* ATL reset */ 496 /* ATL reset */
492 reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET); 497 reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
493 mdelay(10); 498 mdelay(10);
494 reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode); 499 reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
495 500
496 reg_write32(hcd->regs, HC_INTERRUPT_REG, INTERRUPT_ENABLE_MASK);
497 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK); 501 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);
498 502
499 /* 503 /*
@@ -516,14 +520,21 @@ static void isp1760_init_maps(struct usb_hcd *hcd)
516 reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000); 520 reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
517 reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000); 521 reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
518 reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001); 522 reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
523
524 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff);
525 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff);
526 reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff);
527
528 reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
529 ATL_BUF_FILL | INT_BUF_FILL);
519} 530}
520 531
521static void isp1760_enable_interrupts(struct usb_hcd *hcd) 532static void isp1760_enable_interrupts(struct usb_hcd *hcd)
522{ 533{
523 reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0); 534 reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
524 reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0); 535 reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff);
525 reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0); 536 reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
526 reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0); 537 reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff);
527 reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0); 538 reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
528 reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff); 539 reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
529 /* step 23 passed */ 540 /* step 23 passed */
@@ -548,8 +559,7 @@ static int isp1760_run(struct usb_hcd *hcd)
548 command |= CMD_RUN; 559 command |= CMD_RUN;
549 reg_write32(hcd->regs, HC_USBCMD, command); 560 reg_write32(hcd->regs, HC_USBCMD, command);
550 561
551 retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 562 retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
552 250 * 1000);
553 if (retval) 563 if (retval)
554 return retval; 564 return retval;
555 565
@@ -598,12 +608,19 @@ static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh)
598 return (qtd->urb != urb); 608 return (qtd->urb != urb);
599} 609}
600 610
601static void transform_into_atl(struct isp1760_qh *qh, 611/* magic numbers that can affect system performance */
612#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
613#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
614#define EHCI_TUNE_RL_TT 0
615#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
616#define EHCI_TUNE_MULT_TT 1
617#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
618
619static void create_ptd_atl(struct isp1760_qh *qh,
602 struct isp1760_qtd *qtd, struct ptd *ptd) 620 struct isp1760_qtd *qtd, struct ptd *ptd)
603{ 621{
604 u32 maxpacket; 622 u32 maxpacket;
605 u32 multi; 623 u32 multi;
606 u32 pid_code;
607 u32 rl = RL_COUNTER; 624 u32 rl = RL_COUNTER;
608 u32 nak = NAK_COUNTER; 625 u32 nak = NAK_COUNTER;
609 626
@@ -616,67 +633,62 @@ static void transform_into_atl(struct isp1760_qh *qh,
616 maxpacket &= 0x7ff; 633 maxpacket &= 0x7ff;
617 634
618 /* DW0 */ 635 /* DW0 */
619 ptd->dw0 = PTD_VALID; 636 ptd->dw0 = DW0_VALID_BIT;
620 ptd->dw0 |= PTD_LENGTH(qtd->length); 637 ptd->dw0 |= TO_DW0_LENGTH(qtd->length);
621 ptd->dw0 |= PTD_MAXPACKET(maxpacket); 638 ptd->dw0 |= TO_DW0_MAXPACKET(maxpacket);
622 ptd->dw0 |= PTD_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe)); 639 ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));
623 640
624 /* DW1 */ 641 /* DW1 */
625 ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1; 642 ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1;
626 ptd->dw1 |= PTD_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe)); 643 ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
627 644 ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type);
628 pid_code = qtd->packet_type;
629 ptd->dw1 |= PTD_PID_TOKEN(pid_code);
630 645
631 if (usb_pipebulk(qtd->urb->pipe)) 646 if (usb_pipebulk(qtd->urb->pipe))
632 ptd->dw1 |= PTD_TRANS_BULK; 647 ptd->dw1 |= DW1_TRANS_BULK;
633 else if (usb_pipeint(qtd->urb->pipe)) 648 else if (usb_pipeint(qtd->urb->pipe))
634 ptd->dw1 |= PTD_TRANS_INT; 649 ptd->dw1 |= DW1_TRANS_INT;
635 650
636 if (qtd->urb->dev->speed != USB_SPEED_HIGH) { 651 if (qtd->urb->dev->speed != USB_SPEED_HIGH) {
637 /* split transaction */ 652 /* split transaction */
638 653
639 ptd->dw1 |= PTD_TRANS_SPLIT; 654 ptd->dw1 |= DW1_TRANS_SPLIT;
640 if (qtd->urb->dev->speed == USB_SPEED_LOW) 655 if (qtd->urb->dev->speed == USB_SPEED_LOW)
641 ptd->dw1 |= PTD_SE_USB_LOSPEED; 656 ptd->dw1 |= DW1_SE_USB_LOSPEED;
642 657
643 ptd->dw1 |= PTD_PORT_NUM(qtd->urb->dev->ttport); 658 ptd->dw1 |= TO_DW1_PORT_NUM(qtd->urb->dev->ttport);
644 ptd->dw1 |= PTD_HUB_NUM(qtd->urb->dev->tt->hub->devnum); 659 ptd->dw1 |= TO_DW1_HUB_NUM(qtd->urb->dev->tt->hub->devnum);
645 660
646 /* SE bit for Split INT transfers */ 661 /* SE bit for Split INT transfers */
647 if (usb_pipeint(qtd->urb->pipe) && 662 if (usb_pipeint(qtd->urb->pipe) &&
648 (qtd->urb->dev->speed == USB_SPEED_LOW)) 663 (qtd->urb->dev->speed == USB_SPEED_LOW))
649 ptd->dw1 |= 2 << 16; 664 ptd->dw1 |= 2 << 16;
650 665
651 ptd->dw3 = 0;
652 rl = 0; 666 rl = 0;
653 nak = 0; 667 nak = 0;
654 } else { 668 } else {
655 ptd->dw0 |= PTD_MULTI(multi); 669 ptd->dw0 |= TO_DW0_MULTI(multi);
656 if (usb_pipecontrol(qtd->urb->pipe) || 670 if (usb_pipecontrol(qtd->urb->pipe) ||
657 usb_pipebulk(qtd->urb->pipe)) 671 usb_pipebulk(qtd->urb->pipe))
658 ptd->dw3 = qh->ping; 672 ptd->dw3 |= TO_DW3_PING(qh->ping);
659 else
660 ptd->dw3 = 0;
661 } 673 }
662 /* DW2 */ 674 /* DW2 */
663 ptd->dw2 = 0; 675 ptd->dw2 = 0;
664 ptd->dw2 |= PTD_DATA_START_ADDR(base_to_chip(qtd->payload_addr)); 676 ptd->dw2 |= TO_DW2_DATA_START_ADDR(base_to_chip(qtd->payload_addr));
665 ptd->dw2 |= PTD_RL_CNT(rl); 677 ptd->dw2 |= TO_DW2_RL(rl);
666 ptd->dw3 |= PTD_NAC_CNT(nak);
667 678
668 /* DW3 */ 679 /* DW3 */
669 ptd->dw3 |= qh->toggle; 680 ptd->dw3 |= TO_DW3_NAKCOUNT(nak);
681 ptd->dw3 |= TO_DW3_DATA_TOGGLE(qh->toggle);
670 if (usb_pipecontrol(qtd->urb->pipe)) { 682 if (usb_pipecontrol(qtd->urb->pipe)) {
671 if (qtd->data_buffer == qtd->urb->setup_packet) 683 if (qtd->data_buffer == qtd->urb->setup_packet)
672 ptd->dw3 &= ~PTD_DATA_TOGGLE(1); 684 ptd->dw3 &= ~TO_DW3_DATA_TOGGLE(1);
673 else if (last_qtd_of_urb(qtd, qh)) 685 else if (last_qtd_of_urb(qtd, qh))
674 ptd->dw3 |= PTD_DATA_TOGGLE(1); 686 ptd->dw3 |= TO_DW3_DATA_TOGGLE(1);
675 } 687 }
676 688
677 ptd->dw3 |= PTD_ACTIVE; 689 ptd->dw3 |= DW3_ACTIVE_BIT;
678 /* Cerr */ 690 /* Cerr */
679 ptd->dw3 |= PTD_CERR(ERR_COUNTER); 691 ptd->dw3 |= TO_DW3_CERR(ERR_COUNTER);
680} 692}
681 693
682static void transform_add_int(struct isp1760_qh *qh, 694static void transform_add_int(struct isp1760_qh *qh,
@@ -731,197 +743,13 @@ static void transform_add_int(struct isp1760_qh *qh,
731 ptd->dw4 = usof; 743 ptd->dw4 = usof;
732} 744}
733 745
734static void transform_into_int(struct isp1760_qh *qh, 746static void create_ptd_int(struct isp1760_qh *qh,
735 struct isp1760_qtd *qtd, struct ptd *ptd) 747 struct isp1760_qtd *qtd, struct ptd *ptd)
736{ 748{
737 transform_into_atl(qh, qtd, ptd); 749 create_ptd_atl(qh, qtd, ptd);
738 transform_add_int(qh, qtd, ptd); 750 transform_add_int(qh, qtd, ptd);
739} 751}
740 752
741static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len,
742 u32 token)
743{
744 int count;
745
746 qtd->data_buffer = databuffer;
747 qtd->packet_type = GET_QTD_TOKEN_TYPE(token);
748
749 if (len > MAX_PAYLOAD_SIZE)
750 count = MAX_PAYLOAD_SIZE;
751 else
752 count = len;
753
754 qtd->length = count;
755 return count;
756}
757
758static int check_error(struct usb_hcd *hcd, struct ptd *ptd)
759{
760 int error = 0;
761
762 if (ptd->dw3 & DW3_HALT_BIT) {
763 error = -EPIPE;
764
765 if (ptd->dw3 & DW3_ERROR_BIT)
766 pr_err("error bit is set in DW3\n");
767 }
768
769 if (ptd->dw3 & DW3_QTD_ACTIVE) {
770 dev_err(hcd->self.controller, "Transfer active bit is set DW3\n"
771 "nak counter: %d, rl: %d\n",
772 (ptd->dw3 >> 19) & 0xf, (ptd->dw2 >> 25) & 0xf);
773 }
774
775 return error;
776}
777
778static void check_int_err_status(struct usb_hcd *hcd, u32 dw4)
779{
780 u32 i;
781
782 dw4 >>= 8;
783
784 for (i = 0; i < 8; i++) {
785 switch (dw4 & 0x7) {
786 case INT_UNDERRUN:
787 dev_err(hcd->self.controller, "Underrun (%d)\n", i);
788 break;
789
790 case INT_EXACT:
791 dev_err(hcd->self.controller,
792 "Transaction error (%d)\n", i);
793 break;
794
795 case INT_BABBLE:
796 dev_err(hcd->self.controller, "Babble error (%d)\n", i);
797 break;
798 }
799 dw4 >>= 3;
800 }
801}
802
803static void enqueue_one_qtd(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
804{
805 if (qtd->length && (qtd->length <= MAX_PAYLOAD_SIZE)) {
806 switch (qtd->packet_type) {
807 case IN_PID:
808 break;
809 case OUT_PID:
810 case SETUP_PID:
811 mem_writes8(hcd->regs, qtd->payload_addr,
812 qtd->data_buffer, qtd->length);
813 }
814 }
815}
816
817static void enqueue_one_atl_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
818 u32 slot, struct isp1760_qtd *qtd)
819{
820 struct isp1760_hcd *priv = hcd_to_priv(hcd);
821 struct ptd ptd;
822
823 alloc_mem(hcd, qtd);
824 transform_into_atl(qh, qtd, &ptd);
825 ptd_write(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
826 enqueue_one_qtd(hcd, qtd);
827
828 priv->atl_ints[slot].qh = qh;
829 priv->atl_ints[slot].qtd = qtd;
830 qtd->status |= URB_ENQUEUED;
831 qtd->status |= slot << 16;
832}
833
834static void enqueue_one_int_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
835 u32 slot, struct isp1760_qtd *qtd)
836{
837 struct isp1760_hcd *priv = hcd_to_priv(hcd);
838 struct ptd ptd;
839
840 alloc_mem(hcd, qtd);
841 transform_into_int(qh, qtd, &ptd);
842 ptd_write(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
843 enqueue_one_qtd(hcd, qtd);
844
845 priv->int_ints[slot].qh = qh;
846 priv->int_ints[slot].qtd = qtd;
847 qtd->status |= URB_ENQUEUED;
848 qtd->status |= slot << 16;
849}
850
851static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
852 struct isp1760_qtd *qtd)
853{
854 struct isp1760_hcd *priv = hcd_to_priv(hcd);
855 u32 skip_map, or_map;
856 u32 slot;
857 u32 buffstatus;
858
859 /*
860 * When this function is called from the interrupt handler to enqueue
861 * a follow-up packet, the SKIP register gets written and read back
862 * almost immediately. With ISP1761, this register requires a delay of
863 * 195ns between a write and subsequent read (see section 15.1.1.3).
864 */
865 mmiowb();
866 ndelay(195);
867 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
868
869 BUG_ON(!skip_map);
870 slot = __ffs(skip_map);
871
872 enqueue_one_atl_qtd(hcd, qh, slot, qtd);
873
874 or_map = reg_read32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG);
875 or_map |= (1 << slot);
876 reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, or_map);
877
878 skip_map &= ~(1 << slot);
879 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
880
881 priv->atl_queued++;
882 if (priv->atl_queued == 2)
883 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
884 INTERRUPT_ENABLE_SOT_MASK);
885
886 buffstatus = reg_read32(hcd->regs, HC_BUFFER_STATUS_REG);
887 buffstatus |= ATL_BUFFER;
888 reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, buffstatus);
889}
890
891static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
892 struct isp1760_qtd *qtd)
893{
894 u32 skip_map, or_map;
895 u32 slot;
896 u32 buffstatus;
897
898 /*
899 * When this function is called from the interrupt handler to enqueue
900 * a follow-up packet, the SKIP register gets written and read back
901 * almost immediately. With ISP1761, this register requires a delay of
902 * 195ns between a write and subsequent read (see section 15.1.1.3).
903 */
904 mmiowb();
905 ndelay(195);
906 skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
907
908 BUG_ON(!skip_map);
909 slot = __ffs(skip_map);
910
911 enqueue_one_int_qtd(hcd, qh, slot, qtd);
912
913 or_map = reg_read32(hcd->regs, HC_INT_IRQ_MASK_OR_REG);
914 or_map |= (1 << slot);
915 reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, or_map);
916
917 skip_map &= ~(1 << slot);
918 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
919
920 buffstatus = reg_read32(hcd->regs, HC_BUFFER_STATUS_REG);
921 buffstatus |= INT_BUFFER;
922 reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, buffstatus);
923}
924
925static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb) 753static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb)
926__releases(priv->lock) 754__releases(priv->lock)
927__acquires(priv->lock) 755__acquires(priv->lock)
@@ -948,557 +776,654 @@ __acquires(priv->lock)
948 spin_lock(&priv->lock); 776 spin_lock(&priv->lock);
949} 777}
950 778
951static void isp1760_qtd_free(struct isp1760_qtd *qtd) 779static struct isp1760_qtd *qtd_alloc(gfp_t flags, struct urb *urb,
780 u8 packet_type)
952{ 781{
953 BUG_ON(qtd->payload_addr); 782 struct isp1760_qtd *qtd;
954 kmem_cache_free(qtd_cachep, qtd); 783
784 qtd = kmem_cache_zalloc(qtd_cachep, flags);
785 if (!qtd)
786 return NULL;
787
788 INIT_LIST_HEAD(&qtd->qtd_list);
789 qtd->urb = urb;
790 qtd->packet_type = packet_type;
791 qtd->status = QTD_ENQUEUED;
792 qtd->actual_length = 0;
793
794 return qtd;
955} 795}
956 796
957static struct isp1760_qtd *clean_this_qtd(struct isp1760_qtd *qtd, 797static void qtd_free(struct isp1760_qtd *qtd)
958 struct isp1760_qh *qh)
959{ 798{
960 struct isp1760_qtd *tmp_qtd; 799 WARN_ON(qtd->payload_addr);
961 800 kmem_cache_free(qtd_cachep, qtd);
962 if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
963 tmp_qtd = NULL;
964 else
965 tmp_qtd = list_entry(qtd->qtd_list.next, struct isp1760_qtd,
966 qtd_list);
967 list_del(&qtd->qtd_list);
968 isp1760_qtd_free(qtd);
969 return tmp_qtd;
970} 801}
971 802
972/* 803static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
973 * Remove this QTD from the QH list and free its memory. If this QTD 804 struct slotinfo *slots, struct isp1760_qtd *qtd,
974 * isn't the last one than remove also his successor(s). 805 struct isp1760_qh *qh, struct ptd *ptd)
975 * Returns the QTD which is part of an new URB and should be enqueued.
976 */
977static struct isp1760_qtd *clean_up_qtdlist(struct isp1760_qtd *qtd,
978 struct isp1760_qh *qh)
979{ 806{
980 struct urb *urb; 807 struct isp1760_hcd *priv = hcd_to_priv(hcd);
808 int skip_map;
809
810 WARN_ON((slot < 0) || (slot > 31));
811 WARN_ON(qtd->length && !qtd->payload_addr);
812 WARN_ON(slots[slot].qtd);
813 WARN_ON(slots[slot].qh);
814 WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);
815
816 slots[slot].qtd = qtd;
817 slots[slot].qh = qh;
818 qh->slot = slot;
819 qtd->status = QTD_XFER_STARTED; /* Set this before writing ptd, since
820 interrupt routine may preempt and expects this value. */
821 ptd_write(hcd->regs, ptd_offset, slot, ptd);
822 priv->active_ptds++;
823
824 /* Make sure done map has not triggered from some unlinked transfer */
825 if (ptd_offset == ATL_PTD_OFFSET) {
826 priv->atl_done_map |= reg_read32(hcd->regs,
827 HC_ATL_PTD_DONEMAP_REG);
828 priv->atl_done_map &= ~(1 << qh->slot);
981 829
982 urb = qtd->urb; 830 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
983 do { 831 skip_map &= ~(1 << qh->slot);
984 qtd = clean_this_qtd(qtd, qh); 832 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
985 } while (qtd && (qtd->urb == urb)); 833 } else {
834 priv->int_done_map |= reg_read32(hcd->regs,
835 HC_INT_PTD_DONEMAP_REG);
836 priv->int_done_map &= ~(1 << qh->slot);
986 837
987 return qtd; 838 skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
839 skip_map &= ~(1 << qh->slot);
840 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
841 }
988} 842}
989 843
990static void do_atl_int(struct usb_hcd *hcd) 844static int is_short_bulk(struct isp1760_qtd *qtd)
991{ 845{
992 struct isp1760_hcd *priv = hcd_to_priv(hcd); 846 return (usb_pipebulk(qtd->urb->pipe) &&
993 u32 done_map, skip_map; 847 (qtd->actual_length < qtd->length));
994 struct ptd ptd; 848}
995 struct urb *urb;
996 u32 slot;
997 u32 length;
998 u32 or_map;
999 u32 status = -EINVAL;
1000 int error;
1001 struct isp1760_qtd *qtd;
1002 struct isp1760_qh *qh;
1003 u32 rl;
1004 u32 nakcount;
1005 849
1006 done_map = reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG); 850static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
1007 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); 851 struct list_head *urb_list)
852{
853 int last_qtd;
854 struct isp1760_qtd *qtd, *qtd_next;
855 struct urb_listitem *urb_listitem;
1008 856
1009 or_map = reg_read32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG); 857 list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) {
1010 or_map &= ~done_map; 858 if (qtd->status < QTD_XFER_COMPLETE)
1011 reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, or_map); 859 break;
1012 860
1013 while (done_map) { 861 if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
1014 status = 0; 862 last_qtd = 1;
1015 priv->atl_queued--; 863 else
864 last_qtd = qtd->urb != qtd_next->urb;
865
866 if ((!last_qtd) && (qtd->status == QTD_RETIRE))
867 qtd_next->status = QTD_RETIRE;
868
869 if (qtd->status == QTD_XFER_COMPLETE) {
870 if (qtd->actual_length) {
871 switch (qtd->packet_type) {
872 case IN_PID:
873 mem_reads8(hcd->regs, qtd->payload_addr,
874 qtd->data_buffer,
875 qtd->actual_length);
876 /* Fall through (?) */
877 case OUT_PID:
878 qtd->urb->actual_length +=
879 qtd->actual_length;
880 /* Fall through ... */
881 case SETUP_PID:
882 break;
883 }
884 }
1016 885
1017 slot = __ffs(done_map); 886 if (is_short_bulk(qtd)) {
1018 done_map &= ~(1 << slot); 887 if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK)
1019 skip_map |= (1 << slot); 888 qtd->urb->status = -EREMOTEIO;
889 if (!last_qtd)
890 qtd_next->status = QTD_RETIRE;
891 }
892 }
1020 893
1021 qtd = priv->atl_ints[slot].qtd; 894 if (qtd->payload_addr)
1022 qh = priv->atl_ints[slot].qh; 895 free_mem(hcd, qtd);
1023 896
1024 if (!qh) { 897 if (last_qtd) {
1025 dev_err(hcd->self.controller, "qh is 0\n"); 898 if ((qtd->status == QTD_RETIRE) &&
1026 continue; 899 (qtd->urb->status == -EINPROGRESS))
900 qtd->urb->status = -EPIPE;
901 /* Defer calling of urb_done() since it releases lock */
902 urb_listitem = kmem_cache_zalloc(urb_listitem_cachep,
903 GFP_ATOMIC);
904 if (unlikely(!urb_listitem))
905 break;
906 urb_listitem->urb = qtd->urb;
907 list_add_tail(&urb_listitem->urb_list, urb_list);
1027 } 908 }
1028 ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
1029 909
1030 rl = (ptd.dw2 >> 25) & 0x0f; 910 list_del(&qtd->qtd_list);
1031 nakcount = (ptd.dw3 >> 19) & 0xf; 911 qtd_free(qtd);
1032 912 }
1033 /* Transfer Error, *but* active and no HALT -> reload */ 913}
1034 if ((ptd.dw3 & DW3_ERROR_BIT) && (ptd.dw3 & DW3_QTD_ACTIVE) &&
1035 !(ptd.dw3 & DW3_HALT_BIT)) {
1036
1037 /* according to ppriv code, we have to
1038 * reload this one if trasfered bytes != requested bytes
1039 * else act like everything went smooth..
1040 * XXX This just doesn't feel right and hasn't
1041 * triggered so far.
1042 */
1043 914
1044 length = PTD_XFERRED_LENGTH(ptd.dw3); 915#define ENQUEUE_DEPTH 2
1045 dev_err(hcd->self.controller, 916static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
1046 "Should reload now... transferred %d " 917{
1047 "of %zu\n", length, qtd->length); 918 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1048 BUG(); 919 int ptd_offset;
1049 } 920 struct slotinfo *slots;
921 int curr_slot, free_slot;
922 int n;
923 struct ptd ptd;
924 struct isp1760_qtd *qtd;
1050 925
1051 if (!nakcount && (ptd.dw3 & DW3_QTD_ACTIVE)) { 926 if (unlikely(list_empty(&qh->qtd_list))) {
1052 u32 buffstatus; 927 WARN_ON(1);
928 return;
929 }
1053 930
1054 /* 931 if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd,
1055 * NAKs are handled in HW by the chip. Usually if the 932 qtd_list)->urb->pipe)) {
1056 * device is not able to send data fast enough. 933 ptd_offset = INT_PTD_OFFSET;
1057 * This happens mostly on slower hardware. 934 slots = priv->int_slots;
1058 */ 935 } else {
936 ptd_offset = ATL_PTD_OFFSET;
937 slots = priv->atl_slots;
938 }
1059 939
1060 /* RL counter = ERR counter */ 940 free_slot = -1;
1061 ptd.dw3 &= ~(0xf << 19); 941 for (curr_slot = 0; curr_slot < 32; curr_slot++) {
1062 ptd.dw3 |= rl << 19; 942 if ((free_slot == -1) && (slots[curr_slot].qtd == NULL))
1063 ptd.dw3 &= ~(3 << (55 - 32)); 943 free_slot = curr_slot;
1064 ptd.dw3 |= ERR_COUNTER << (55 - 32); 944 if (slots[curr_slot].qh == qh)
1065 945 break;
1066 /* 946 }
1067 * It is not needed to write skip map back because it
1068 * is unchanged. Just make sure that this entry is
1069 * unskipped once it gets written to the HW.
1070 */
1071 skip_map &= ~(1 << slot);
1072 or_map = reg_read32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG);
1073 or_map |= 1 << slot;
1074 reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, or_map);
1075 947
1076 ptd.dw0 |= PTD_VALID; 948 n = 0;
1077 ptd_write(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); 949 list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
950 if (qtd->status == QTD_ENQUEUED) {
951 WARN_ON(qtd->payload_addr);
952 alloc_mem(hcd, qtd);
953 if ((qtd->length) && (!qtd->payload_addr))
954 break;
1078 955
1079 priv->atl_queued++; 956 if ((qtd->length) &&
1080 if (priv->atl_queued == 2) 957 ((qtd->packet_type == SETUP_PID) ||
1081 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, 958 (qtd->packet_type == OUT_PID))) {
1082 INTERRUPT_ENABLE_SOT_MASK); 959 mem_writes8(hcd->regs, qtd->payload_addr,
960 qtd->data_buffer, qtd->length);
961 }
1083 962
1084 buffstatus = reg_read32(hcd->regs, 963 qtd->status = QTD_PAYLOAD_ALLOC;
1085 HC_BUFFER_STATUS_REG);
1086 buffstatus |= ATL_BUFFER;
1087 reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
1088 buffstatus);
1089 continue;
1090 } 964 }
1091 965
1092 error = check_error(hcd, &ptd); 966 if (qtd->status == QTD_PAYLOAD_ALLOC) {
1093 if (error) { 967/*
1094 status = error; 968 if ((curr_slot > 31) && (free_slot == -1))
1095 priv->atl_ints[slot].qh->toggle = 0; 969 dev_dbg(hcd->self.controller, "%s: No slot "
1096 priv->atl_ints[slot].qh->ping = 0; 970 "available for transfer\n", __func__);
1097 qtd->urb->status = -EPIPE; 971*/
1098 972 /* Start xfer for this endpoint if not already done */
1099#if 0 973 if ((curr_slot > 31) && (free_slot > -1)) {
1100 printk(KERN_ERR "Error in %s().\n", __func__); 974 if (usb_pipeint(qtd->urb->pipe))
1101 printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x " 975 create_ptd_int(qh, qtd, &ptd);
1102 "dw3: %08x dw4: %08x dw5: %08x dw6: " 976 else
1103 "%08x dw7: %08x\n", 977 create_ptd_atl(qh, qtd, &ptd);
1104 ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3, 978
1105 ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7); 979 start_bus_transfer(hcd, ptd_offset, free_slot,
1106#endif 980 slots, qtd, qh, &ptd);
1107 } else { 981 curr_slot = free_slot;
1108 priv->atl_ints[slot].qh->toggle = ptd.dw3 & (1 << 25); 982 }
1109 priv->atl_ints[slot].qh->ping = ptd.dw3 & (1 << 26);
1110 }
1111 983
1112 length = PTD_XFERRED_LENGTH(ptd.dw3); 984 n++;
1113 if (length) { 985 if (n >= ENQUEUE_DEPTH)
1114 switch (DW1_GET_PID(ptd.dw1)) { 986 break;
1115 case IN_PID: 987 }
1116 mem_reads8(hcd->regs, qtd->payload_addr, 988 }
1117 qtd->data_buffer, length); 989}
1118 990
1119 case OUT_PID: 991void schedule_ptds(struct usb_hcd *hcd)
992{
993 struct isp1760_hcd *priv;
994 struct isp1760_qh *qh, *qh_next;
995 struct list_head *ep_queue;
996 struct usb_host_endpoint *ep;
997 LIST_HEAD(urb_list);
998 struct urb_listitem *urb_listitem, *urb_listitem_next;
999
1000 if (!hcd) {
1001 WARN_ON(1);
1002 return;
1003 }
1120 1004
1121 qtd->urb->actual_length += length; 1005 priv = hcd_to_priv(hcd);
1122 1006
1123 case SETUP_PID: 1007 /*
1124 break; 1008 * check finished/retired xfers, transfer payloads, call urb_done()
1009 */
1010 ep_queue = &priv->interruptqhs;
1011 while (ep_queue) {
1012 list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
1013 ep = list_entry(qh->qtd_list.next, struct isp1760_qtd,
1014 qtd_list)->urb->ep;
1015 collect_qtds(hcd, qh, &urb_list);
1016 if (list_empty(&qh->qtd_list)) {
1017 list_del(&qh->qh_list);
1018 if (ep->hcpriv == NULL) {
1019 /* Endpoint has been disabled, so we
1020 can free the associated queue head. */
1021 qh_free(qh);
1022 }
1125 } 1023 }
1126 } 1024 }
1127 1025
1128 priv->atl_ints[slot].qtd = NULL; 1026 if (ep_queue == &priv->interruptqhs)
1129 priv->atl_ints[slot].qh = NULL; 1027 ep_queue = &priv->controlqhs;
1130 1028 else if (ep_queue == &priv->controlqhs)
1131 free_mem(hcd, qtd); 1029 ep_queue = &priv->bulkqhs;
1030 else
1031 ep_queue = NULL;
1032 }
1132 1033
1133 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map); 1034 list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
1035 urb_list) {
1036 isp1760_urb_done(hcd, urb_listitem->urb);
1037 kmem_cache_free(urb_listitem_cachep, urb_listitem);
1038 }
1134 1039
1135 if (qtd->urb->status == -EPIPE) { 1040 /*
1136 /* HALT was received */ 1041 * Schedule packets for transfer.
1042 *
1043 * According to USB2.0 specification:
1044 *
1045 * 1st prio: interrupt xfers, up to 80 % of bandwidth
1046 * 2nd prio: control xfers
1047 * 3rd prio: bulk xfers
1048 *
1049 * ... but let's use a simpler scheme here (mostly because ISP1761 doc
1050 * is very unclear on how to prioritize traffic):
1051 *
1052 * 1) Enqueue any queued control transfers, as long as payload chip mem
1053 * and PTD ATL slots are available.
1054 * 2) Enqueue any queued INT transfers, as long as payload chip mem
1055 * and PTD INT slots are available.
1056 * 3) Enqueue any queued bulk transfers, as long as payload chip mem
1057 * and PTD ATL slots are available.
1058 *
1059 * Use double buffering (ENQUEUE_DEPTH==2) as a compromise between
1060 * conservation of chip mem and performance.
1061 *
1062 * I'm sure this scheme could be improved upon!
1063 */
1064 ep_queue = &priv->controlqhs;
1065 while (ep_queue) {
1066 list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
1067 enqueue_qtds(hcd, qh);
1068
1069 if (ep_queue == &priv->controlqhs)
1070 ep_queue = &priv->interruptqhs;
1071 else if (ep_queue == &priv->interruptqhs)
1072 ep_queue = &priv->bulkqhs;
1073 else
1074 ep_queue = NULL;
1075 }
1076}
1137 1077
1138 urb = qtd->urb; 1078#define PTD_STATE_QTD_DONE 1
1139 qtd = clean_up_qtdlist(qtd, qh); 1079#define PTD_STATE_QTD_RELOAD 2
1140 isp1760_urb_done(hcd, urb); 1080#define PTD_STATE_URB_RETIRE 3
1141 1081
1142 } else if (usb_pipebulk(qtd->urb->pipe) && 1082static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd,
1143 (length < qtd->length)) { 1083 struct urb *urb)
1144 /* short BULK received */ 1084{
1085 __dw dw4;
1086 int i;
1145 1087
1146 if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK) { 1088 dw4 = ptd->dw4;
1147 qtd->urb->status = -EREMOTEIO; 1089 dw4 >>= 8;
1148 dev_dbg(hcd->self.controller,
1149 "short bulk, %d instead %zu "
1150 "with URB_SHORT_NOT_OK flag.\n",
1151 length, qtd->length);
1152 }
1153 1090
1154 if (qtd->urb->status == -EINPROGRESS) 1091 /* FIXME: ISP1761 datasheet does not say what to do with these. Do we
1155 qtd->urb->status = 0; 1092 need to handle these errors? Is it done in hardware? */
1156 1093
1157 urb = qtd->urb; 1094 if (ptd->dw3 & DW3_HALT_BIT) {
1158 qtd = clean_up_qtdlist(qtd, qh);
1159 isp1760_urb_done(hcd, urb);
1160 1095
1161 } else if (last_qtd_of_urb(qtd, qh)) { 1096 urb->status = -EPROTO; /* Default unknown error */
1162 /* that was the last qtd of that URB */
1163 1097
1164 if (qtd->urb->status == -EINPROGRESS) 1098 for (i = 0; i < 8; i++) {
1165 qtd->urb->status = 0; 1099 switch (dw4 & 0x7) {
1100 case INT_UNDERRUN:
1101 dev_dbg(hcd->self.controller, "%s: underrun "
1102 "during uFrame %d\n",
1103 __func__, i);
1104 urb->status = -ECOMM; /* Could not write data */
1105 break;
1106 case INT_EXACT:
1107 dev_dbg(hcd->self.controller, "%s: transaction "
1108 "error during uFrame %d\n",
1109 __func__, i);
1110 urb->status = -EPROTO; /* timeout, bad CRC, PID
1111 error etc. */
1112 break;
1113 case INT_BABBLE:
1114 dev_dbg(hcd->self.controller, "%s: babble "
1115 "error during uFrame %d\n",
1116 __func__, i);
1117 urb->status = -EOVERFLOW;
1118 break;
1119 }
1120 dw4 >>= 3;
1121 }
1166 1122
1167 urb = qtd->urb; 1123 return PTD_STATE_URB_RETIRE;
1168 qtd = clean_up_qtdlist(qtd, qh); 1124 }
1169 isp1760_urb_done(hcd, urb);
1170 1125
1171 } else { 1126 return PTD_STATE_QTD_DONE;
1172 /* next QTD of this URB */ 1127}
1173 1128
1174 qtd = clean_this_qtd(qtd, qh); 1129static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
1175 BUG_ON(!qtd); 1130 struct urb *urb)
1176 } 1131{
1132 WARN_ON(!ptd);
1133 if (ptd->dw3 & DW3_HALT_BIT) {
1134 if (ptd->dw3 & DW3_BABBLE_BIT)
1135 urb->status = -EOVERFLOW;
1136 else if (FROM_DW3_CERR(ptd->dw3))
1137 urb->status = -EPIPE; /* Stall */
1138 else if (ptd->dw3 & DW3_ERROR_BIT)
1139 urb->status = -EPROTO; /* XactErr */
1140 else
1141 urb->status = -EPROTO; /* Unknown */
1142/*
1143 dev_dbg(hcd->self.controller, "%s: ptd error:\n"
1144 " dw0: %08x dw1: %08x dw2: %08x dw3: %08x\n"
1145 " dw4: %08x dw5: %08x dw6: %08x dw7: %08x\n",
1146 __func__,
1147 ptd->dw0, ptd->dw1, ptd->dw2, ptd->dw3,
1148 ptd->dw4, ptd->dw5, ptd->dw6, ptd->dw7);
1149*/
1150 return PTD_STATE_URB_RETIRE;
1151 }
1177 1152
1178 if (qtd) 1153 if ((ptd->dw3 & DW3_ERROR_BIT) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
1179 enqueue_an_ATL_packet(hcd, qh, qtd); 1154 /* Transfer Error, *but* active and no HALT -> reload */
1155 dev_dbg(hcd->self.controller, "PID error; reloading ptd\n");
1156 return PTD_STATE_QTD_RELOAD;
1157 }
1180 1158
1181 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); 1159 if (!FROM_DW3_NAKCOUNT(ptd->dw3) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
1160 /*
1161 * NAKs are handled in HW by the chip. Usually if the
1162 * device is not able to send data fast enough.
1163 * This happens mostly on slower hardware.
1164 */
1165 return PTD_STATE_QTD_RELOAD;
1182 } 1166 }
1183 if (priv->atl_queued <= 1) 1167
1184 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, 1168 return PTD_STATE_QTD_DONE;
1185 INTERRUPT_ENABLE_MASK);
1186} 1169}
1187 1170
1188static void do_intl_int(struct usb_hcd *hcd) 1171static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
1189{ 1172{
1190 struct isp1760_hcd *priv = hcd_to_priv(hcd); 1173 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1191 u32 done_map, skip_map; 1174 u32 imask;
1175 irqreturn_t irqret = IRQ_NONE;
1192 struct ptd ptd; 1176 struct ptd ptd;
1193 struct urb *urb;
1194 u32 length;
1195 u32 or_map;
1196 int error;
1197 u32 slot;
1198 struct isp1760_qtd *qtd;
1199 struct isp1760_qh *qh; 1177 struct isp1760_qh *qh;
1178 int slot;
1179 int state;
1180 struct slotinfo *slots;
1181 u32 ptd_offset;
1182 struct isp1760_qtd *qtd;
1183 int modified;
1184 static int last_active_ptds;
1185 int int_skip_map, atl_skip_map;
1200 1186
1201 done_map = reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG); 1187 spin_lock(&priv->lock);
1202 skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
1203
1204 or_map = reg_read32(hcd->regs, HC_INT_IRQ_MASK_OR_REG);
1205 or_map &= ~done_map;
1206 reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, or_map);
1207
1208 while (done_map) {
1209 slot = __ffs(done_map);
1210 done_map &= ~(1 << slot);
1211 skip_map |= (1 << slot);
1212
1213 qtd = priv->int_ints[slot].qtd;
1214 qh = priv->int_ints[slot].qh;
1215
1216 if (!qh) {
1217 dev_err(hcd->self.controller, "(INT) qh is 0\n");
1218 continue;
1219 }
1220 1188
1221 ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd); 1189 if (!(hcd->state & HC_STATE_RUNNING))
1222 check_int_err_status(hcd, ptd.dw4); 1190 goto leave;
1223
1224 error = check_error(hcd, &ptd);
1225 if (error) {
1226#if 0
1227 printk(KERN_ERR "Error in %s().\n", __func__);
1228 printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x "
1229 "dw3: %08x dw4: %08x dw5: %08x dw6: "
1230 "%08x dw7: %08x\n",
1231 ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3,
1232 ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7);
1233#endif
1234 qtd->urb->status = -EPIPE;
1235 priv->int_ints[slot].qh->toggle = 0;
1236 priv->int_ints[slot].qh->ping = 0;
1237 1191
1192 imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
1193 if (unlikely(!imask))
1194 goto leave;
1195 reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
1196
1197 int_skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
1198 atl_skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
1199 priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
1200 priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
1201 priv->int_done_map &= ~int_skip_map;
1202 priv->atl_done_map &= ~atl_skip_map;
1203
1204 modified = priv->int_done_map | priv->atl_done_map;
1205
1206 while (priv->int_done_map || priv->atl_done_map) {
1207 if (priv->int_done_map) {
1208 /* INT ptd */
1209 slot = __ffs(priv->int_done_map);
1210 priv->int_done_map &= ~(1 << slot);
1211 slots = priv->int_slots;
1212 /* This should not trigger, and could be removed if
1213 noone have any problems with it triggering: */
1214 if (!slots[slot].qh) {
1215 WARN_ON(1);
1216 continue;
1217 }
1218 ptd_offset = INT_PTD_OFFSET;
1219 ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
1220 state = check_int_transfer(hcd, &ptd,
1221 slots[slot].qtd->urb);
1238 } else { 1222 } else {
1239 priv->int_ints[slot].qh->toggle = ptd.dw3 & (1 << 25); 1223 /* ATL ptd */
1240 priv->int_ints[slot].qh->ping = ptd.dw3 & (1 << 26); 1224 slot = __ffs(priv->atl_done_map);
1241 } 1225 priv->atl_done_map &= ~(1 << slot);
1242 1226 slots = priv->atl_slots;
1243 if (qtd->urb->dev->speed != USB_SPEED_HIGH) 1227 /* This should not trigger, and could be removed if
1244 length = PTD_XFERRED_LENGTH_LO(ptd.dw3); 1228 noone have any problems with it triggering: */
1245 else 1229 if (!slots[slot].qh) {
1246 length = PTD_XFERRED_LENGTH(ptd.dw3); 1230 WARN_ON(1);
1247 1231 continue;
1248 if (length) {
1249 switch (DW1_GET_PID(ptd.dw1)) {
1250 case IN_PID:
1251 mem_reads8(hcd->regs, qtd->payload_addr,
1252 qtd->data_buffer, length);
1253 case OUT_PID:
1254
1255 qtd->urb->actual_length += length;
1256
1257 case SETUP_PID:
1258 break;
1259 } 1232 }
1233 ptd_offset = ATL_PTD_OFFSET;
1234 ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
1235 state = check_atl_transfer(hcd, &ptd,
1236 slots[slot].qtd->urb);
1260 } 1237 }
1261 1238
1262 priv->int_ints[slot].qtd = NULL; 1239 qtd = slots[slot].qtd;
1263 priv->int_ints[slot].qh = NULL; 1240 slots[slot].qtd = NULL;
1264 1241 qh = slots[slot].qh;
1265 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map); 1242 slots[slot].qh = NULL;
1266 free_mem(hcd, qtd); 1243 priv->active_ptds--;
1267 1244 qh->slot = -1;
1268 if (qtd->urb->status == -EPIPE) { 1245
1269 /* HALT received */ 1246 WARN_ON(qtd->status != QTD_XFER_STARTED);
1270 1247
1271 urb = qtd->urb; 1248 switch (state) {
1272 qtd = clean_up_qtdlist(qtd, qh); 1249 case PTD_STATE_QTD_DONE:
1273 isp1760_urb_done(hcd, urb); 1250 if ((usb_pipeint(qtd->urb->pipe)) &&
1274 1251 (qtd->urb->dev->speed != USB_SPEED_HIGH))
1275 } else if (last_qtd_of_urb(qtd, qh)) { 1252 qtd->actual_length =
1276 1253 FROM_DW3_SCS_NRBYTESTRANSFERRED(ptd.dw3);
1277 if (qtd->urb->status == -EINPROGRESS) 1254 else
1278 qtd->urb->status = 0; 1255 qtd->actual_length =
1256 FROM_DW3_NRBYTESTRANSFERRED(ptd.dw3);
1257
1258 qtd->status = QTD_XFER_COMPLETE;
1259 if (list_is_last(&qtd->qtd_list, &qh->qtd_list) ||
1260 is_short_bulk(qtd))
1261 qtd = NULL;
1262 else
1263 qtd = list_entry(qtd->qtd_list.next,
1264 typeof(*qtd), qtd_list);
1265
1266 qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
1267 qh->ping = FROM_DW3_PING(ptd.dw3);
1268 break;
1279 1269
1280 urb = qtd->urb; 1270 case PTD_STATE_QTD_RELOAD: /* QTD_RETRY, for atls only */
1281 qtd = clean_up_qtdlist(qtd, qh); 1271 qtd->status = QTD_PAYLOAD_ALLOC;
1282 isp1760_urb_done(hcd, urb); 1272 ptd.dw0 |= DW0_VALID_BIT;
1273 /* RL counter = ERR counter */
1274 ptd.dw3 &= ~TO_DW3_NAKCOUNT(0xf);
1275 ptd.dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(ptd.dw2));
1276 ptd.dw3 &= ~TO_DW3_CERR(3);
1277 ptd.dw3 |= TO_DW3_CERR(ERR_COUNTER);
1278 qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
1279 qh->ping = FROM_DW3_PING(ptd.dw3);
1280 break;
1283 1281
1284 } else { 1282 case PTD_STATE_URB_RETIRE:
1285 /* next QTD of this URB */ 1283 qtd->status = QTD_RETIRE;
1284 qtd = NULL;
1285 qh->toggle = 0;
1286 qh->ping = 0;
1287 break;
1286 1288
1287 qtd = clean_this_qtd(qtd, qh); 1289 default:
1288 BUG_ON(!qtd); 1290 WARN_ON(1);
1291 continue;
1289 } 1292 }
1290 1293
1291 if (qtd) 1294 if (qtd && (qtd->status == QTD_PAYLOAD_ALLOC)) {
1292 enqueue_an_INT_packet(hcd, qh, qtd); 1295 if (slots == priv->int_slots) {
1296 if (state == PTD_STATE_QTD_RELOAD)
1297 dev_err(hcd->self.controller,
1298 "%s: PTD_STATE_QTD_RELOAD on "
1299 "interrupt packet\n", __func__);
1300 if (state != PTD_STATE_QTD_RELOAD)
1301 create_ptd_int(qh, qtd, &ptd);
1302 } else {
1303 if (state != PTD_STATE_QTD_RELOAD)
1304 create_ptd_atl(qh, qtd, &ptd);
1305 }
1293 1306
1294 skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG); 1307 start_bus_transfer(hcd, ptd_offset, slot, slots, qtd,
1308 qh, &ptd);
1309 }
1295 } 1310 }
1296}
1297 1311
1298static struct isp1760_qh *qh_make(struct usb_hcd *hcd, struct urb *urb, 1312 if (modified)
1299 gfp_t flags) 1313 schedule_ptds(hcd);
1300{
1301 struct isp1760_qh *qh;
1302 int is_input, type;
1303 1314
1304 qh = isp1760_qh_alloc(flags); 1315 /* ISP1760 Errata 2 explains that interrupts may be missed (or not
1305 if (!qh) 1316 happen?) if two USB devices are running simultaneously. Perhaps
1306 return qh; 1317 this happens when a PTD is finished during interrupt handling;
1307 1318 enable SOF interrupts if PTDs are still scheduled when exiting this
1308 /* 1319 interrupt handler, just to be safe. */
1309 * init endpoint/device data for this QH
1310 */
1311 is_input = usb_pipein(urb->pipe);
1312 type = usb_pipetype(urb->pipe);
1313 1320
1314 if (!usb_pipecontrol(urb->pipe)) 1321 if (priv->active_ptds != last_active_ptds) {
1315 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1322 if (priv->active_ptds > 0)
1316 1); 1323 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
1317 return qh; 1324 INTERRUPT_ENABLE_SOT_MASK);
1318} 1325 else
1319 1326 reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
1320/* 1327 INTERRUPT_ENABLE_MASK);
1321 * For control/bulk/interrupt, return QH with these TDs appended. 1328 last_active_ptds = priv->active_ptds;
1322 * Allocates and initializes the QH if necessary.
1323 * Returns null if it can't allocate a QH it needs to.
1324 * If the QH has TDs (urbs) already, that's great.
1325 */
1326static struct isp1760_qh *qh_append_tds(struct usb_hcd *hcd,
1327 struct urb *urb, struct list_head *qtd_list, int epnum,
1328 void **ptr)
1329{
1330 struct isp1760_qh *qh;
1331
1332 qh = (struct isp1760_qh *)*ptr;
1333 if (!qh) {
1334 /* can't sleep here, we have priv->lock... */
1335 qh = qh_make(hcd, urb, GFP_ATOMIC);
1336 if (!qh)
1337 return qh;
1338 *ptr = qh;
1339 } 1329 }
1340 1330
1341 list_splice(qtd_list, qh->qtd_list.prev); 1331 irqret = IRQ_HANDLED;
1332leave:
1333 spin_unlock(&priv->lock);
1342 1334
1343 return qh; 1335 return irqret;
1344} 1336}
1345 1337
1346static void qtd_list_free(struct urb *urb, struct list_head *qtd_list) 1338static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
1347{ 1339{
1348 struct list_head *entry, *temp; 1340 qtd->data_buffer = databuffer;
1349 1341
1350 list_for_each_safe(entry, temp, qtd_list) { 1342 if (len > MAX_PAYLOAD_SIZE)
1351 struct isp1760_qtd *qtd; 1343 len = MAX_PAYLOAD_SIZE;
1344 qtd->length = len;
1352 1345
1353 qtd = list_entry(entry, struct isp1760_qtd, qtd_list); 1346 return qtd->length;
1354 list_del(&qtd->qtd_list);
1355 isp1760_qtd_free(qtd);
1356 }
1357} 1347}
1358 1348
1359static int isp1760_prepare_enqueue(struct usb_hcd *hcd, struct urb *urb, 1349static void qtd_list_free(struct list_head *qtd_list)
1360 struct list_head *qtd_list, gfp_t mem_flags, packet_enqueue *p)
1361{ 1350{
1362 struct isp1760_hcd *priv = hcd_to_priv(hcd); 1351 struct isp1760_qtd *qtd, *qtd_next;
1363 struct isp1760_qtd *qtd;
1364 int epnum;
1365 unsigned long flags;
1366 struct isp1760_qh *qh = NULL;
1367 int rc;
1368 int qh_busy;
1369
1370 qtd = list_entry(qtd_list->next, struct isp1760_qtd, qtd_list);
1371 epnum = urb->ep->desc.bEndpointAddress;
1372
1373 spin_lock_irqsave(&priv->lock, flags);
1374 if (!HCD_HW_ACCESSIBLE(hcd)) {
1375 rc = -ESHUTDOWN;
1376 goto done;
1377 }
1378 rc = usb_hcd_link_urb_to_ep(hcd, urb);
1379 if (rc)
1380 goto done;
1381
1382 qh = urb->ep->hcpriv;
1383 if (qh)
1384 qh_busy = !list_empty(&qh->qtd_list);
1385 else
1386 qh_busy = 0;
1387 1352
1388 qh = qh_append_tds(hcd, urb, qtd_list, epnum, &urb->ep->hcpriv); 1353 list_for_each_entry_safe(qtd, qtd_next, qtd_list, qtd_list) {
1389 if (!qh) { 1354 list_del(&qtd->qtd_list);
1390 usb_hcd_unlink_urb_from_ep(hcd, urb); 1355 qtd_free(qtd);
1391 rc = -ENOMEM;
1392 goto done;
1393 } 1356 }
1394
1395 if (!qh_busy)
1396 p(hcd, qh, qtd);
1397
1398done:
1399 spin_unlock_irqrestore(&priv->lock, flags);
1400 if (!qh)
1401 qtd_list_free(urb, qtd_list);
1402 return rc;
1403}
1404
1405static struct isp1760_qtd *isp1760_qtd_alloc(gfp_t flags)
1406{
1407 struct isp1760_qtd *qtd;
1408
1409 qtd = kmem_cache_zalloc(qtd_cachep, flags);
1410 if (qtd)
1411 INIT_LIST_HEAD(&qtd->qtd_list);
1412
1413 return qtd;
1414} 1357}
1415 1358
1416/* 1359/*
1417 * create a list of filled qtds for this URB; won't link into qh. 1360 * Packetize urb->transfer_buffer into list of packets of size wMaxPacketSize.
1361 * Also calculate the PID type (SETUP/IN/OUT) for each packet.
1418 */ 1362 */
1419#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) 1363#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
1420static struct list_head *qh_urb_transaction(struct usb_hcd *hcd, 1364static void packetize_urb(struct usb_hcd *hcd,
1421 struct urb *urb, struct list_head *head, gfp_t flags) 1365 struct urb *urb, struct list_head *head, gfp_t flags)
1422{ 1366{
1423 struct isp1760_qtd *qtd; 1367 struct isp1760_qtd *qtd;
1424 void *buf; 1368 void *buf;
1425 int len, maxpacket; 1369 int len, maxpacketsize;
1426 int is_input; 1370 u8 packet_type;
1427 u32 token;
1428 1371
1429 /* 1372 /*
1430 * URBs map to sequences of QTDs: one logical transaction 1373 * URBs map to sequences of QTDs: one logical transaction
1431 */ 1374 */
1432 qtd = isp1760_qtd_alloc(flags);
1433 if (!qtd)
1434 return NULL;
1435 1375
1436 list_add_tail(&qtd->qtd_list, head); 1376 if (!urb->transfer_buffer && urb->transfer_buffer_length) {
1437 qtd->urb = urb; 1377 /* XXX This looks like usb storage / SCSI bug */
1438 urb->status = -EINPROGRESS; 1378 dev_err(hcd->self.controller,
1379 "buf is null, dma is %08lx len is %d\n",
1380 (long unsigned)urb->transfer_dma,
1381 urb->transfer_buffer_length);
1382 WARN_ON(1);
1383 }
1439 1384
1440 token = 0; 1385 if (usb_pipein(urb->pipe))
1441 /* for split transactions, SplitXState initialized to zero */ 1386 packet_type = IN_PID;
1387 else
1388 packet_type = OUT_PID;
1442 1389
1443 len = urb->transfer_buffer_length;
1444 is_input = usb_pipein(urb->pipe);
1445 if (usb_pipecontrol(urb->pipe)) { 1390 if (usb_pipecontrol(urb->pipe)) {
1446 /* SETUP pid */ 1391 qtd = qtd_alloc(flags, urb, SETUP_PID);
1447 qtd_fill(qtd, urb->setup_packet,
1448 sizeof(struct usb_ctrlrequest),
1449 token | SETUP_PID);
1450
1451 /* ... and always at least one more pid */
1452 qtd = isp1760_qtd_alloc(flags);
1453 if (!qtd) 1392 if (!qtd)
1454 goto cleanup; 1393 goto cleanup;
1455 qtd->urb = urb; 1394 qtd_fill(qtd, urb->setup_packet, sizeof(struct usb_ctrlrequest));
1456 list_add_tail(&qtd->qtd_list, head); 1395 list_add_tail(&qtd->qtd_list, head);
1457 1396
1458 /* for zero length DATA stages, STATUS is always IN */ 1397 /* for zero length DATA stages, STATUS is always IN */
1459 if (len == 0) 1398 if (urb->transfer_buffer_length == 0)
1460 token |= IN_PID; 1399 packet_type = IN_PID;
1461 } 1400 }
1462 1401
1463 /* 1402 maxpacketsize = max_packet(usb_maxpacket(urb->dev, urb->pipe,
1464 * data transfer stage: buffer setup 1403 usb_pipeout(urb->pipe)));
1465 */
1466 buf = urb->transfer_buffer;
1467
1468 if (is_input)
1469 token |= IN_PID;
1470 else
1471 token |= OUT_PID;
1472
1473 maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
1474 1404
1475 /* 1405 /*
1476 * buffer gets wrapped in one or more qtds; 1406 * buffer gets wrapped in one or more qtds;
1477 * last one may be "short" (including zero len) 1407 * last one may be "short" (including zero len)
1478 * and may serve as a control status ack 1408 * and may serve as a control status ack
1479 */ 1409 */
1410 buf = urb->transfer_buffer;
1411 len = urb->transfer_buffer_length;
1412
1480 for (;;) { 1413 for (;;) {
1481 int this_qtd_len; 1414 int this_qtd_len;
1482 1415
1483 if (!buf && len) { 1416 qtd = qtd_alloc(flags, urb, packet_type);
1484 /* XXX This looks like usb storage / SCSI bug */ 1417 if (!qtd)
1485 dev_err(hcd->self.controller, "buf is null, dma is %08lx len is %d\n", 1418 goto cleanup;
1486 (long unsigned)urb->transfer_dma, len); 1419 this_qtd_len = qtd_fill(qtd, buf, len);
1487 WARN_ON(1); 1420 list_add_tail(&qtd->qtd_list, head);
1488 }
1489 1421
1490 this_qtd_len = qtd_fill(qtd, buf, len, token);
1491 len -= this_qtd_len; 1422 len -= this_qtd_len;
1492 buf += this_qtd_len; 1423 buf += this_qtd_len;
1493 1424
1494 if (len <= 0) 1425 if (len <= 0)
1495 break; 1426 break;
1496
1497 qtd = isp1760_qtd_alloc(flags);
1498 if (!qtd)
1499 goto cleanup;
1500 qtd->urb = urb;
1501 list_add_tail(&qtd->qtd_list, head);
1502 } 1427 }
1503 1428
1504 /* 1429 /*
@@ -1510,184 +1435,204 @@ static struct list_head *qh_urb_transaction(struct usb_hcd *hcd,
1510 1435
1511 if (usb_pipecontrol(urb->pipe)) { 1436 if (usb_pipecontrol(urb->pipe)) {
1512 one_more = 1; 1437 one_more = 1;
1513 /* "in" <--> "out" */ 1438 if (packet_type == IN_PID)
1514 token ^= IN_PID; 1439 packet_type = OUT_PID;
1440 else
1441 packet_type = IN_PID;
1515 } else if (usb_pipebulk(urb->pipe) 1442 } else if (usb_pipebulk(urb->pipe)
1516 && (urb->transfer_flags & URB_ZERO_PACKET) 1443 && (urb->transfer_flags & URB_ZERO_PACKET)
1517 && !(urb->transfer_buffer_length % maxpacket)) { 1444 && !(urb->transfer_buffer_length %
1445 maxpacketsize)) {
1518 one_more = 1; 1446 one_more = 1;
1519 } 1447 }
1520 if (one_more) { 1448 if (one_more) {
1521 qtd = isp1760_qtd_alloc(flags); 1449 qtd = qtd_alloc(flags, urb, packet_type);
1522 if (!qtd) 1450 if (!qtd)
1523 goto cleanup; 1451 goto cleanup;
1524 qtd->urb = urb;
1525 list_add_tail(&qtd->qtd_list, head);
1526 1452
1527 /* never any data in such packets */ 1453 /* never any data in such packets */
1528 qtd_fill(qtd, NULL, 0, token); 1454 qtd_fill(qtd, NULL, 0);
1455 list_add_tail(&qtd->qtd_list, head);
1529 } 1456 }
1530 } 1457 }
1531 1458
1532 qtd->status = 0; 1459 return;
1533 return head;
1534 1460
1535cleanup: 1461cleanup:
1536 qtd_list_free(urb, head); 1462 qtd_list_free(head);
1537 return NULL;
1538} 1463}
1539 1464
1540static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, 1465static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1541 gfp_t mem_flags) 1466 gfp_t mem_flags)
1542{ 1467{
1543 struct list_head qtd_list; 1468 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1544 packet_enqueue *pe; 1469 struct list_head *ep_queue;
1545 1470 struct isp1760_qh *qh, *qhit;
1546 INIT_LIST_HEAD(&qtd_list); 1471 unsigned long spinflags;
1472 LIST_HEAD(new_qtds);
1473 int retval;
1474 int qh_in_queue;
1547 1475
1548 switch (usb_pipetype(urb->pipe)) { 1476 switch (usb_pipetype(urb->pipe)) {
1549 case PIPE_CONTROL: 1477 case PIPE_CONTROL:
1478 ep_queue = &priv->controlqhs;
1479 break;
1550 case PIPE_BULK: 1480 case PIPE_BULK:
1551 if (!qh_urb_transaction(hcd, urb, &qtd_list, mem_flags)) 1481 ep_queue = &priv->bulkqhs;
1552 return -ENOMEM;
1553 pe = enqueue_an_ATL_packet;
1554 break; 1482 break;
1555
1556 case PIPE_INTERRUPT: 1483 case PIPE_INTERRUPT:
1557 if (!qh_urb_transaction(hcd, urb, &qtd_list, mem_flags)) 1484 if (urb->interval < 0)
1558 return -ENOMEM; 1485 return -EINVAL;
1559 pe = enqueue_an_INT_packet; 1486 /* FIXME: Check bandwidth */
1487 ep_queue = &priv->interruptqhs;
1560 break; 1488 break;
1561
1562 case PIPE_ISOCHRONOUS: 1489 case PIPE_ISOCHRONOUS:
1563 dev_err(hcd->self.controller, "PIPE_ISOCHRONOUS ain't supported\n"); 1490 dev_err(hcd->self.controller, "%s: isochronous USB packets "
1491 "not yet supported\n",
1492 __func__);
1493 return -EPIPE;
1564 default: 1494 default:
1495 dev_err(hcd->self.controller, "%s: unknown pipe type\n",
1496 __func__);
1565 return -EPIPE; 1497 return -EPIPE;
1566 } 1498 }
1567 1499
1568 return isp1760_prepare_enqueue(hcd, urb, &qtd_list, mem_flags, pe); 1500 if (usb_pipein(urb->pipe))
1569} 1501 urb->actual_length = 0;
1570
1571static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1572{
1573 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1574 struct inter_packet_info *ints;
1575 u32 i;
1576 u32 reg_base, or_reg, skip_reg;
1577 unsigned long flags;
1578 struct ptd ptd;
1579 packet_enqueue *pe;
1580 1502
1581 switch (usb_pipetype(urb->pipe)) { 1503 packetize_urb(hcd, urb, &new_qtds, mem_flags);
1582 case PIPE_ISOCHRONOUS: 1504 if (list_empty(&new_qtds))
1583 return -EPIPE; 1505 return -ENOMEM;
1584 break; 1506 urb->hcpriv = NULL; /* Used to signal unlink to interrupt handler */
1585 1507
1586 case PIPE_INTERRUPT: 1508 retval = 0;
1587 ints = priv->int_ints; 1509 spin_lock_irqsave(&priv->lock, spinflags);
1588 reg_base = INT_PTD_OFFSET;
1589 or_reg = HC_INT_IRQ_MASK_OR_REG;
1590 skip_reg = HC_INT_PTD_SKIPMAP_REG;
1591 pe = enqueue_an_INT_packet;
1592 break;
1593 1510
1594 default: 1511 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
1595 ints = priv->atl_ints; 1512 retval = -ESHUTDOWN;
1596 reg_base = ATL_PTD_OFFSET; 1513 goto out;
1597 or_reg = HC_ATL_IRQ_MASK_OR_REG;
1598 skip_reg = HC_ATL_PTD_SKIPMAP_REG;
1599 pe = enqueue_an_ATL_packet;
1600 break;
1601 } 1514 }
1515 retval = usb_hcd_link_urb_to_ep(hcd, urb);
1516 if (retval)
1517 goto out;
1602 1518
1603 memset(&ptd, 0, sizeof(ptd)); 1519 qh = urb->ep->hcpriv;
1604 spin_lock_irqsave(&priv->lock, flags); 1520 if (qh) {
1605 1521 qh_in_queue = 0;
1606 for (i = 0; i < 32; i++) { 1522 list_for_each_entry(qhit, ep_queue, qh_list) {
1607 if (!ints[i].qh) 1523 if (qhit == qh) {
1608 continue; 1524 qh_in_queue = 1;
1609 BUG_ON(!ints[i].qtd); 1525 break;
1526 }
1527 }
1528 if (!qh_in_queue)
1529 list_add_tail(&qh->qh_list, ep_queue);
1530 } else {
1531 qh = qh_alloc(GFP_ATOMIC);
1532 if (!qh) {
1533 retval = -ENOMEM;
1534 goto out;
1535 }
1536 list_add_tail(&qh->qh_list, ep_queue);
1537 urb->ep->hcpriv = qh;
1538 }
1610 1539
1611 if (ints[i].qtd->urb == urb) { 1540 list_splice_tail(&new_qtds, &qh->qtd_list);
1612 u32 skip_map; 1541 schedule_ptds(hcd);
1613 u32 or_map;
1614 struct isp1760_qtd *qtd;
1615 struct isp1760_qh *qh;
1616 1542
1617 skip_map = reg_read32(hcd->regs, skip_reg); 1543out:
1618 skip_map |= 1 << i; 1544 spin_unlock_irqrestore(&priv->lock, spinflags);
1619 reg_write32(hcd->regs, skip_reg, skip_map); 1545 return retval;
1546}
1620 1547
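The enqueue path above keeps one queue head per endpoint: urb->ep->hcpriv caches the qh, which is allocated on the first submission and reused afterwards (and re-linked into the control/bulk/interrupt queue when it has dropped off). A small user-space model of that caching, with all names invented for the sketch:

/* Model of the ep->hcpriv caching: allocate once, reuse afterwards. */
#include <stdio.h>
#include <stdlib.h>

struct model_qh {
        int enqueues;                   /* how often this qh has been reused */
};

struct model_ep {
        struct model_qh *hcpriv;        /* stands in for usb_host_endpoint.hcpriv */
};

static struct model_qh *model_get_qh(struct model_ep *ep)
{
        if (!ep->hcpriv) {
                ep->hcpriv = calloc(1, sizeof(*ep->hcpriv));
                if (!ep->hcpriv)
                        return NULL;    /* the driver returns -ENOMEM here */
        }
        ep->hcpriv->enqueues++;
        return ep->hcpriv;
}

int main(void)
{
        struct model_ep ep = { NULL };

        model_get_qh(&ep);
        model_get_qh(&ep);
        printf("same qh reused, enqueues = %d\n", ep.hcpriv->enqueues);
        free(ep.hcpriv);
        return 0;
}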
1621 or_map = reg_read32(hcd->regs, or_reg); 1548static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
1622 or_map &= ~(1 << i); 1549 struct isp1760_qh *qh)
1623 reg_write32(hcd->regs, or_reg, or_map); 1550{
1551 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1552 int skip_map;
1624 1553
1625 ptd_write(hcd->regs, reg_base, i, &ptd); 1554 WARN_ON(qh->slot == -1);
1626 1555
1627 qtd = ints[i].qtd; 1556 /* We need to forcefully reclaim the slot since some transfers never
1628 qh = ints[i].qh; 1557 return, e.g. interrupt transfers and NAKed bulk transfers. */
1558 if (usb_pipebulk(urb->pipe)) {
1559 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
1560 skip_map |= (1 << qh->slot);
1561 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
1562 priv->atl_slots[qh->slot].qh = NULL;
1563 priv->atl_slots[qh->slot].qtd = NULL;
1564 } else {
1565 skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
1566 skip_map |= (1 << qh->slot);
1567 reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
1568 priv->int_slots[qh->slot].qh = NULL;
1569 priv->int_slots[qh->slot].qtd = NULL;
1570 }
1629 1571
1630 free_mem(hcd, qtd); 1572 qh->slot = -1;
1631 qtd = clean_up_qtdlist(qtd, qh); 1573 priv->active_ptds--;
1574}
1632 1575
1633 ints[i].qh = NULL; 1576static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1634 ints[i].qtd = NULL; 1577 int status)
1578{
1579 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1580 unsigned long spinflags;
1581 struct isp1760_qh *qh;
1582 struct isp1760_qtd *qtd;
1583 int retval = 0;
1635 1584
1636 urb->status = status; 1585 spin_lock_irqsave(&priv->lock, spinflags);
1637 isp1760_urb_done(hcd, urb);
1638 if (qtd)
1639 pe(hcd, qh, qtd);
1640 break;
1641 1586
1642 } else { 1587 qh = urb->ep->hcpriv;
1643 struct isp1760_qtd *qtd; 1588 if (!qh) {
1644 1589 retval = -EINVAL;
1645 list_for_each_entry(qtd, &ints[i].qtd->qtd_list, 1590 goto out;
1646 qtd_list) { 1591 }
1647 if (qtd->urb == urb) {
1648 clean_up_qtdlist(qtd, ints[i].qh);
1649 isp1760_urb_done(hcd, urb);
1650 qtd = NULL;
1651 break;
1652 }
1653 }
1654 1592
1655 /* We found the urb before the last slot */ 1593 list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
1656 if (!qtd) 1594 if (qtd->urb == urb) {
1657 break; 1595 if (qtd->status == QTD_XFER_STARTED)
1596 kill_transfer(hcd, urb, qh);
1597 qtd->status = QTD_RETIRE;
1658 } 1598 }
1659 }
1660 1599
1661 spin_unlock_irqrestore(&priv->lock, flags); 1600 urb->status = status;
1662 return 0; 1601 schedule_ptds(hcd);
1602
1603out:
1604 spin_unlock_irqrestore(&priv->lock, spinflags);
1605 return retval;
1663} 1606}
1664 1607
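Both kill_transfer() and the dequeue path above manipulate the 32-slot PTD skip map: setting a slot's bit tells the isp176x to stop processing that PTD, clearing it re-arms the slot. A tiny stand-alone model of the bit handling, with the register replaced by a plain variable:

/* Model of the skip-map bookkeeping; model_skip_map stands in for the
 * HC_ATL_PTD_SKIPMAP_REG / HC_INT_PTD_SKIPMAP_REG registers. */
#include <stdio.h>

static unsigned int model_skip_map;

static void model_skip_slot(int slot)
{
        model_skip_map |= 1u << slot;           /* controller skips this PTD */
}

static void model_unskip_slot(int slot)
{
        model_skip_map &= ~(1u << slot);        /* slot active again */
}

int main(void)
{
        model_skip_slot(5);
        printf("map after skip:   0x%08x\n", model_skip_map);
        model_unskip_slot(5);
        printf("map after unskip: 0x%08x\n", model_skip_map);
        return 0;
}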
1665static irqreturn_t isp1760_irq(struct usb_hcd *hcd) 1608static void isp1760_endpoint_disable(struct usb_hcd *hcd,
1609 struct usb_host_endpoint *ep)
1666{ 1610{
1667 struct isp1760_hcd *priv = hcd_to_priv(hcd); 1611 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1668 u32 imask; 1612 unsigned long spinflags;
1669 irqreturn_t irqret = IRQ_NONE; 1613 struct isp1760_qh *qh;
1614 struct isp1760_qtd *qtd;
1670 1615
1671 spin_lock(&priv->lock); 1616 spin_lock_irqsave(&priv->lock, spinflags);
1672 1617
1673 if (!(hcd->state & HC_STATE_RUNNING)) 1618 qh = ep->hcpriv;
1674 goto leave; 1619 if (!qh)
1620 goto out;
1675 1621
1676 imask = reg_read32(hcd->regs, HC_INTERRUPT_REG); 1622 list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
1677 if (unlikely(!imask)) 1623 if (qtd->status == QTD_XFER_STARTED)
1678 goto leave; 1624 kill_transfer(hcd, qtd->urb, qh);
1625 qtd->status = QTD_RETIRE;
1626 qtd->urb->status = -ECONNRESET;
1627 }
1679 1628
1680 reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); 1629 ep->hcpriv = NULL;
1681 if (imask & (HC_ATL_INT | HC_SOT_INT)) 1630 /* Cannot free qh here since it will be parsed by schedule_ptds() */
1682 do_atl_int(hcd);
1683 1631
1684 if (imask & HC_INTL_INT) 1632 schedule_ptds(hcd);
1685 do_intl_int(hcd);
1686 1633
1687 irqret = IRQ_HANDLED; 1634out:
1688leave: 1635 spin_unlock_irqrestore(&priv->lock, spinflags);
1689 spin_unlock(&priv->lock);
1690 return irqret;
1691} 1636}
1692 1637
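isp1760_endpoint_disable() above walks every qtd still queued for the endpoint, kills the ones already started on the hardware and marks all of them for retirement; the actual giveback is left to schedule_ptds(). A rough user-space model of that walk (the states and helpers are stand-ins, not driver code):

/* Model of the retire-on-disable walk over an endpoint's qtd list. */
#include <stdio.h>

enum { MODEL_ENQUEUED, MODEL_STARTED, MODEL_RETIRE };

struct model_qtd {
        int status;
        struct model_qtd *next;
};

static void model_kill_transfer(struct model_qtd *qtd)
{
        /* in the driver this reclaims the hardware slot via the skip map */
        printf("killing started transfer %p\n", (void *)qtd);
}

static void model_endpoint_disable(struct model_qtd *head)
{
        struct model_qtd *qtd;

        for (qtd = head; qtd; qtd = qtd->next) {
                if (qtd->status == MODEL_STARTED)
                        model_kill_transfer(qtd);
                qtd->status = MODEL_RETIRE;     /* urb gets -ECONNRESET in the driver */
        }
}

int main(void)
{
        struct model_qtd third = { MODEL_ENQUEUED, NULL };
        struct model_qtd second = { MODEL_STARTED, &third };
        struct model_qtd first = { MODEL_ENQUEUED, &second };

        model_endpoint_disable(&first);
        printf("all three qtds retired\n");
        return 0;
}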
1693static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf) 1638static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
@@ -1778,7 +1723,7 @@ static int check_reset_complete(struct usb_hcd *hcd, int index,
1778 /* if reset finished and it's still not enabled -- handoff */ 1723 /* if reset finished and it's still not enabled -- handoff */
1779 if (!(port_status & PORT_PE)) { 1724 if (!(port_status & PORT_PE)) {
1780 1725
1781 dev_err(hcd->self.controller, 1726 dev_info(hcd->self.controller,
1782 "port %d full speed --> companion\n", 1727 "port %d full speed --> companion\n",
1783 index + 1); 1728 index + 1);
1784 1729
@@ -1787,7 +1732,7 @@ static int check_reset_complete(struct usb_hcd *hcd, int index,
1787 reg_write32(hcd->regs, HC_PORTSC1, port_status); 1732 reg_write32(hcd->regs, HC_PORTSC1, port_status);
1788 1733
1789 } else 1734 } else
1790 dev_err(hcd->self.controller, "port %d high speed\n", 1735 dev_info(hcd->self.controller, "port %d high speed\n",
1791 index + 1); 1736 index + 1);
1792 1737
1793 return port_status; 1738 return port_status;
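The check above hands a port over to the companion controller when the port still is not enabled once the reset has finished, i.e. a full-speed device is attached; an enabled port stays with the isp176x as high speed. A small model of that decision; the enable-bit position follows the standard EHCI PORTSC layout and should be read as an assumption for this sketch:

/* Model of the port-enable test; MODEL_PORT_PE mirrors the EHCI PORTSC
 * "Port Enabled/Disabled" bit (an assumption, not taken from the diff). */
#include <stdio.h>

#define MODEL_PORT_PE (1u << 2)

static const char *model_port_owner(unsigned int port_status)
{
        return (port_status & MODEL_PORT_PE) ?
                "high speed, port stays with isp176x" :
                "full speed, hand port to companion";
}

int main(void)
{
        printf("%s\n", model_port_owner(0x00001005));   /* enable bit set */
        printf("%s\n", model_port_owner(0x00001001));   /* enable bit clear */
        return 0;
}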
@@ -2059,51 +2004,6 @@ error:
2059 return retval; 2004 return retval;
2060} 2005}
2061 2006
2062static void isp1760_endpoint_disable(struct usb_hcd *hcd,
2063 struct usb_host_endpoint *ep)
2064{
2065 struct isp1760_hcd *priv = hcd_to_priv(hcd);
2066 struct isp1760_qh *qh;
2067 struct isp1760_qtd *qtd;
2068 unsigned long flags;
2069
2070 spin_lock_irqsave(&priv->lock, flags);
2071 qh = ep->hcpriv;
2072 if (!qh)
2073 goto out;
2074
2075 ep->hcpriv = NULL;
2076 do {
2077		/* more than one entry might get removed */
2078 if (list_empty(&qh->qtd_list))
2079 break;
2080
2081 qtd = list_first_entry(&qh->qtd_list, struct isp1760_qtd,
2082 qtd_list);
2083
2084 if (qtd->status & URB_ENQUEUED) {
2085 spin_unlock_irqrestore(&priv->lock, flags);
2086 isp1760_urb_dequeue(hcd, qtd->urb, -ECONNRESET);
2087 spin_lock_irqsave(&priv->lock, flags);
2088 } else {
2089 struct urb *urb;
2090
2091 urb = qtd->urb;
2092 clean_up_qtdlist(qtd, qh);
2093 urb->status = -ECONNRESET;
2094 isp1760_urb_done(hcd, urb);
2095 }
2096 } while (1);
2097
2098 qh_destroy(qh);
2099 /* remove requests and leak them.
2100	 * ATL transfers are done pretty fast, INT could take a while...
2101	 * The latter should be removed
2102 */
2103out:
2104 spin_unlock_irqrestore(&priv->lock, flags);
2105}
2106
2107static int isp1760_get_frame(struct usb_hcd *hcd) 2007static int isp1760_get_frame(struct usb_hcd *hcd)
2108{ 2008{
2109 struct isp1760_hcd *priv = hcd_to_priv(hcd); 2009 struct isp1760_hcd *priv = hcd_to_priv(hcd);
@@ -2165,6 +2065,13 @@ static const struct hc_driver isp1760_hc_driver = {
2165 2065
2166int __init init_kmem_once(void) 2066int __init init_kmem_once(void)
2167{ 2067{
2068 urb_listitem_cachep = kmem_cache_create("isp1760 urb_listitem",
2069 sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
2070 SLAB_MEM_SPREAD, NULL);
2071
2072 if (!urb_listitem_cachep)
2073 return -ENOMEM;
2074
2168 qtd_cachep = kmem_cache_create("isp1760_qtd", 2075 qtd_cachep = kmem_cache_create("isp1760_qtd",
2169 sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY | 2076 sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
2170 SLAB_MEM_SPREAD, NULL); 2077 SLAB_MEM_SPREAD, NULL);
@@ -2187,6 +2094,7 @@ void deinit_kmem_cache(void)
2187{ 2094{
2188 kmem_cache_destroy(qtd_cachep); 2095 kmem_cache_destroy(qtd_cachep);
2189 kmem_cache_destroy(qh_cachep); 2096 kmem_cache_destroy(qh_cachep);
2097 kmem_cache_destroy(urb_listitem_cachep);
2190} 2098}
2191 2099
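init_kmem_once()/deinit_kmem_cache() above gain a third slab cache for the new urb_listitem objects. The general create/destroy pairing, including unwinding earlier caches when a later kmem_cache_create() fails, looks roughly like the module sketch below; the names are made up and this is a generic pattern, not the driver's exact error path:

/* Generic sketch of paired slab-cache setup and teardown. */
#include <linux/module.h>
#include <linux/slab.h>

struct item_example { int payload; };
struct node_example { int payload; };

static struct kmem_cache *item_cachep_example;
static struct kmem_cache *node_cachep_example;

static int __init caches_example_init(void)
{
        item_cachep_example = kmem_cache_create("item_example",
                        sizeof(struct item_example), 0, 0, NULL);
        if (!item_cachep_example)
                return -ENOMEM;

        node_cachep_example = kmem_cache_create("node_example",
                        sizeof(struct node_example), 0, 0, NULL);
        if (!node_cachep_example) {
                kmem_cache_destroy(item_cachep_example);        /* unwind */
                return -ENOMEM;
        }
        return 0;
}

static void __exit caches_example_exit(void)
{
        kmem_cache_destroy(node_cachep_example);
        kmem_cache_destroy(item_cachep_example);
}

module_init(caches_example_init);
module_exit(caches_example_exit);
MODULE_LICENSE("GPL");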
2192struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len, 2100struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,