 drivers/net/ethernet/cadence/macb.c | 456 ++++++++++++++++++++-----------
 drivers/net/ethernet/cadence/macb.h |  36 +++-
 2 files changed, 328 insertions(+), 164 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index b6bc318b148e..0987d2a77f9f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -66,23 +66,25 @@ static unsigned int macb_tx_ring_wrap(unsigned int index)
 	return index & (TX_RING_SIZE - 1);
 }
 
-static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
+					  unsigned int index)
 {
-	return &bp->tx_ring[macb_tx_ring_wrap(index)];
+	return &queue->tx_ring[macb_tx_ring_wrap(index)];
 }
 
-static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
+				       unsigned int index)
 {
-	return &bp->tx_skb[macb_tx_ring_wrap(index)];
+	return &queue->tx_skb[macb_tx_ring_wrap(index)];
 }
 
-static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 {
 	dma_addr_t offset;
 
 	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
 
-	return bp->tx_ring_dma + offset;
+	return queue->tx_ring_dma + offset;
 }
 
 static unsigned int macb_rx_ring_wrap(unsigned int index)
@@ -490,38 +492,49 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
 
 static void macb_tx_error_task(struct work_struct *work)
 {
-	struct macb *bp = container_of(work, struct macb, tx_error_task);
+	struct macb_queue *queue = container_of(work, struct macb_queue,
+						tx_error_task);
+	struct macb *bp = queue->bp;
 	struct macb_tx_skb *tx_skb;
+	struct macb_dma_desc *desc;
 	struct sk_buff *skb;
 	unsigned int tail;
+	unsigned long flags;
+
+	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
+		    (unsigned int)(queue - bp->queues),
+		    queue->tx_tail, queue->tx_head);
 
-	netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
-		    bp->tx_tail, bp->tx_head);
+	/* Prevent the queue IRQ handlers from running: each of them may call
+	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
+	 * As explained below, we have to halt the transmission before updating
+	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
+	 * network engine about the macb/gem being halted.
+	 */
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* Make sure nobody is trying to queue up new packets */
-	netif_stop_queue(bp->dev);
+	netif_tx_stop_all_queues(bp->dev);
 
 	/*
 	 * Stop transmission now
 	 * (in case we have just queued new packets)
+	 * macb/gem must be halted to write TBQP register
 	 */
 	if (macb_halt_tx(bp))
 		/* Just complain for now, reinitializing TX path can be good */
 		netdev_err(bp->dev, "BUG: halt tx timed out\n");
 
-	/* No need for the lock here as nobody will interrupt us anymore */
-
 	/*
 	 * Treat frames in TX queue including the ones that caused the error.
 	 * Free transmit buffers in upper layer.
 	 */
-	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
-		struct macb_dma_desc *desc;
-		u32 ctrl;
+	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
+		u32 ctrl;
 
-		desc = macb_tx_desc(bp, tail);
+		desc = macb_tx_desc(queue, tail);
 		ctrl = desc->ctrl;
-		tx_skb = macb_tx_skb(bp, tail);
+		tx_skb = macb_tx_skb(queue, tail);
 		skb = tx_skb->skb;
 
 		if (ctrl & MACB_BIT(TX_USED)) {
@@ -529,7 +542,7 @@ static void macb_tx_error_task(struct work_struct *work)
 			while (!skb) {
 				macb_tx_unmap(bp, tx_skb);
 				tail++;
-				tx_skb = macb_tx_skb(bp, tail);
+				tx_skb = macb_tx_skb(queue, tail);
 				skb = tx_skb->skb;
 			}
 
@@ -558,45 +571,56 @@ static void macb_tx_error_task(struct work_struct *work)
 		macb_tx_unmap(bp, tx_skb);
 	}
 
+	/* Set end of TX queue */
+	desc = macb_tx_desc(queue, 0);
+	desc->addr = 0;
+	desc->ctrl = MACB_BIT(TX_USED);
+
 	/* Make descriptor updates visible to hardware */
 	wmb();
 
 	/* Reinitialize the TX desc queue */
-	macb_writel(bp, TBQP, bp->tx_ring_dma);
+	queue_writel(queue, TBQP, queue->tx_ring_dma);
 	/* Make TX ring reflect state of hardware */
-	bp->tx_head = bp->tx_tail = 0;
-
-	/* Now we are ready to start transmission again */
-	netif_wake_queue(bp->dev);
+	queue->tx_head = 0;
+	queue->tx_tail = 0;
 
 	/* Housework before enabling TX IRQ */
 	macb_writel(bp, TSR, macb_readl(bp, TSR));
-	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+	queue_writel(queue, IER, MACB_TX_INT_FLAGS);
+
+	/* Now we are ready to start transmission again */
+	netif_tx_start_all_queues(bp->dev);
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+
+	spin_unlock_irqrestore(&bp->lock, flags);
 }
 
-static void macb_tx_interrupt(struct macb *bp)
+static void macb_tx_interrupt(struct macb_queue *queue)
 {
 	unsigned int tail;
 	unsigned int head;
 	u32 status;
+	struct macb *bp = queue->bp;
+	u16 queue_index = queue - bp->queues;
 
 	status = macb_readl(bp, TSR);
 	macb_writel(bp, TSR, status);
 
 	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
-		macb_writel(bp, ISR, MACB_BIT(TCOMP));
+		queue_writel(queue, ISR, MACB_BIT(TCOMP));
 
 	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
 		    (unsigned long)status);
 
-	head = bp->tx_head;
-	for (tail = bp->tx_tail; tail != head; tail++) {
+	head = queue->tx_head;
+	for (tail = queue->tx_tail; tail != head; tail++) {
 		struct macb_tx_skb *tx_skb;
 		struct sk_buff *skb;
 		struct macb_dma_desc *desc;
 		u32 ctrl;
 
-		desc = macb_tx_desc(bp, tail);
+		desc = macb_tx_desc(queue, tail);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
@@ -611,7 +635,7 @@ static void macb_tx_interrupt(struct macb *bp)
 
 		/* Process all buffers of the current transmitted frame */
 		for (;; tail++) {
-			tx_skb = macb_tx_skb(bp, tail);
+			tx_skb = macb_tx_skb(queue, tail);
 			skb = tx_skb->skb;
 
 			/* First, update TX stats if needed */
@@ -634,11 +658,11 @@ static void macb_tx_interrupt(struct macb *bp)
 		}
 	}
 
-	bp->tx_tail = tail;
-	if (netif_queue_stopped(bp->dev)
-			&& CIRC_CNT(bp->tx_head, bp->tx_tail,
-				    TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
-		netif_wake_queue(bp->dev);
+	queue->tx_tail = tail;
+	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
+	    CIRC_CNT(queue->tx_head, queue->tx_tail,
+		     TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
+		netif_wake_subqueue(bp->dev, queue_index);
 }
 
 static void gem_rx_refill(struct macb *bp)
@@ -949,11 +973,12 @@ static int macb_poll(struct napi_struct *napi, int budget)
 
 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 {
-	struct net_device *dev = dev_id;
-	struct macb *bp = netdev_priv(dev);
+	struct macb_queue *queue = dev_id;
+	struct macb *bp = queue->bp;
+	struct net_device *dev = bp->dev;
 	u32 status;
 
-	status = macb_readl(bp, ISR);
+	status = queue_readl(queue, ISR);
 
 	if (unlikely(!status))
 		return IRQ_NONE;
@@ -963,11 +988,13 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 	while (status) {
 		/* close possible race with dev_close */
 		if (unlikely(!netif_running(dev))) {
-			macb_writel(bp, IDR, -1);
+			queue_writel(queue, IDR, -1);
 			break;
 		}
 
-		netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
+			    (unsigned int)(queue - bp->queues),
+			    (unsigned long)status);
 
 		if (status & MACB_RX_INT_FLAGS) {
 			/*
@@ -977,9 +1004,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 			 * is already scheduled, so disable interrupts
 			 * now.
 			 */
-			macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
-				macb_writel(bp, ISR, MACB_BIT(RCOMP));
+				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 
 			if (napi_schedule_prep(&bp->napi)) {
 				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
@@ -988,17 +1015,17 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 		}
 
 		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
-			macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
-			schedule_work(&bp->tx_error_task);
+			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
+			schedule_work(&queue->tx_error_task);
 
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
-				macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
 
 			break;
 		}
 
 		if (status & MACB_BIT(TCOMP))
-			macb_tx_interrupt(bp);
+			macb_tx_interrupt(queue);
 
 		/*
 		 * Link change detection isn't possible with RMII, so we'll
@@ -1013,7 +1040,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 				bp->hw_stats.macb.rx_overruns++;
 
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
-				macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
+				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
 		}
 
 		if (status & MACB_BIT(HRESP)) {
@@ -1025,10 +1052,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 			netdev_err(dev, "DMA bus error: HRESP not OK\n");
 
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
-				macb_writel(bp, ISR, MACB_BIT(HRESP));
+				queue_writel(queue, ISR, MACB_BIT(HRESP));
 		}
 
-		status = macb_readl(bp, ISR);
+		status = queue_readl(queue, ISR);
 	}
 
 	spin_unlock(&bp->lock);
@@ -1043,10 +1070,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
  */
 static void macb_poll_controller(struct net_device *dev)
 {
+	struct macb *bp = netdev_priv(dev);
+	struct macb_queue *queue;
 	unsigned long flags;
+	unsigned int q;
 
 	local_irq_save(flags);
-	macb_interrupt(dev->irq, dev);
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+		macb_interrupt(dev->irq, queue);
 	local_irq_restore(flags);
 }
 #endif
@@ -1058,10 +1089,11 @@ static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
 }
 
 static unsigned int macb_tx_map(struct macb *bp,
+				struct macb_queue *queue,
 				struct sk_buff *skb)
 {
 	dma_addr_t mapping;
-	unsigned int len, entry, i, tx_head = bp->tx_head;
+	unsigned int len, entry, i, tx_head = queue->tx_head;
 	struct macb_tx_skb *tx_skb = NULL;
 	struct macb_dma_desc *desc;
 	unsigned int offset, size, count = 0;
@@ -1075,7 +1107,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 	while (len) {
 		size = min(len, bp->max_tx_length);
 		entry = macb_tx_ring_wrap(tx_head);
-		tx_skb = &bp->tx_skb[entry];
+		tx_skb = &queue->tx_skb[entry];
 
 		mapping = dma_map_single(&bp->pdev->dev,
 					 skb->data + offset,
@@ -1104,7 +1136,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 		while (len) {
 			size = min(len, bp->max_tx_length);
 			entry = macb_tx_ring_wrap(tx_head);
-			tx_skb = &bp->tx_skb[entry];
+			tx_skb = &queue->tx_skb[entry];
 
 			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
 						   offset, size, DMA_TO_DEVICE);
@@ -1143,14 +1175,14 @@ static unsigned int macb_tx_map(struct macb *bp,
 	i = tx_head;
 	entry = macb_tx_ring_wrap(i);
 	ctrl = MACB_BIT(TX_USED);
-	desc = &bp->tx_ring[entry];
+	desc = &queue->tx_ring[entry];
 	desc->ctrl = ctrl;
 
 	do {
 		i--;
 		entry = macb_tx_ring_wrap(i);
-		tx_skb = &bp->tx_skb[entry];
-		desc = &bp->tx_ring[entry];
+		tx_skb = &queue->tx_skb[entry];
+		desc = &queue->tx_ring[entry];
 
 		ctrl = (u32)tx_skb->size;
 		if (eof) {
@@ -1167,17 +1199,17 @@ static unsigned int macb_tx_map(struct macb *bp,
 		 */
 		wmb();
 		desc->ctrl = ctrl;
-	} while (i != bp->tx_head);
+	} while (i != queue->tx_head);
 
-	bp->tx_head = tx_head;
+	queue->tx_head = tx_head;
 
 	return count;
 
 dma_error:
 	netdev_err(bp->dev, "TX DMA map failed\n");
 
-	for (i = bp->tx_head; i != tx_head; i++) {
-		tx_skb = macb_tx_skb(bp, i);
+	for (i = queue->tx_head; i != tx_head; i++) {
+		tx_skb = macb_tx_skb(queue, i);
 
 		macb_tx_unmap(bp, tx_skb);
 	}
@@ -1187,14 +1219,16 @@ dma_error:
 
 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	u16 queue_index = skb_get_queue_mapping(skb);
 	struct macb *bp = netdev_priv(dev);
+	struct macb_queue *queue = &bp->queues[queue_index];
 	unsigned long flags;
 	unsigned int count, nr_frags, frag_size, f;
 
 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
 	netdev_vdbg(bp->dev,
-		   "start_xmit: len %u head %p data %p tail %p end %p\n",
-		   skb->len, skb->head, skb->data,
+		   "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
+		   queue_index, skb->len, skb->head, skb->data,
 		   skb_tail_pointer(skb), skb_end_pointer(skb));
 	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
 		       skb->data, 16, true);
@@ -1214,16 +1248,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
-	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < count) {
-		netif_stop_queue(dev);
+	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
+		netif_stop_subqueue(dev, queue_index);
 		spin_unlock_irqrestore(&bp->lock, flags);
 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
-			   bp->tx_head, bp->tx_tail);
+			   queue->tx_head, queue->tx_tail);
 		return NETDEV_TX_BUSY;
 	}
 
 	/* Map socket buffer for DMA transfer */
-	if (!macb_tx_map(bp, skb)) {
+	if (!macb_tx_map(bp, queue, skb)) {
 		dev_kfree_skb_any(skb);
 		goto unlock;
 	}
@@ -1235,8 +1269,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 
-	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
-		netif_stop_queue(dev);
+	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
+		netif_stop_subqueue(dev, queue_index);
 
 unlock:
 	spin_unlock_irqrestore(&bp->lock, flags);
@@ -1304,20 +1338,24 @@ static void macb_free_rx_buffers(struct macb *bp)
 
 static void macb_free_consistent(struct macb *bp)
 {
-	if (bp->tx_skb) {
-		kfree(bp->tx_skb);
-		bp->tx_skb = NULL;
-	}
+	struct macb_queue *queue;
+	unsigned int q;
+
 	bp->macbgem_ops.mog_free_rx_buffers(bp);
 	if (bp->rx_ring) {
 		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
 				  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 	}
-	if (bp->tx_ring) {
-		dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
-				  bp->tx_ring, bp->tx_ring_dma);
-		bp->tx_ring = NULL;
+
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		kfree(queue->tx_skb);
+		queue->tx_skb = NULL;
+		if (queue->tx_ring) {
+			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+					  queue->tx_ring, queue->tx_ring_dma);
+			queue->tx_ring = NULL;
+		}
 	}
 }
 
@@ -1354,12 +1392,27 @@ static int macb_alloc_rx_buffers(struct macb *bp)
 
 static int macb_alloc_consistent(struct macb *bp)
 {
+	struct macb_queue *queue;
+	unsigned int q;
 	int size;
 
-	size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
-	bp->tx_skb = kmalloc(size, GFP_KERNEL);
-	if (!bp->tx_skb)
-		goto out_err;
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		size = TX_RING_BYTES;
+		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+						    &queue->tx_ring_dma,
+						    GFP_KERNEL);
+		if (!queue->tx_ring)
+			goto out_err;
+		netdev_dbg(bp->dev,
+			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
+			   q, size, (unsigned long)queue->tx_ring_dma,
+			   queue->tx_ring);
+
+		size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
+		queue->tx_skb = kmalloc(size, GFP_KERNEL);
+		if (!queue->tx_skb)
+			goto out_err;
+	}
 
 	size = RX_RING_BYTES;
 	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
@@ -1370,15 +1423,6 @@ static int macb_alloc_consistent(struct macb *bp)
1370 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 1423 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1371 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); 1424 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
1372 1425
1373 size = TX_RING_BYTES;
1374 bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1375 &bp->tx_ring_dma, GFP_KERNEL);
1376 if (!bp->tx_ring)
1377 goto out_err;
1378 netdev_dbg(bp->dev,
1379 "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
1380 size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
1381
1382 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 1426 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1383 goto out_err; 1427 goto out_err;
1384 1428
@@ -1391,15 +1435,22 @@ out_err:
 
 static void gem_init_rings(struct macb *bp)
 {
+	struct macb_queue *queue;
+	unsigned int q;
 	int i;
 
-	for (i = 0; i < TX_RING_SIZE; i++) {
-		bp->tx_ring[i].addr = 0;
-		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		for (i = 0; i < TX_RING_SIZE; i++) {
+			queue->tx_ring[i].addr = 0;
+			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		}
+		queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+		queue->tx_head = 0;
+		queue->tx_tail = 0;
 	}
-	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
 
-	bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
+	bp->rx_tail = 0;
+	bp->rx_prepared_head = 0;
 
 	gem_rx_refill(bp);
 }
@@ -1418,16 +1469,21 @@ static void macb_init_rings(struct macb *bp)
 	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
 
 	for (i = 0; i < TX_RING_SIZE; i++) {
-		bp->tx_ring[i].addr = 0;
-		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		bp->queues[0].tx_ring[i].addr = 0;
+		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		bp->queues[0].tx_head = 0;
+		bp->queues[0].tx_tail = 0;
 	}
-	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+	bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
 
-	bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
+	bp->rx_tail = 0;
 }
 
 static void macb_reset_hw(struct macb *bp)
 {
+	struct macb_queue *queue;
+	unsigned int q;
+
 	/*
 	 * Disable RX and TX (XXX: Should we halt the transmission
 	 * more gracefully?)
@@ -1442,8 +1498,10 @@ static void macb_reset_hw(struct macb *bp)
 	macb_writel(bp, RSR, -1);
 
 	/* Disable all interrupts */
-	macb_writel(bp, IDR, -1);
-	macb_readl(bp, ISR);
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		queue_writel(queue, IDR, -1);
+		queue_readl(queue, ISR);
+	}
 }
 
 static u32 gem_mdc_clk_div(struct macb *bp)
@@ -1540,6 +1598,9 @@ static void macb_configure_dma(struct macb *bp)
 
 static void macb_init_hw(struct macb *bp)
 {
+	struct macb_queue *queue;
+	unsigned int q;
+
 	u32 config;
 
 	macb_reset_hw(bp);
@@ -1565,16 +1626,18 @@ static void macb_init_hw(struct macb *bp)
 
 	/* Initialize TX and RX buffers */
 	macb_writel(bp, RBQP, bp->rx_ring_dma);
-	macb_writel(bp, TBQP, bp->tx_ring_dma);
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		queue_writel(queue, TBQP, queue->tx_ring_dma);
+
+		/* Enable interrupts */
+		queue_writel(queue, IER,
+			     MACB_RX_INT_FLAGS |
+			     MACB_TX_INT_FLAGS |
+			     MACB_BIT(HRESP));
+	}
 
 	/* Enable TX and RX */
 	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
-
-	/* Enable interrupts */
-	macb_writel(bp, IER, (MACB_RX_INT_FLAGS
-			      | MACB_TX_INT_FLAGS
-			      | MACB_BIT(HRESP)));
-
 }
 
 /*
@@ -1736,7 +1799,7 @@ static int macb_open(struct net_device *dev)
 	/* schedule a link state check */
 	phy_start(bp->phy_dev);
 
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	return 0;
 }
@@ -1746,7 +1809,7 @@ static int macb_close(struct net_device *dev)
 	struct macb *bp = netdev_priv(dev);
 	unsigned long flags;
 
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	napi_disable(&bp->napi);
 
 	if (bp->phy_dev)
@@ -1895,8 +1958,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
 			| MACB_GREGS_VERSION;
 
-	tail = macb_tx_ring_wrap(bp->tx_tail);
-	head = macb_tx_ring_wrap(bp->tx_head);
+	tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
+	head = macb_tx_ring_wrap(bp->queues[0].tx_head);
 
 	regs_buff[0] = macb_readl(bp, NCR);
 	regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
@@ -1909,8 +1972,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 
 	regs_buff[8] = tail;
 	regs_buff[9] = head;
-	regs_buff[10] = macb_tx_dma(bp, tail);
-	regs_buff[11] = macb_tx_dma(bp, head);
+	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
+	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
 
 	if (macb_is_gem(bp)) {
 		regs_buff[12] = gem_readl(bp, USRIO);
@@ -2061,16 +2124,44 @@ static void macb_configure_caps(struct macb *bp)
 	netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
 }
 
+static void macb_probe_queues(void __iomem *mem,
+			      unsigned int *queue_mask,
+			      unsigned int *num_queues)
+{
+	unsigned int hw_q;
+	u32 mid;
+
+	*queue_mask = 0x1;
+	*num_queues = 1;
+
+	/* is it macb or gem ? */
+	mid = __raw_readl(mem + MACB_MID);
+	if (MACB_BFEXT(IDNUM, mid) != 0x2)
+		return;
+
+	/* bit 0 is never set but queue 0 always exists */
+	*queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff;
+	*queue_mask |= 0x1;
+
+	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
+		if (*queue_mask & (1 << hw_q))
+			(*num_queues)++;
+}
+
 static int __init macb_probe(struct platform_device *pdev)
 {
 	struct macb_platform_data *pdata;
 	struct resource *regs;
 	struct net_device *dev;
 	struct macb *bp;
+	struct macb_queue *queue;
 	struct phy_device *phydev;
 	u32 config;
 	int err = -ENXIO;
 	const char *mac;
+	void __iomem *mem;
+	unsigned int hw_q, queue_mask, q, num_queues, q_irq = 0;
+	struct clk *pclk, *hclk, *tx_clk;
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!regs) {
@@ -2078,72 +2169,112 @@ static int __init macb_probe(struct platform_device *pdev)
 		goto err_out;
 	}
 
-	err = -ENOMEM;
-	dev = alloc_etherdev(sizeof(*bp));
-	if (!dev)
-		goto err_out;
-
-	SET_NETDEV_DEV(dev, &pdev->dev);
-
-	bp = netdev_priv(dev);
-	bp->pdev = pdev;
-	bp->dev = dev;
-
-	spin_lock_init(&bp->lock);
-	INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
-
-	bp->pclk = devm_clk_get(&pdev->dev, "pclk");
-	if (IS_ERR(bp->pclk)) {
-		err = PTR_ERR(bp->pclk);
+	pclk = devm_clk_get(&pdev->dev, "pclk");
+	if (IS_ERR(pclk)) {
+		err = PTR_ERR(pclk);
 		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
-		goto err_out_free_dev;
+		goto err_out;
 	}
 
-	bp->hclk = devm_clk_get(&pdev->dev, "hclk");
-	if (IS_ERR(bp->hclk)) {
-		err = PTR_ERR(bp->hclk);
+	hclk = devm_clk_get(&pdev->dev, "hclk");
+	if (IS_ERR(hclk)) {
+		err = PTR_ERR(hclk);
 		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
-		goto err_out_free_dev;
+		goto err_out;
 	}
 
-	bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+	tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
 
-	err = clk_prepare_enable(bp->pclk);
+	err = clk_prepare_enable(pclk);
 	if (err) {
 		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
-		goto err_out_free_dev;
+		goto err_out;
 	}
 
-	err = clk_prepare_enable(bp->hclk);
+	err = clk_prepare_enable(hclk);
 	if (err) {
 		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
 		goto err_out_disable_pclk;
 	}
 
-	if (!IS_ERR(bp->tx_clk)) {
-		err = clk_prepare_enable(bp->tx_clk);
+	if (!IS_ERR(tx_clk)) {
+		err = clk_prepare_enable(tx_clk);
 		if (err) {
 			dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
 				err);
 			goto err_out_disable_hclk;
 		}
 	}
 
-	bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
-	if (!bp->regs) {
+	err = -ENOMEM;
+	mem = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+	if (!mem) {
 		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
-		err = -ENOMEM;
 		goto err_out_disable_clocks;
 	}
 
-	dev->irq = platform_get_irq(pdev, 0);
-	err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
-			       dev->name, dev);
-	if (err) {
-		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
-			dev->irq, err);
+	macb_probe_queues(mem, &queue_mask, &num_queues);
+	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
+	if (!dev)
 		goto err_out_disable_clocks;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	bp = netdev_priv(dev);
+	bp->pdev = pdev;
+	bp->dev = dev;
+	bp->regs = mem;
+	bp->num_queues = num_queues;
+	bp->pclk = pclk;
+	bp->hclk = hclk;
+	bp->tx_clk = tx_clk;
+
+	spin_lock_init(&bp->lock);
+
+	/* set the queue register mapping once for all: queue0 has a special
+	 * register mapping but we don't want to test the queue index then
+	 * compute the corresponding register offset at run time.
+	 */
+	for (hw_q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
+		if (!(queue_mask & (1 << hw_q)))
+			continue;
+
+		queue = &bp->queues[q_irq];
+		queue->bp = bp;
+		if (hw_q) {
+			queue->ISR  = GEM_ISR(hw_q - 1);
+			queue->IER  = GEM_IER(hw_q - 1);
+			queue->IDR  = GEM_IDR(hw_q - 1);
+			queue->IMR  = GEM_IMR(hw_q - 1);
+			queue->TBQP = GEM_TBQP(hw_q - 1);
+		} else {
+			/* queue0 uses legacy registers */
+			queue->ISR  = MACB_ISR;
+			queue->IER  = MACB_IER;
+			queue->IDR  = MACB_IDR;
+			queue->IMR  = MACB_IMR;
+			queue->TBQP = MACB_TBQP;
+		}
+
+		/* get irq: here we use the linux queue index, not the hardware
+		 * queue index. the queue irq definitions in the device tree
+		 * must remove the optional gaps that could exist in the
+		 * hardware queue mask.
+		 */
+		queue->irq = platform_get_irq(pdev, q_irq);
+		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
+				       0, dev->name, queue);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Unable to request IRQ %d (error %d)\n",
+				queue->irq, err);
+			goto err_out_free_irq;
+		}
+
+		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
+		q_irq++;
 	}
+	dev->irq = bp->queues[0].irq;
 
 	dev->netdev_ops = &macb_netdev_ops;
 	netif_napi_add(dev, &bp->napi, macb_poll, 64);
@@ -2219,7 +2350,7 @@ static int __init macb_probe(struct platform_device *pdev)
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-		goto err_out_disable_clocks;
+		goto err_out_free_irq;
 	}
 
 	err = macb_mii_init(bp);
@@ -2242,15 +2373,17 @@ static int __init macb_probe(struct platform_device *pdev)
 
 err_out_unregister_netdev:
 	unregister_netdev(dev);
+err_out_free_irq:
+	for (q = 0, queue = bp->queues; q < q_irq; ++q, ++queue)
+		devm_free_irq(&pdev->dev, queue->irq, queue);
+	free_netdev(dev);
 err_out_disable_clocks:
-	if (!IS_ERR(bp->tx_clk))
-		clk_disable_unprepare(bp->tx_clk);
+	if (!IS_ERR(tx_clk))
+		clk_disable_unprepare(tx_clk);
 err_out_disable_hclk:
-	clk_disable_unprepare(bp->hclk);
+	clk_disable_unprepare(hclk);
 err_out_disable_pclk:
-	clk_disable_unprepare(bp->pclk);
-err_out_free_dev:
-	free_netdev(dev);
+	clk_disable_unprepare(pclk);
 err_out:
 	return err;
 }
@@ -2259,6 +2392,8 @@ static int __exit macb_remove(struct platform_device *pdev)
 {
 	struct net_device *dev;
 	struct macb *bp;
+	struct macb_queue *queue;
+	unsigned int q;
 
 	dev = platform_get_drvdata(pdev);
 
@@ -2270,11 +2405,14 @@ static int __exit macb_remove(struct platform_device *pdev)
 		kfree(bp->mii_bus->irq);
 		mdiobus_free(bp->mii_bus);
 		unregister_netdev(dev);
+		queue = bp->queues;
+		for (q = 0; q < bp->num_queues; ++q, ++queue)
+			devm_free_irq(&pdev->dev, queue->irq, queue);
+		free_netdev(dev);
 		if (!IS_ERR(bp->tx_clk))
 			clk_disable_unprepare(bp->tx_clk);
 		clk_disable_unprepare(bp->hclk);
 		clk_disable_unprepare(bp->pclk);
-		free_netdev(dev);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 517c09d72c4a..084191b6fad2 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -12,6 +12,7 @@
 
 #define MACB_GREGS_NBR 16
 #define MACB_GREGS_VERSION 1
+#define MACB_MAX_QUEUES 8
 
 /* MACB register offsets */
 #define MACB_NCR 0x0000
@@ -89,6 +90,13 @@
 #define GEM_DCFG6 0x0294
 #define GEM_DCFG7 0x0298
 
+#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
+#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
+#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
+#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
+#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
+#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
+
 /* Bitfields in NCR */
 #define MACB_LB_OFFSET 0
 #define MACB_LB_SIZE 1
@@ -376,6 +384,10 @@
 	__raw_readl((port)->regs + GEM_##reg)
 #define gem_writel(port, reg, value) \
 	__raw_writel((value), (port)->regs + GEM_##reg)
+#define queue_readl(queue, reg) \
+	__raw_readl((queue)->bp->regs + (queue)->reg)
+#define queue_writel(queue, reg, value) \
+	__raw_writel((value), (queue)->bp->regs + (queue)->reg)
 
 /*
  * Conditional GEM/MACB macros. These perform the operation to the correct
@@ -597,6 +609,23 @@ struct macb_config {
 	unsigned int dma_burst_length;
 };
 
+struct macb_queue {
+	struct macb *bp;
+	int irq;
+
+	unsigned int ISR;
+	unsigned int IER;
+	unsigned int IDR;
+	unsigned int IMR;
+	unsigned int TBQP;
+
+	unsigned int tx_head, tx_tail;
+	struct macb_dma_desc *tx_ring;
+	struct macb_tx_skb *tx_skb;
+	dma_addr_t tx_ring_dma;
+	struct work_struct tx_error_task;
+};
+
 struct macb {
 	void __iomem *regs;
 
@@ -607,9 +636,8 @@ struct macb {
 	void *rx_buffers;
 	size_t rx_buffer_size;
 
-	unsigned int tx_head, tx_tail;
-	struct macb_dma_desc *tx_ring;
-	struct macb_tx_skb *tx_skb;
+	unsigned int num_queues;
+	struct macb_queue queues[MACB_MAX_QUEUES];
 
 	spinlock_t lock;
 	struct platform_device *pdev;
@@ -618,7 +646,6 @@ struct macb {
 	struct clk *tx_clk;
 	struct net_device *dev;
 	struct napi_struct napi;
-	struct work_struct tx_error_task;
 	struct net_device_stats stats;
 	union {
 		struct macb_stats macb;
@@ -626,7 +653,6 @@ struct macb {
 	} hw_stats;
 
 	dma_addr_t rx_ring_dma;
-	dma_addr_t tx_ring_dma;
 	dma_addr_t rx_buffers_dma;
 
 	struct macb_or_gem_ops macbgem_ops;