Diffstat (limited to 'drivers/net/vmxnet3')

 -rw-r--r--  drivers/net/vmxnet3/upt1_defs.h       |    8
 -rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h    |    6
 -rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c     | 1282
 -rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c |  476
 -rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h     |  102

 5 files changed, 1293 insertions, 581 deletions
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 37108fb226d3..969c751ee404 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -88,9 +88,9 @@ struct UPT1_RSSConf {
88 | 88 | ||
89 | /* features */ | 89 | /* features */ |
90 | enum { | 90 | enum { |
91 | UPT1_F_RXCSUM = 0x0001, /* rx csum verification */ | 91 | UPT1_F_RXCSUM = cpu_to_le64(0x0001), /* rx csum verification */ |
92 | UPT1_F_RSS = 0x0002, | 92 | UPT1_F_RSS = cpu_to_le64(0x0002), |
93 | UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */ | 93 | UPT1_F_RXVLAN = cpu_to_le64(0x0004), /* VLAN tag stripping */ |
94 | UPT1_F_LRO = 0x0008, | 94 | UPT1_F_LRO = cpu_to_le64(0x0008), |
95 | }; | 95 | }; |
96 | #endif | 96 | #endif |
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index ca7727b940ad..4d84912c99ba 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -523,9 +523,9 @@ struct Vmxnet3_RxFilterConf {
523 | #define VMXNET3_PM_MAX_PATTERN_SIZE 128 | 523 | #define VMXNET3_PM_MAX_PATTERN_SIZE 128 |
524 | #define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8) | 524 | #define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8) |
525 | 525 | ||
526 | #define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */ | 526 | #define VMXNET3_PM_WAKEUP_MAGIC cpu_to_le16(0x01) /* wake up on magic pkts */ |
527 | #define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching | 527 | #define VMXNET3_PM_WAKEUP_FILTER cpu_to_le16(0x02) /* wake up on pkts matching |
528 | * filters */ | 528 | * filters */ |
529 | 529 | ||
530 | 530 | ||
531 | struct Vmxnet3_PM_PktFilter { | 531 | struct Vmxnet3_PM_PktFilter { |
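
Note: the two hunks above move byte swapping to compile time. The feature and wake-up flags are pre-converted with cpu_to_le64()/cpu_to_le16(), so they can be OR'd straight into the little-endian fields of the memory shared with the device; this is also what lets the set_flag_le64()/reset_flag_le64() helpers be deleted further down. A minimal sketch of the resulting pattern (shared_conf and enable_lro are illustrative names, not driver code):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	enum {
		UPT1_F_LRO = cpu_to_le64(0x0008),	/* pre-swapped constant */
	};

	struct shared_conf {
		__le64 uptFeatures;	/* device reads this as little-endian */
	};

	static void enable_lro(struct shared_conf *conf)
	{
		/* no per-use cpu_to_le64() needed; the constant already is */
		conf->uptFeatures |= UPT1_F_LRO;
	}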
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index abe0ff53daf3..67402350d0df 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -44,6 +44,12 @@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
44 | 44 | ||
45 | static atomic_t devices_found; | 45 | static atomic_t devices_found; |
46 | 46 | ||
47 | #define VMXNET3_MAX_DEVICES 10 | ||
48 | static int enable_mq = 1; | ||
49 | static int irq_share_mode; | ||
50 | |||
51 | static void | ||
52 | vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); | ||
47 | 53 | ||
48 | /* | 54 | /* |
49 | * Enable/Disable the given intr | 55 | * Enable/Disable the given intr |
@@ -99,7 +105,7 @@ vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
99 | static bool | 105 | static bool |
100 | vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) | 106 | vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) |
101 | { | 107 | { |
102 | return netif_queue_stopped(adapter->netdev); | 108 | return tq->stopped; |
103 | } | 109 | } |
104 | 110 | ||
105 | 111 | ||
@@ -107,7 +113,7 @@ static void
107 | vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) | 113 | vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) |
108 | { | 114 | { |
109 | tq->stopped = false; | 115 | tq->stopped = false; |
110 | netif_start_queue(adapter->netdev); | 116 | netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); |
111 | } | 117 | } |
112 | 118 | ||
113 | 119 | ||
@@ -115,7 +121,7 @@ static void
115 | vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) | 121 | vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) |
116 | { | 122 | { |
117 | tq->stopped = false; | 123 | tq->stopped = false; |
118 | netif_wake_queue(adapter->netdev); | 124 | netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); |
119 | } | 125 | } |
120 | 126 | ||
121 | 127 | ||
@@ -124,7 +130,7 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
124 | { | 130 | { |
125 | tq->stopped = true; | 131 | tq->stopped = true; |
126 | tq->num_stop++; | 132 | tq->num_stop++; |
127 | netif_stop_queue(adapter->netdev); | 133 | netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); |
128 | } | 134 | } |
129 | 135 | ||
130 | 136 | ||
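
Note: the three hunks above convert the single-queue start/stop/wake calls to their per-subqueue equivalents; the subqueue index is recovered by pointer arithmetic on the tx_queue array. A condensed sketch of the pattern (example_tq_stop is an illustrative name):

	static void example_tq_stop(struct vmxnet3_tx_queue *tq,
				    struct vmxnet3_adapter *adapter)
	{
		tq->stopped = true;
		tq->num_stop++;
		/* tq - adapter->tx_queue is this queue's index,
		 * 0 .. num_tx_queues - 1 */
		netif_stop_subqueue(adapter->netdev, tq - adapter->tx_queue);
	}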
@@ -135,9 +141,14 @@ static void
135 | vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) | 141 | vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) |
136 | { | 142 | { |
137 | u32 ret; | 143 | u32 ret; |
144 | int i; | ||
145 | unsigned long flags; | ||
138 | 146 | ||
147 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
139 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); | 148 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); |
140 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 149 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
150 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
151 | |||
141 | adapter->link_speed = ret >> 16; | 152 | adapter->link_speed = ret >> 16; |
142 | if (ret & 1) { /* Link is up. */ | 153 | if (ret & 1) { /* Link is up. */ |
143 | printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", | 154 | printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", |
@@ -145,22 +156,29 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
145 | if (!netif_carrier_ok(adapter->netdev)) | 156 | if (!netif_carrier_ok(adapter->netdev)) |
146 | netif_carrier_on(adapter->netdev); | 157 | netif_carrier_on(adapter->netdev); |
147 | 158 | ||
148 | if (affectTxQueue) | 159 | if (affectTxQueue) { |
149 | vmxnet3_tq_start(&adapter->tx_queue, adapter); | 160 | for (i = 0; i < adapter->num_tx_queues; i++) |
161 | vmxnet3_tq_start(&adapter->tx_queue[i], | ||
162 | adapter); | ||
163 | } | ||
150 | } else { | 164 | } else { |
151 | printk(KERN_INFO "%s: NIC Link is Down\n", | 165 | printk(KERN_INFO "%s: NIC Link is Down\n", |
152 | adapter->netdev->name); | 166 | adapter->netdev->name); |
153 | if (netif_carrier_ok(adapter->netdev)) | 167 | if (netif_carrier_ok(adapter->netdev)) |
154 | netif_carrier_off(adapter->netdev); | 168 | netif_carrier_off(adapter->netdev); |
155 | 169 | ||
156 | if (affectTxQueue) | 170 | if (affectTxQueue) { |
157 | vmxnet3_tq_stop(&adapter->tx_queue, adapter); | 171 | for (i = 0; i < adapter->num_tx_queues; i++) |
172 | vmxnet3_tq_stop(&adapter->tx_queue[i], adapter); | ||
173 | } | ||
158 | } | 174 | } |
159 | } | 175 | } |
160 | 176 | ||
161 | static void | 177 | static void |
162 | vmxnet3_process_events(struct vmxnet3_adapter *adapter) | 178 | vmxnet3_process_events(struct vmxnet3_adapter *adapter) |
163 | { | 179 | { |
180 | int i; | ||
181 | unsigned long flags; | ||
164 | u32 events = le32_to_cpu(adapter->shared->ecr); | 182 | u32 events = le32_to_cpu(adapter->shared->ecr); |
165 | if (!events) | 183 | if (!events) |
166 | return; | 184 | return; |
@@ -173,19 +191,23 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
173 | 191 | ||
174 | /* Check if there is an error on xmit/recv queues */ | 192 | /* Check if there is an error on xmit/recv queues */ |
175 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { | 193 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { |
194 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
176 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 195 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
177 | VMXNET3_CMD_GET_QUEUE_STATUS); | 196 | VMXNET3_CMD_GET_QUEUE_STATUS); |
178 | 197 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | |
179 | if (adapter->tqd_start->status.stopped) { | 198 | |
180 | printk(KERN_ERR "%s: tq error 0x%x\n", | 199 | for (i = 0; i < adapter->num_tx_queues; i++) |
181 | adapter->netdev->name, | 200 | if (adapter->tqd_start[i].status.stopped) |
182 | le32_to_cpu(adapter->tqd_start->status.error)); | 201 | dev_err(&adapter->netdev->dev, |
183 | } | 202 | "%s: tq[%d] error 0x%x\n", |
184 | if (adapter->rqd_start->status.stopped) { | 203 | adapter->netdev->name, i, le32_to_cpu( |
185 | printk(KERN_ERR "%s: rq error 0x%x\n", | 204 | adapter->tqd_start[i].status.error)); |
186 | adapter->netdev->name, | 205 | for (i = 0; i < adapter->num_rx_queues; i++) |
187 | adapter->rqd_start->status.error); | 206 | if (adapter->rqd_start[i].status.stopped) |
188 | } | 207 | dev_err(&adapter->netdev->dev, |
208 | "%s: rq[%d] error 0x%x\n", | ||
209 | adapter->netdev->name, i, | ||
210 | adapter->rqd_start[i].status.error); | ||
189 | 211 | ||
190 | schedule_work(&adapter->work); | 212 | schedule_work(&adapter->work); |
191 | } | 213 | } |
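
Note: the cmd_lock taken above (and throughout the rest of the patch) serializes access to the CMD register. Issuing a command is a write followed by a dependent read of the same register, and the pair must not interleave with a command issued from another context. A sketch of the idiom, assuming callers may run in interrupt context (example_issue_cmd is an illustrative name):

	static u32 example_issue_cmd(struct vmxnet3_adapter *adapter, u32 cmd)
	{
		unsigned long flags;
		u32 ret;

		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, cmd);
		ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		return ret;
	}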
@@ -410,7 +432,7 @@ vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
410 | } | 432 | } |
411 | 433 | ||
412 | 434 | ||
413 | void | 435 | static void |
414 | vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, | 436 | vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, |
415 | struct vmxnet3_adapter *adapter) | 437 | struct vmxnet3_adapter *adapter) |
416 | { | 438 | { |
@@ -437,6 +459,17 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
437 | } | 459 | } |
438 | 460 | ||
439 | 461 | ||
462 | /* Destroy all tx queues */ | ||
463 | void | ||
464 | vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter) | ||
465 | { | ||
466 | int i; | ||
467 | |||
468 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
469 | vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter); | ||
470 | } | ||
471 | |||
472 | |||
440 | static void | 473 | static void |
441 | vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, | 474 | vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, |
442 | struct vmxnet3_adapter *adapter) | 475 | struct vmxnet3_adapter *adapter) |
@@ -518,6 +551,14 @@ err:
518 | return -ENOMEM; | 551 | return -ENOMEM; |
519 | } | 552 | } |
520 | 553 | ||
554 | static void | ||
555 | vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter) | ||
556 | { | ||
557 | int i; | ||
558 | |||
559 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
560 | vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter); | ||
561 | } | ||
521 | 562 | ||
522 | /* | 563 | /* |
523 | * starting from ring->next2fill, allocate rx buffers for the given ring | 564 | * starting from ring->next2fill, allocate rx buffers for the given ring |
@@ -534,7 +575,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
534 | struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; | 575 | struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; |
535 | u32 val; | 576 | u32 val; |
536 | 577 | ||
537 | while (num_allocated < num_to_alloc) { | 578 | while (num_allocated <= num_to_alloc) { |
538 | struct vmxnet3_rx_buf_info *rbi; | 579 | struct vmxnet3_rx_buf_info *rbi; |
539 | union Vmxnet3_GenericDesc *gd; | 580 | union Vmxnet3_GenericDesc *gd; |
540 | 581 | ||
@@ -580,9 +621,15 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
580 | 621 | ||
581 | BUG_ON(rbi->dma_addr == 0); | 622 | BUG_ON(rbi->dma_addr == 0); |
582 | gd->rxd.addr = cpu_to_le64(rbi->dma_addr); | 623 | gd->rxd.addr = cpu_to_le64(rbi->dma_addr); |
583 | gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT) | 624 | gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) |
584 | | val | rbi->len); | 625 | | val | rbi->len); |
585 | 626 | ||
627 | /* Fill the last buffer but dont mark it ready, or else the | ||
628 | * device will think that the queue is full */ | ||
629 | if (num_allocated == num_to_alloc) | ||
630 | break; | ||
631 | |||
632 | gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT); | ||
586 | num_allocated++; | 633 | num_allocated++; |
587 | vmxnet3_cmd_ring_adv_next2fill(ring); | 634 | vmxnet3_cmd_ring_adv_next2fill(ring); |
588 | } | 635 | } |
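
Note: the rewritten fill loop above relies on the descriptor generation bit for ownership — a descriptor belongs to the device only when its gen bit matches the ring's current generation. Each descriptor is first written with the inverted bit (invisible to the device) and then flipped; the final buffer is deliberately left unflipped so a completely full ring is never presented, since a full ring and an empty one would otherwise look identical. Annotated restatement of the key lines from the hunk:

	gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
				   | val | rbi->len);	/* staged, not owned */
	if (num_allocated == num_to_alloc)
		break;					/* keep one unready */
	/* flip the gen bit: descriptor now belongs to the device */
	gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);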
@@ -732,6 +779,17 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
732 | } | 779 | } |
733 | 780 | ||
734 | 781 | ||
782 | /* Init all tx queues */ | ||
783 | static void | ||
784 | vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter) | ||
785 | { | ||
786 | int i; | ||
787 | |||
788 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
789 | vmxnet3_tq_init(&adapter->tx_queue[i], adapter); | ||
790 | } | ||
791 | |||
792 | |||
735 | /* | 793 | /* |
736 | * parse and copy relevant protocol headers: | 794 | * parse and copy relevant protocol headers: |
737 | * For a tso pkt, relevant headers are L2/3/4 including options | 795 | * For a tso pkt, relevant headers are L2/3/4 including options |
@@ -756,36 +814,31 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
756 | { | 814 | { |
757 | struct Vmxnet3_TxDataDesc *tdd; | 815 | struct Vmxnet3_TxDataDesc *tdd; |
758 | 816 | ||
759 | if (ctx->mss) { | 817 | if (ctx->mss) { /* TSO */ |
760 | ctx->eth_ip_hdr_size = skb_transport_offset(skb); | 818 | ctx->eth_ip_hdr_size = skb_transport_offset(skb); |
761 | ctx->l4_hdr_size = ((struct tcphdr *) | 819 | ctx->l4_hdr_size = ((struct tcphdr *) |
762 | skb_transport_header(skb))->doff * 4; | 820 | skb_transport_header(skb))->doff * 4; |
763 | ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; | 821 | ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; |
764 | } else { | 822 | } else { |
765 | unsigned int pull_size; | ||
766 | |||
767 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 823 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
768 | ctx->eth_ip_hdr_size = skb_transport_offset(skb); | 824 | ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); |
769 | 825 | ||
770 | if (ctx->ipv4) { | 826 | if (ctx->ipv4) { |
771 | struct iphdr *iph = (struct iphdr *) | 827 | struct iphdr *iph = (struct iphdr *) |
772 | skb_network_header(skb); | 828 | skb_network_header(skb); |
773 | if (iph->protocol == IPPROTO_TCP) { | 829 | if (iph->protocol == IPPROTO_TCP) |
774 | pull_size = ctx->eth_ip_hdr_size + | ||
775 | sizeof(struct tcphdr); | ||
776 | |||
777 | if (unlikely(!pskb_may_pull(skb, | ||
778 | pull_size))) { | ||
779 | goto err; | ||
780 | } | ||
781 | ctx->l4_hdr_size = ((struct tcphdr *) | 830 | ctx->l4_hdr_size = ((struct tcphdr *) |
782 | skb_transport_header(skb))->doff * 4; | 831 | skb_transport_header(skb))->doff * 4; |
783 | } else if (iph->protocol == IPPROTO_UDP) { | 832 | else if (iph->protocol == IPPROTO_UDP) |
833 | /* | ||
834 | * Use tcp header size so that bytes to | ||
835 | * be copied are more than required by | ||
836 | * the device. | ||
837 | */ | ||
784 | ctx->l4_hdr_size = | 838 | ctx->l4_hdr_size = |
785 | sizeof(struct udphdr); | 839 | sizeof(struct tcphdr); |
786 | } else { | 840 | else |
787 | ctx->l4_hdr_size = 0; | 841 | ctx->l4_hdr_size = 0; |
788 | } | ||
789 | } else { | 842 | } else { |
790 | /* for simplicity, don't copy L4 headers */ | 843 | /* for simplicity, don't copy L4 headers */ |
791 | ctx->l4_hdr_size = 0; | 844 | ctx->l4_hdr_size = 0; |
@@ -846,7 +899,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
846 | * Transmits a pkt thru a given tq | 899 | * Transmits a pkt thru a given tq |
847 | * Returns: | 900 | * Returns: |
848 | * NETDEV_TX_OK: descriptors are setup successfully | 901 | * NETDEV_TX_OK: descriptors are setup successfully |
849 | * NETDEV_TX_OK: error occured, the pkt is dropped | 902 | * NETDEV_TX_OK: error occurred, the pkt is dropped |
850 | * NETDEV_TX_BUSY: tx ring is full, queue is stopped | 903 | * NETDEV_TX_BUSY: tx ring is full, queue is stopped |
851 | * | 904 | * |
852 | * Side-effects: | 905 | * Side-effects: |
@@ -873,7 +926,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
873 | count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + | 926 | count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + |
874 | skb_shinfo(skb)->nr_frags + 1; | 927 | skb_shinfo(skb)->nr_frags + 1; |
875 | 928 | ||
876 | ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP)); | 929 | ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP)); |
877 | 930 | ||
878 | ctx.mss = skb_shinfo(skb)->gso_size; | 931 | ctx.mss = skb_shinfo(skb)->gso_size; |
879 | if (ctx.mss) { | 932 | if (ctx.mss) { |
@@ -903,6 +956,21 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
903 | } | 956 | } |
904 | } | 957 | } |
905 | 958 | ||
959 | spin_lock_irqsave(&tq->tx_lock, flags); | ||
960 | |||
961 | if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { | ||
962 | tq->stats.tx_ring_full++; | ||
963 | dev_dbg(&adapter->netdev->dev, | ||
964 | "tx queue stopped on %s, next2comp %u" | ||
965 | " next2fill %u\n", adapter->netdev->name, | ||
966 | tq->tx_ring.next2comp, tq->tx_ring.next2fill); | ||
967 | |||
968 | vmxnet3_tq_stop(tq, adapter); | ||
969 | spin_unlock_irqrestore(&tq->tx_lock, flags); | ||
970 | return NETDEV_TX_BUSY; | ||
971 | } | ||
972 | |||
973 | |||
906 | ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter); | 974 | ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter); |
907 | if (ret >= 0) { | 975 | if (ret >= 0) { |
908 | BUG_ON(ret <= 0 && ctx.copy_size != 0); | 976 | BUG_ON(ret <= 0 && ctx.copy_size != 0); |
@@ -923,21 +991,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
923 | } | 991 | } |
924 | } else { | 992 | } else { |
925 | tq->stats.drop_hdr_inspect_err++; | 993 | tq->stats.drop_hdr_inspect_err++; |
926 | goto drop_pkt; | 994 | goto unlock_drop_pkt; |
927 | } | ||
928 | |||
929 | spin_lock_irqsave(&tq->tx_lock, flags); | ||
930 | |||
931 | if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { | ||
932 | tq->stats.tx_ring_full++; | ||
933 | dev_dbg(&adapter->netdev->dev, | ||
934 | "tx queue stopped on %s, next2comp %u" | ||
935 | " next2fill %u\n", adapter->netdev->name, | ||
936 | tq->tx_ring.next2comp, tq->tx_ring.next2fill); | ||
937 | |||
938 | vmxnet3_tq_stop(tq, adapter); | ||
939 | spin_unlock_irqrestore(&tq->tx_lock, flags); | ||
940 | return NETDEV_TX_BUSY; | ||
941 | } | 995 | } |
942 | 996 | ||
943 | /* fill tx descs related to addr & len */ | 997 | /* fill tx descs related to addr & len */ |
@@ -1000,7 +1054,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1000 | if (le32_to_cpu(tq->shared->txNumDeferred) >= | 1054 | if (le32_to_cpu(tq->shared->txNumDeferred) >= |
1001 | le32_to_cpu(tq->shared->txThreshold)) { | 1055 | le32_to_cpu(tq->shared->txThreshold)) { |
1002 | tq->shared->txNumDeferred = 0; | 1056 | tq->shared->txNumDeferred = 0; |
1003 | VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, | 1057 | VMXNET3_WRITE_BAR0_REG(adapter, |
1058 | VMXNET3_REG_TXPROD + tq->qid * 8, | ||
1004 | tq->tx_ring.next2fill); | 1059 | tq->tx_ring.next2fill); |
1005 | } | 1060 | } |
1006 | 1061 | ||
@@ -1008,6 +1063,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1008 | 1063 | ||
1009 | hdr_too_big: | 1064 | hdr_too_big: |
1010 | tq->stats.drop_oversized_hdr++; | 1065 | tq->stats.drop_oversized_hdr++; |
1066 | unlock_drop_pkt: | ||
1067 | spin_unlock_irqrestore(&tq->tx_lock, flags); | ||
1011 | drop_pkt: | 1068 | drop_pkt: |
1012 | tq->stats.drop_total++; | 1069 | tq->stats.drop_total++; |
1013 | dev_kfree_skb(skb); | 1070 | dev_kfree_skb(skb); |
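
Note: the hunks above reorder vmxnet3_tq_xmit(): the ring-space check now runs under tx_lock before any header bytes are copied into the data ring, so a full ring costs no copy work, and the new unlock_drop_pkt label lets the header-parse failure path release the lock before dropping. Condensed control flow after the change:

	spin_lock_irqsave(&tq->tx_lock, flags);
	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		vmxnet3_tq_stop(tq, adapter);	/* the stack will retry */
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}
	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret < 0)
		goto unlock_drop_pkt;		/* unlock, count, free skb */
	/* ... fill descriptors and hand off to the device ... */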
@@ -1020,7 +1077,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1020 | { | 1077 | { |
1021 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1078 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1022 | 1079 | ||
1023 | return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev); | 1080 | BUG_ON(skb->queue_mapping > adapter->num_tx_queues); |
1081 | return vmxnet3_tq_xmit(skb, | ||
1082 | &adapter->tx_queue[skb->queue_mapping], | ||
1083 | adapter, netdev); | ||
1024 | } | 1084 | } |
1025 | 1085 | ||
1026 | 1086 | ||
@@ -1029,7 +1089,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1029 | struct sk_buff *skb, | 1089 | struct sk_buff *skb, |
1030 | union Vmxnet3_GenericDesc *gdesc) | 1090 | union Vmxnet3_GenericDesc *gdesc) |
1031 | { | 1091 | { |
1032 | if (!gdesc->rcd.cnc && adapter->rxcsum) { | 1092 | if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { |
1033 | /* typical case: TCP/UDP over IP and both csums are correct */ | 1093 | /* typical case: TCP/UDP over IP and both csums are correct */ |
1034 | if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == | 1094 | if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == |
1035 | VMXNET3_RCD_CSUM_OK) { | 1095 | VMXNET3_RCD_CSUM_OK) { |
@@ -1042,11 +1102,11 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1042 | skb->csum = htons(gdesc->rcd.csum); | 1102 | skb->csum = htons(gdesc->rcd.csum); |
1043 | skb->ip_summed = CHECKSUM_PARTIAL; | 1103 | skb->ip_summed = CHECKSUM_PARTIAL; |
1044 | } else { | 1104 | } else { |
1045 | skb->ip_summed = CHECKSUM_NONE; | 1105 | skb_checksum_none_assert(skb); |
1046 | } | 1106 | } |
1047 | } | 1107 | } |
1048 | } else { | 1108 | } else { |
1049 | skb->ip_summed = CHECKSUM_NONE; | 1109 | skb_checksum_none_assert(skb); |
1050 | } | 1110 | } |
1051 | } | 1111 | } |
1052 | 1112 | ||
@@ -1082,8 +1142,11 @@ static int
1082 | vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | 1142 | vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, |
1083 | struct vmxnet3_adapter *adapter, int quota) | 1143 | struct vmxnet3_adapter *adapter, int quota) |
1084 | { | 1144 | { |
1085 | static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2}; | 1145 | static const u32 rxprod_reg[2] = { |
1146 | VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2 | ||
1147 | }; | ||
1086 | u32 num_rxd = 0; | 1148 | u32 num_rxd = 0; |
1149 | bool skip_page_frags = false; | ||
1087 | struct Vmxnet3_RxCompDesc *rcd; | 1150 | struct Vmxnet3_RxCompDesc *rcd; |
1088 | struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; | 1151 | struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; |
1089 | #ifdef __BIG_ENDIAN_BITFIELD | 1152 | #ifdef __BIG_ENDIAN_BITFIELD |
@@ -1094,11 +1157,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1094 | &rxComp); | 1157 | &rxComp); |
1095 | while (rcd->gen == rq->comp_ring.gen) { | 1158 | while (rcd->gen == rq->comp_ring.gen) { |
1096 | struct vmxnet3_rx_buf_info *rbi; | 1159 | struct vmxnet3_rx_buf_info *rbi; |
1097 | struct sk_buff *skb; | 1160 | struct sk_buff *skb, *new_skb = NULL; |
1161 | struct page *new_page = NULL; | ||
1098 | int num_to_alloc; | 1162 | int num_to_alloc; |
1099 | struct Vmxnet3_RxDesc *rxd; | 1163 | struct Vmxnet3_RxDesc *rxd; |
1100 | u32 idx, ring_idx; | 1164 | u32 idx, ring_idx; |
1101 | 1165 | struct vmxnet3_cmd_ring *ring = NULL; | |
1102 | if (num_rxd >= quota) { | 1166 | if (num_rxd >= quota) { |
1103 | /* we may stop even before we see the EOP desc of | 1167 | /* we may stop even before we see the EOP desc of |
1104 | * the current pkt | 1168 | * the current pkt |
@@ -1106,9 +1170,10 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1106 | break; | 1170 | break; |
1107 | } | 1171 | } |
1108 | num_rxd++; | 1172 | num_rxd++; |
1109 | 1173 | BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); | |
1110 | idx = rcd->rxdIdx; | 1174 | idx = rcd->rxdIdx; |
1111 | ring_idx = rcd->rqID == rq->qid ? 0 : 1; | 1175 | ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; |
1176 | ring = rq->rx_ring + ring_idx; | ||
1112 | vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, | 1177 | vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, |
1113 | &rxCmdDesc); | 1178 | &rxCmdDesc); |
1114 | rbi = rq->buf_info[ring_idx] + idx; | 1179 | rbi = rq->buf_info[ring_idx] + idx; |
@@ -1137,37 +1202,80 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1137 | goto rcd_done; | 1202 | goto rcd_done; |
1138 | } | 1203 | } |
1139 | 1204 | ||
1205 | skip_page_frags = false; | ||
1140 | ctx->skb = rbi->skb; | 1206 | ctx->skb = rbi->skb; |
1141 | rbi->skb = NULL; | 1207 | new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN); |
1208 | if (new_skb == NULL) { | ||
1209 | /* Skb allocation failed, do not handover this | ||
1210 | * skb to stack. Reuse it. Drop the existing pkt | ||
1211 | */ | ||
1212 | rq->stats.rx_buf_alloc_failure++; | ||
1213 | ctx->skb = NULL; | ||
1214 | rq->stats.drop_total++; | ||
1215 | skip_page_frags = true; | ||
1216 | goto rcd_done; | ||
1217 | } | ||
1142 | 1218 | ||
1143 | pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, | 1219 | pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, |
1144 | PCI_DMA_FROMDEVICE); | 1220 | PCI_DMA_FROMDEVICE); |
1145 | 1221 | ||
1146 | skb_put(ctx->skb, rcd->len); | 1222 | skb_put(ctx->skb, rcd->len); |
1223 | |||
1224 | /* Immediate refill */ | ||
1225 | new_skb->dev = adapter->netdev; | ||
1226 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1227 | rbi->skb = new_skb; | ||
1228 | rbi->dma_addr = pci_map_single(adapter->pdev, | ||
1229 | rbi->skb->data, rbi->len, | ||
1230 | PCI_DMA_FROMDEVICE); | ||
1231 | rxd->addr = cpu_to_le64(rbi->dma_addr); | ||
1232 | rxd->len = rbi->len; | ||
1233 | |||
1147 | } else { | 1234 | } else { |
1148 | BUG_ON(ctx->skb == NULL); | 1235 | BUG_ON(ctx->skb == NULL && !skip_page_frags); |
1236 | |||
1149 | /* non SOP buffer must be type 1 in most cases */ | 1237 | /* non SOP buffer must be type 1 in most cases */ |
1150 | if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) { | 1238 | BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE); |
1151 | BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); | 1239 | BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); |
1152 | 1240 | ||
1153 | if (rcd->len) { | 1241 | /* If an sop buffer was dropped, skip all |
1154 | pci_unmap_page(adapter->pdev, | 1242 | * following non-sop fragments. They will be reused. |
1155 | rbi->dma_addr, rbi->len, | 1243 | */ |
1156 | PCI_DMA_FROMDEVICE); | 1244 | if (skip_page_frags) |
1245 | goto rcd_done; | ||
1157 | 1246 | ||
1158 | vmxnet3_append_frag(ctx->skb, rcd, rbi); | 1247 | new_page = alloc_page(GFP_ATOMIC); |
1159 | rbi->page = NULL; | 1248 | if (unlikely(new_page == NULL)) { |
1160 | } | 1249 | /* Replacement page frag could not be allocated. |
1161 | } else { | 1250 | * Reuse this page. Drop the pkt and free the |
1162 | /* | 1251 | * skb which contained this page as a frag. Skip |
1163 | * The only time a non-SOP buffer is type 0 is | 1252 | * processing all the following non-sop frags. |
1164 | * when it's EOP and error flag is raised, which | ||
1165 | * has already been handled. | ||
1166 | */ | 1253 | */ |
1167 | BUG_ON(true); | 1254 | rq->stats.rx_buf_alloc_failure++; |
1255 | dev_kfree_skb(ctx->skb); | ||
1256 | ctx->skb = NULL; | ||
1257 | skip_page_frags = true; | ||
1258 | goto rcd_done; | ||
1168 | } | 1259 | } |
1260 | |||
1261 | if (rcd->len) { | ||
1262 | pci_unmap_page(adapter->pdev, | ||
1263 | rbi->dma_addr, rbi->len, | ||
1264 | PCI_DMA_FROMDEVICE); | ||
1265 | |||
1266 | vmxnet3_append_frag(ctx->skb, rcd, rbi); | ||
1267 | } | ||
1268 | |||
1269 | /* Immediate refill */ | ||
1270 | rbi->page = new_page; | ||
1271 | rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page, | ||
1272 | 0, PAGE_SIZE, | ||
1273 | PCI_DMA_FROMDEVICE); | ||
1274 | rxd->addr = cpu_to_le64(rbi->dma_addr); | ||
1275 | rxd->len = rbi->len; | ||
1169 | } | 1276 | } |
1170 | 1277 | ||
1278 | |||
1171 | skb = ctx->skb; | 1279 | skb = ctx->skb; |
1172 | if (rcd->eop) { | 1280 | if (rcd->eop) { |
1173 | skb->len += skb->data_len; | 1281 | skb->len += skb->data_len; |
@@ -1188,26 +1296,27 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1188 | } | 1296 | } |
1189 | 1297 | ||
1190 | rcd_done: | 1298 | rcd_done: |
1191 | /* device may skip some rx descs */ | 1299 | /* device may have skipped some rx descs */ |
1192 | rq->rx_ring[ring_idx].next2comp = idx; | 1300 | ring->next2comp = idx; |
1193 | VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp, | 1301 | num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); |
1194 | rq->rx_ring[ring_idx].size); | 1302 | ring = rq->rx_ring + ring_idx; |
1195 | 1303 | while (num_to_alloc) { | |
1196 | /* refill rx buffers frequently to avoid starving the h/w */ | 1304 | vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, |
1197 | num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring + | 1305 | &rxCmdDesc); |
1198 | ring_idx); | 1306 | BUG_ON(!rxd->addr); |
1199 | if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq, | 1307 | |
1200 | ring_idx, adapter))) { | 1308 | /* Recv desc is ready to be used by the device */ |
1201 | vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc, | 1309 | rxd->gen = ring->gen; |
1202 | adapter); | 1310 | vmxnet3_cmd_ring_adv_next2fill(ring); |
1203 | 1311 | num_to_alloc--; | |
1204 | /* if needed, update the register */ | 1312 | } |
1205 | if (unlikely(rq->shared->updateRxProd)) { | 1313 | |
1206 | VMXNET3_WRITE_BAR0_REG(adapter, | 1314 | /* if needed, update the register */ |
1207 | rxprod_reg[ring_idx] + rq->qid * 8, | 1315 | if (unlikely(rq->shared->updateRxProd)) { |
1208 | rq->rx_ring[ring_idx].next2fill); | 1316 | VMXNET3_WRITE_BAR0_REG(adapter, |
1209 | rq->uncommitted[ring_idx] = 0; | 1317 | rxprod_reg[ring_idx] + rq->qid * 8, |
1210 | } | 1318 | ring->next2fill); |
1319 | rq->uncommitted[ring_idx] = 0; | ||
1211 | } | 1320 | } |
1212 | 1321 | ||
1213 | vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); | 1322 | vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); |
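
Note: the receive path above now refills each slot immediately — a replacement skb (or page, for body fragments) is allocated before the filled buffer is unmapped and handed to the stack. If the allocation fails, the old buffer is recycled in place and the packet dropped, so the ring never leaks a descriptor; skip_page_frags then discards the remaining non-SOP fragments of that packet. Sketch of the SOP case (the recycle label stands in for the hunk's rcd_done path):

	new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
	if (new_skb == NULL) {
		rq->stats.rx_buf_alloc_failure++;
		skip_page_frags = true;
		goto recycle;		/* keep rbi->skb, drop this pkt */
	}
	pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
			 PCI_DMA_FROMDEVICE);
	skb_put(ctx->skb, rcd->len);	/* old skb goes up the stack */

	skb_reserve(new_skb, NET_IP_ALIGN);	/* map the fresh one in its place */
	rbi->skb = new_skb;
	rbi->dma_addr = pci_map_single(adapter->pdev, new_skb->data,
				       rbi->len, PCI_DMA_FROMDEVICE);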
@@ -1260,6 +1369,16 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1260 | } | 1369 | } |
1261 | 1370 | ||
1262 | 1371 | ||
1372 | static void | ||
1373 | vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter) | ||
1374 | { | ||
1375 | int i; | ||
1376 | |||
1377 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
1378 | vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter); | ||
1379 | } | ||
1380 | |||
1381 | |||
1263 | void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, | 1382 | void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, |
1264 | struct vmxnet3_adapter *adapter) | 1383 | struct vmxnet3_adapter *adapter) |
1265 | { | 1384 | { |
@@ -1351,6 +1470,25 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1351 | 1470 | ||
1352 | 1471 | ||
1353 | static int | 1472 | static int |
1473 | vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter) | ||
1474 | { | ||
1475 | int i, err = 0; | ||
1476 | |||
1477 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
1478 | err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter); | ||
1479 | if (unlikely(err)) { | ||
1480 | dev_err(&adapter->netdev->dev, "%s: failed to " | ||
1481 | "initialize rx queue%i\n", | ||
1482 | adapter->netdev->name, i); | ||
1483 | break; | ||
1484 | } | ||
1485 | } | ||
1486 | return err; | ||
1487 | |||
1488 | } | ||
1489 | |||
1490 | |||
1491 | static int | ||
1354 | vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) | 1492 | vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) |
1355 | { | 1493 | { |
1356 | int i; | 1494 | int i; |
@@ -1398,32 +1536,176 @@ err:
1398 | 1536 | ||
1399 | 1537 | ||
1400 | static int | 1538 | static int |
1539 | vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter) | ||
1540 | { | ||
1541 | int i, err = 0; | ||
1542 | |||
1543 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
1544 | err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter); | ||
1545 | if (unlikely(err)) { | ||
1546 | dev_err(&adapter->netdev->dev, | ||
1547 | "%s: failed to create rx queue%i\n", | ||
1548 | adapter->netdev->name, i); | ||
1549 | goto err_out; | ||
1550 | } | ||
1551 | } | ||
1552 | return err; | ||
1553 | err_out: | ||
1554 | vmxnet3_rq_destroy_all(adapter); | ||
1555 | return err; | ||
1556 | |||
1557 | } | ||
1558 | |||
1559 | /* Multiple queue aware polling function for tx and rx */ | ||
1560 | |||
1561 | static int | ||
1401 | vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) | 1562 | vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) |
1402 | { | 1563 | { |
1564 | int rcd_done = 0, i; | ||
1403 | if (unlikely(adapter->shared->ecr)) | 1565 | if (unlikely(adapter->shared->ecr)) |
1404 | vmxnet3_process_events(adapter); | 1566 | vmxnet3_process_events(adapter); |
1567 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
1568 | vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter); | ||
1405 | 1569 | ||
1406 | vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter); | 1570 | for (i = 0; i < adapter->num_rx_queues; i++) |
1407 | return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget); | 1571 | rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i], |
1572 | adapter, budget); | ||
1573 | return rcd_done; | ||
1408 | } | 1574 | } |
1409 | 1575 | ||
1410 | 1576 | ||
1411 | static int | 1577 | static int |
1412 | vmxnet3_poll(struct napi_struct *napi, int budget) | 1578 | vmxnet3_poll(struct napi_struct *napi, int budget) |
1413 | { | 1579 | { |
1414 | struct vmxnet3_adapter *adapter = container_of(napi, | 1580 | struct vmxnet3_rx_queue *rx_queue = container_of(napi, |
1415 | struct vmxnet3_adapter, napi); | 1581 | struct vmxnet3_rx_queue, napi); |
1416 | int rxd_done; | 1582 | int rxd_done; |
1417 | 1583 | ||
1418 | rxd_done = vmxnet3_do_poll(adapter, budget); | 1584 | rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); |
1419 | 1585 | ||
1420 | if (rxd_done < budget) { | 1586 | if (rxd_done < budget) { |
1421 | napi_complete(napi); | 1587 | napi_complete(napi); |
1422 | vmxnet3_enable_intr(adapter, 0); | 1588 | vmxnet3_enable_all_intrs(rx_queue->adapter); |
1423 | } | 1589 | } |
1424 | return rxd_done; | 1590 | return rxd_done; |
1425 | } | 1591 | } |
1426 | 1592 | ||
1593 | /* | ||
1594 | * NAPI polling function for MSI-X mode with multiple Rx queues | ||
1595 | * Returns the # of the NAPI credit consumed (# of rx descriptors processed) | ||
1596 | */ | ||
1597 | |||
1598 | static int | ||
1599 | vmxnet3_poll_rx_only(struct napi_struct *napi, int budget) | ||
1600 | { | ||
1601 | struct vmxnet3_rx_queue *rq = container_of(napi, | ||
1602 | struct vmxnet3_rx_queue, napi); | ||
1603 | struct vmxnet3_adapter *adapter = rq->adapter; | ||
1604 | int rxd_done; | ||
1605 | |||
1606 | /* When sharing interrupt with corresponding tx queue, process | ||
1607 | * tx completions in that queue as well | ||
1608 | */ | ||
1609 | if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) { | ||
1610 | struct vmxnet3_tx_queue *tq = | ||
1611 | &adapter->tx_queue[rq - adapter->rx_queue]; | ||
1612 | vmxnet3_tq_tx_complete(tq, adapter); | ||
1613 | } | ||
1614 | |||
1615 | rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget); | ||
1616 | |||
1617 | if (rxd_done < budget) { | ||
1618 | napi_complete(napi); | ||
1619 | vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); | ||
1620 | } | ||
1621 | return rxd_done; | ||
1622 | } | ||
1623 | |||
1624 | |||
1625 | #ifdef CONFIG_PCI_MSI | ||
1626 | |||
1627 | /* | ||
1628 | * Handle completion interrupts on tx queues | ||
1629 | * Returns whether or not the intr is handled | ||
1630 | */ | ||
1631 | |||
1632 | static irqreturn_t | ||
1633 | vmxnet3_msix_tx(int irq, void *data) | ||
1634 | { | ||
1635 | struct vmxnet3_tx_queue *tq = data; | ||
1636 | struct vmxnet3_adapter *adapter = tq->adapter; | ||
1637 | |||
1638 | if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) | ||
1639 | vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); | ||
1640 | |||
1641 | /* Handle the case where only one irq is allocate for all tx queues */ | ||
1642 | if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { | ||
1643 | int i; | ||
1644 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
1645 | struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i]; | ||
1646 | vmxnet3_tq_tx_complete(txq, adapter); | ||
1647 | } | ||
1648 | } else { | ||
1649 | vmxnet3_tq_tx_complete(tq, adapter); | ||
1650 | } | ||
1651 | vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); | ||
1652 | |||
1653 | return IRQ_HANDLED; | ||
1654 | } | ||
1655 | |||
1656 | |||
1657 | /* | ||
1658 | * Handle completion interrupts on rx queues. Returns whether or not the | ||
1659 | * intr is handled | ||
1660 | */ | ||
1661 | |||
1662 | static irqreturn_t | ||
1663 | vmxnet3_msix_rx(int irq, void *data) | ||
1664 | { | ||
1665 | struct vmxnet3_rx_queue *rq = data; | ||
1666 | struct vmxnet3_adapter *adapter = rq->adapter; | ||
1667 | |||
1668 | /* disable intr if needed */ | ||
1669 | if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) | ||
1670 | vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx); | ||
1671 | napi_schedule(&rq->napi); | ||
1672 | |||
1673 | return IRQ_HANDLED; | ||
1674 | } | ||
1675 | |||
1676 | /* | ||
1677 | *---------------------------------------------------------------------------- | ||
1678 | * | ||
1679 | * vmxnet3_msix_event -- | ||
1680 | * | ||
1681 | * vmxnet3 msix event intr handler | ||
1682 | * | ||
1683 | * Result: | ||
1684 | * whether or not the intr is handled | ||
1685 | * | ||
1686 | *---------------------------------------------------------------------------- | ||
1687 | */ | ||
1688 | |||
1689 | static irqreturn_t | ||
1690 | vmxnet3_msix_event(int irq, void *data) | ||
1691 | { | ||
1692 | struct net_device *dev = data; | ||
1693 | struct vmxnet3_adapter *adapter = netdev_priv(dev); | ||
1694 | |||
1695 | /* disable intr if needed */ | ||
1696 | if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) | ||
1697 | vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx); | ||
1698 | |||
1699 | if (adapter->shared->ecr) | ||
1700 | vmxnet3_process_events(adapter); | ||
1701 | |||
1702 | vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx); | ||
1703 | |||
1704 | return IRQ_HANDLED; | ||
1705 | } | ||
1706 | |||
1707 | #endif /* CONFIG_PCI_MSI */ | ||
1708 | |||
1427 | 1709 | ||
1428 | /* Interrupt handler for vmxnet3 */ | 1710 | /* Interrupt handler for vmxnet3 */ |
1429 | static irqreturn_t | 1711 | static irqreturn_t |
@@ -1432,7 +1714,7 @@ vmxnet3_intr(int irq, void *dev_id)
1432 | struct net_device *dev = dev_id; | 1714 | struct net_device *dev = dev_id; |
1433 | struct vmxnet3_adapter *adapter = netdev_priv(dev); | 1715 | struct vmxnet3_adapter *adapter = netdev_priv(dev); |
1434 | 1716 | ||
1435 | if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) { | 1717 | if (adapter->intr.type == VMXNET3_IT_INTX) { |
1436 | u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); | 1718 | u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); |
1437 | if (unlikely(icr == 0)) | 1719 | if (unlikely(icr == 0)) |
1438 | /* not ours */ | 1720 | /* not ours */ |
@@ -1442,77 +1724,144 @@
1442 | 1724 | ||
1443 | /* disable intr if needed */ | 1725 | /* disable intr if needed */ |
1444 | if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) | 1726 | if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) |
1445 | vmxnet3_disable_intr(adapter, 0); | 1727 | vmxnet3_disable_all_intrs(adapter); |
1446 | 1728 | ||
1447 | napi_schedule(&adapter->napi); | 1729 | napi_schedule(&adapter->rx_queue[0].napi); |
1448 | 1730 | ||
1449 | return IRQ_HANDLED; | 1731 | return IRQ_HANDLED; |
1450 | } | 1732 | } |
1451 | 1733 | ||
1452 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1734 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1453 | 1735 | ||
1454 | |||
1455 | /* netpoll callback. */ | 1736 | /* netpoll callback. */ |
1456 | static void | 1737 | static void |
1457 | vmxnet3_netpoll(struct net_device *netdev) | 1738 | vmxnet3_netpoll(struct net_device *netdev) |
1458 | { | 1739 | { |
1459 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1740 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1460 | int irq; | ||
1461 | 1741 | ||
1462 | #ifdef CONFIG_PCI_MSI | 1742 | if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) |
1463 | if (adapter->intr.type == VMXNET3_IT_MSIX) | 1743 | vmxnet3_disable_all_intrs(adapter); |
1464 | irq = adapter->intr.msix_entries[0].vector; | 1744 | |
1465 | else | 1745 | vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size); |
1466 | #endif | 1746 | vmxnet3_enable_all_intrs(adapter); |
1467 | irq = adapter->pdev->irq; | ||
1468 | 1747 | ||
1469 | disable_irq(irq); | ||
1470 | vmxnet3_intr(irq, netdev); | ||
1471 | enable_irq(irq); | ||
1472 | } | 1748 | } |
1473 | #endif | 1749 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
1474 | 1750 | ||
1475 | static int | 1751 | static int |
1476 | vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) | 1752 | vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) |
1477 | { | 1753 | { |
1478 | int err; | 1754 | struct vmxnet3_intr *intr = &adapter->intr; |
1755 | int err = 0, i; | ||
1756 | int vector = 0; | ||
1479 | 1757 | ||
1480 | #ifdef CONFIG_PCI_MSI | 1758 | #ifdef CONFIG_PCI_MSI |
1481 | if (adapter->intr.type == VMXNET3_IT_MSIX) { | 1759 | if (adapter->intr.type == VMXNET3_IT_MSIX) { |
1482 | /* we only use 1 MSI-X vector */ | 1760 | for (i = 0; i < adapter->num_tx_queues; i++) { |
1483 | err = request_irq(adapter->intr.msix_entries[0].vector, | 1761 | if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { |
1484 | vmxnet3_intr, 0, adapter->netdev->name, | 1762 | sprintf(adapter->tx_queue[i].name, "%s-tx-%d", |
1485 | adapter->netdev); | 1763 | adapter->netdev->name, vector); |
1486 | } else if (adapter->intr.type == VMXNET3_IT_MSI) { | 1764 | err = request_irq( |
1765 | intr->msix_entries[vector].vector, | ||
1766 | vmxnet3_msix_tx, 0, | ||
1767 | adapter->tx_queue[i].name, | ||
1768 | &adapter->tx_queue[i]); | ||
1769 | } else { | ||
1770 | sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d", | ||
1771 | adapter->netdev->name, vector); | ||
1772 | } | ||
1773 | if (err) { | ||
1774 | dev_err(&adapter->netdev->dev, | ||
1775 | "Failed to request irq for MSIX, %s, " | ||
1776 | "error %d\n", | ||
1777 | adapter->tx_queue[i].name, err); | ||
1778 | return err; | ||
1779 | } | ||
1780 | |||
1781 | /* Handle the case where only 1 MSIx was allocated for | ||
1782 | * all tx queues */ | ||
1783 | if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { | ||
1784 | for (; i < adapter->num_tx_queues; i++) | ||
1785 | adapter->tx_queue[i].comp_ring.intr_idx | ||
1786 | = vector; | ||
1787 | vector++; | ||
1788 | break; | ||
1789 | } else { | ||
1790 | adapter->tx_queue[i].comp_ring.intr_idx | ||
1791 | = vector++; | ||
1792 | } | ||
1793 | } | ||
1794 | if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) | ||
1795 | vector = 0; | ||
1796 | |||
1797 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
1798 | if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) | ||
1799 | sprintf(adapter->rx_queue[i].name, "%s-rx-%d", | ||
1800 | adapter->netdev->name, vector); | ||
1801 | else | ||
1802 | sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d", | ||
1803 | adapter->netdev->name, vector); | ||
1804 | err = request_irq(intr->msix_entries[vector].vector, | ||
1805 | vmxnet3_msix_rx, 0, | ||
1806 | adapter->rx_queue[i].name, | ||
1807 | &(adapter->rx_queue[i])); | ||
1808 | if (err) { | ||
1809 | printk(KERN_ERR "Failed to request irq for MSIX" | ||
1810 | ", %s, error %d\n", | ||
1811 | adapter->rx_queue[i].name, err); | ||
1812 | return err; | ||
1813 | } | ||
1814 | |||
1815 | adapter->rx_queue[i].comp_ring.intr_idx = vector++; | ||
1816 | } | ||
1817 | |||
1818 | sprintf(intr->event_msi_vector_name, "%s-event-%d", | ||
1819 | adapter->netdev->name, vector); | ||
1820 | err = request_irq(intr->msix_entries[vector].vector, | ||
1821 | vmxnet3_msix_event, 0, | ||
1822 | intr->event_msi_vector_name, adapter->netdev); | ||
1823 | intr->event_intr_idx = vector; | ||
1824 | |||
1825 | } else if (intr->type == VMXNET3_IT_MSI) { | ||
1826 | adapter->num_rx_queues = 1; | ||
1487 | err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, | 1827 | err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, |
1488 | adapter->netdev->name, adapter->netdev); | 1828 | adapter->netdev->name, adapter->netdev); |
1489 | } else | 1829 | } else { |
1490 | #endif | 1830 | #endif |
1491 | { | 1831 | adapter->num_rx_queues = 1; |
1492 | err = request_irq(adapter->pdev->irq, vmxnet3_intr, | 1832 | err = request_irq(adapter->pdev->irq, vmxnet3_intr, |
1493 | IRQF_SHARED, adapter->netdev->name, | 1833 | IRQF_SHARED, adapter->netdev->name, |
1494 | adapter->netdev); | 1834 | adapter->netdev); |
1835 | #ifdef CONFIG_PCI_MSI | ||
1495 | } | 1836 | } |
1496 | 1837 | #endif | |
1497 | if (err) | 1838 | intr->num_intrs = vector + 1; |
1839 | if (err) { | ||
1498 | printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" | 1840 | printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" |
1499 | ":%d\n", adapter->netdev->name, adapter->intr.type, err); | 1841 | ":%d\n", adapter->netdev->name, intr->type, err); |
1842 | } else { | ||
1843 | /* Number of rx queues will not change after this */ | ||
1844 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
1845 | struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; | ||
1846 | rq->qid = i; | ||
1847 | rq->qid2 = i + adapter->num_rx_queues; | ||
1848 | } | ||
1500 | 1849 | ||
1501 | 1850 | ||
1502 | if (!err) { | ||
1503 | int i; | ||
1504 | /* init our intr settings */ | ||
1505 | for (i = 0; i < adapter->intr.num_intrs; i++) | ||
1506 | adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE; | ||
1507 | 1851 | ||
1508 | /* next setup intr index for all intr sources */ | 1852 | /* init our intr settings */ |
1509 | adapter->tx_queue.comp_ring.intr_idx = 0; | 1853 | for (i = 0; i < intr->num_intrs; i++) |
1510 | adapter->rx_queue.comp_ring.intr_idx = 0; | 1854 | intr->mod_levels[i] = UPT1_IML_ADAPTIVE; |
1511 | adapter->intr.event_intr_idx = 0; | 1855 | if (adapter->intr.type != VMXNET3_IT_MSIX) { |
1856 | adapter->intr.event_intr_idx = 0; | ||
1857 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
1858 | adapter->tx_queue[i].comp_ring.intr_idx = 0; | ||
1859 | adapter->rx_queue[0].comp_ring.intr_idx = 0; | ||
1860 | } | ||
1512 | 1861 | ||
1513 | printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " | 1862 | printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " |
1514 | "allocated\n", adapter->netdev->name, adapter->intr.type, | 1863 | "allocated\n", adapter->netdev->name, intr->type, |
1515 | adapter->intr.mask_mode, adapter->intr.num_intrs); | 1864 | intr->mask_mode, intr->num_intrs); |
1516 | } | 1865 | } |
1517 | 1866 | ||
1518 | return err; | 1867 | return err; |
@@ -1522,18 +1871,32 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1522 | static void | 1871 | static void |
1523 | vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) | 1872 | vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) |
1524 | { | 1873 | { |
1525 | BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO || | 1874 | struct vmxnet3_intr *intr = &adapter->intr; |
1526 | adapter->intr.num_intrs <= 0); | 1875 | BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0); |
1527 | 1876 | ||
1528 | switch (adapter->intr.type) { | 1877 | switch (intr->type) { |
1529 | #ifdef CONFIG_PCI_MSI | 1878 | #ifdef CONFIG_PCI_MSI |
1530 | case VMXNET3_IT_MSIX: | 1879 | case VMXNET3_IT_MSIX: |
1531 | { | 1880 | { |
1532 | int i; | 1881 | int i, vector = 0; |
1882 | |||
1883 | if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { | ||
1884 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
1885 | free_irq(intr->msix_entries[vector++].vector, | ||
1886 | &(adapter->tx_queue[i])); | ||
1887 | if (adapter->share_intr == VMXNET3_INTR_TXSHARE) | ||
1888 | break; | ||
1889 | } | ||
1890 | } | ||
1891 | |||
1892 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
1893 | free_irq(intr->msix_entries[vector++].vector, | ||
1894 | &(adapter->rx_queue[i])); | ||
1895 | } | ||
1533 | 1896 | ||
1534 | for (i = 0; i < adapter->intr.num_intrs; i++) | 1897 | free_irq(intr->msix_entries[vector].vector, |
1535 | free_irq(adapter->intr.msix_entries[i].vector, | 1898 | adapter->netdev); |
1536 | adapter->netdev); | 1899 | BUG_ON(vector >= intr->num_intrs); |
1537 | break; | 1900 | break; |
1538 | } | 1901 | } |
1539 | #endif | 1902 | #endif |
@@ -1548,42 +1911,20 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1548 | } | 1911 | } |
1549 | } | 1912 | } |
1550 | 1913 | ||
1551 | |||
1552 | inline void set_flag_le16(__le16 *data, u16 flag) | ||
1553 | { | ||
1554 | *data = cpu_to_le16(le16_to_cpu(*data) | flag); | ||
1555 | } | ||
1556 | |||
1557 | inline void set_flag_le64(__le64 *data, u64 flag) | ||
1558 | { | ||
1559 | *data = cpu_to_le64(le64_to_cpu(*data) | flag); | ||
1560 | } | ||
1561 | |||
1562 | inline void reset_flag_le64(__le64 *data, u64 flag) | ||
1563 | { | ||
1564 | *data = cpu_to_le64(le64_to_cpu(*data) & ~flag); | ||
1565 | } | ||
1566 | |||
1567 | |||
1568 | static void | 1914 | static void |
1569 | vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | 1915 | vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) |
1570 | { | 1916 | { |
1571 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1917 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1572 | struct Vmxnet3_DriverShared *shared = adapter->shared; | 1918 | struct Vmxnet3_DriverShared *shared = adapter->shared; |
1573 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | 1919 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1920 | unsigned long flags; | ||
1574 | 1921 | ||
1575 | if (grp) { | 1922 | if (grp) { |
1576 | /* add vlan rx stripping. */ | 1923 | /* add vlan rx stripping. */ |
1577 | if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { | 1924 | if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { |
1578 | int i; | 1925 | int i; |
1579 | struct Vmxnet3_DSDevRead *devRead = &shared->devRead; | ||
1580 | adapter->vlan_grp = grp; | 1926 | adapter->vlan_grp = grp; |
1581 | 1927 | ||
1582 | /* update FEATURES to device */ | ||
1583 | set_flag_le64(&devRead->misc.uptFeatures, | ||
1584 | UPT1_F_RXVLAN); | ||
1585 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1586 | VMXNET3_CMD_UPDATE_FEATURE); | ||
1587 | /* | 1928 | /* |
1588 | * Clear entire vfTable; then enable untagged pkts. | 1929 | * Clear entire vfTable; then enable untagged pkts. |
1589 | * Note: setting one entry in vfTable to non-zero turns | 1930 | * Note: setting one entry in vfTable to non-zero turns |
@@ -1593,8 +1934,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1593 | vfTable[i] = 0; | 1934 | vfTable[i] = 0; |
1594 | 1935 | ||
1595 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); | 1936 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); |
1937 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1596 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1938 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1597 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1939 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
1940 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1598 | } else { | 1941 | } else { |
1599 | printk(KERN_ERR "%s: vlan_rx_register when device has " | 1942 | printk(KERN_ERR "%s: vlan_rx_register when device has " |
1600 | "no NETIF_F_HW_VLAN_RX\n", netdev->name); | 1943 | "no NETIF_F_HW_VLAN_RX\n", netdev->name); |
@@ -1604,7 +1947,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1604 | struct Vmxnet3_DSDevRead *devRead = &shared->devRead; | 1947 | struct Vmxnet3_DSDevRead *devRead = &shared->devRead; |
1605 | adapter->vlan_grp = NULL; | 1948 | adapter->vlan_grp = NULL; |
1606 | 1949 | ||
1607 | if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) { | 1950 | if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) { |
1608 | int i; | 1951 | int i; |
1609 | 1952 | ||
1610 | for (i = 0; i < VMXNET3_VFT_SIZE; i++) { | 1953 | for (i = 0; i < VMXNET3_VFT_SIZE; i++) { |
@@ -1613,14 +1956,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1613 | */ | 1956 | */ |
1614 | vfTable[i] = 0; | 1957 | vfTable[i] = 0; |
1615 | } | 1958 | } |
1959 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1616 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1960 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1617 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1961 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
1618 | 1962 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | |
1619 | /* update FEATURES to device */ | ||
1620 | reset_flag_le64(&devRead->misc.uptFeatures, | ||
1621 | UPT1_F_RXVLAN); | ||
1622 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1623 | VMXNET3_CMD_UPDATE_FEATURE); | ||
1624 | } | 1963 | } |
1625 | } | 1964 | } |
1626 | } | 1965 | } |
@@ -1634,7 +1973,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1634 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | 1973 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1635 | bool activeVlan = false; | 1974 | bool activeVlan = false; |
1636 | 1975 | ||
1637 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | 1976 | for (vid = 0; vid < VLAN_N_VID; vid++) { |
1638 | if (vlan_group_get_device(adapter->vlan_grp, vid)) { | 1977 | if (vlan_group_get_device(adapter->vlan_grp, vid)) { |
1639 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); | 1978 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); |
1640 | activeVlan = true; | 1979 | activeVlan = true; |
@@ -1653,10 +1992,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1653 | { | 1992 | { |
1654 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1993 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1655 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | 1994 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1995 | unsigned long flags; | ||
1656 | 1996 | ||
1657 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); | 1997 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); |
1998 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1658 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1999 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1659 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 2000 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
2001 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1660 | } | 2002 | } |
1661 | 2003 | ||
1662 | 2004 | ||
@@ -1665,10 +2007,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1665 | { | 2007 | { |
1666 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 2008 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1667 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | 2009 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
2010 | unsigned long flags; | ||
1668 | 2011 | ||
1669 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); | 2012 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); |
2013 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1670 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2014 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1671 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 2015 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
2016 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1672 | } | 2017 | } |
1673 | 2018 | ||
1674 | 2019 | ||
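
Note: the add_vid/kill_vid hooks above manipulate vfTable, a VID-indexed bitmap shared with the device, then tell the device to re-read it (now under cmd_lock). A sketch of what the SET/CLEAR macros expand to, based on the driver's one-bit-per-VID layout (vft_set/vft_clear are illustrative names):

	static void vft_set(u32 *vfTable, u16 vid)
	{
		vfTable[vid >> 5] |= 1U << (vid & 31);	  /* SET_VFTABLE_ENTRY */
	}

	static void vft_clear(u32 *vfTable, u16 vid)
	{
		vfTable[vid >> 5] &= ~(1U << (vid & 31)); /* CLEAR_VFTABLE_ENTRY */
	}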
@@ -1699,6 +2044,7 @@ static void
1699 | vmxnet3_set_mc(struct net_device *netdev) | 2044 | vmxnet3_set_mc(struct net_device *netdev) |
1700 | { | 2045 | { |
1701 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 2046 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
2047 | unsigned long flags; | ||
1702 | struct Vmxnet3_RxFilterConf *rxConf = | 2048 | struct Vmxnet3_RxFilterConf *rxConf = |
1703 | &adapter->shared->devRead.rxFilterConf; | 2049 | &adapter->shared->devRead.rxFilterConf; |
1704 | u8 *new_table = NULL; | 2050 | u8 *new_table = NULL; |
@@ -1734,6 +2080,7 @@ vmxnet3_set_mc(struct net_device *netdev)
1734 | rxConf->mfTablePA = 0; | 2080 | rxConf->mfTablePA = 0; |
1735 | } | 2081 | } |
1736 | 2082 | ||
2083 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1737 | if (new_mode != rxConf->rxMode) { | 2084 | if (new_mode != rxConf->rxMode) { |
1738 | rxConf->rxMode = cpu_to_le32(new_mode); | 2085 | rxConf->rxMode = cpu_to_le32(new_mode); |
1739 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2086 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
@@ -1742,10 +2089,20 @@ vmxnet3_set_mc(struct net_device *netdev)
1742 | 2089 | ||
1743 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2090 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1744 | VMXNET3_CMD_UPDATE_MAC_FILTERS); | 2091 | VMXNET3_CMD_UPDATE_MAC_FILTERS); |
2092 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1745 | 2093 | ||
1746 | kfree(new_table); | 2094 | kfree(new_table); |
1747 | } | 2095 | } |
1748 | 2096 | ||
2097 | void | ||
2098 | vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter) | ||
2099 | { | ||
2100 | int i; | ||
2101 | |||
2102 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
2103 | vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter); | ||
2104 | } | ||
2105 | |||
1749 | 2106 | ||
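vmxnet3_tq_destroy_all(), used by the error paths later in this patch, follows the same shape as vmxnet3_rq_destroy_all() above; a sketch, assuming the single-queue vmxnet3_tq_destroy() keeps its existing signature:

	void
	vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
	{
		int i;

		for (i = 0; i < adapter->num_tx_queues; i++)
			vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
	}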
1750 | /* | 2107 | /* |
1751 | * Set up driver_shared based on settings in adapter. | 2108 | * Set up driver_shared based on settings in adapter. |
@@ -1778,55 +2135,85 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) | |||
1778 | devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); | 2135 | devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); |
1779 | 2136 | ||
1780 | /* set up feature flags */ | 2137 | /* set up feature flags */ |
1781 | if (adapter->rxcsum) | 2138 | if (adapter->netdev->features & NETIF_F_RXCSUM) |
1782 | set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM); | 2139 | devRead->misc.uptFeatures |= UPT1_F_RXCSUM; |
1783 | 2140 | ||
1784 | if (adapter->lro) { | 2141 | if (adapter->netdev->features & NETIF_F_LRO) { |
1785 | set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO); | 2142 | devRead->misc.uptFeatures |= UPT1_F_LRO; |
1786 | devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); | 2143 | devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); |
1787 | } | 2144 | } |
1788 | if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) && | 2145 | if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) |
1789 | adapter->vlan_grp) { | 2146 | devRead->misc.uptFeatures |= UPT1_F_RXVLAN; |
1790 | set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN); | ||
1791 | } | ||
1792 | 2147 | ||
1793 | devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); | 2148 | devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); |
1794 | devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); | 2149 | devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); |
1795 | devRead->misc.queueDescLen = cpu_to_le32( | 2150 | devRead->misc.queueDescLen = cpu_to_le32( |
1796 | sizeof(struct Vmxnet3_TxQueueDesc) + | 2151 | adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + |
1797 | sizeof(struct Vmxnet3_RxQueueDesc)); | 2152 | adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc)); |
1798 | 2153 | ||
1799 | /* tx queue settings */ | 2154 | /* tx queue settings */ |
1800 | BUG_ON(adapter->tx_queue.tx_ring.base == NULL); | 2155 | devRead->misc.numTxQueues = adapter->num_tx_queues; |
1801 | 2156 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
1802 | devRead->misc.numTxQueues = 1; | 2157 | struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; |
1803 | tqc = &adapter->tqd_start->conf; | 2158 | BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL); |
1804 | tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA); | 2159 | tqc = &adapter->tqd_start[i].conf; |
1805 | tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA); | 2160 | tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); |
1806 | tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA); | 2161 | tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); |
1807 | tqc->ddPA = cpu_to_le64(virt_to_phys( | 2162 | tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); |
1808 | adapter->tx_queue.buf_info)); | 2163 | tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info)); |
1809 | tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size); | 2164 | tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); |
1810 | tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size); | 2165 | tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); |
1811 | tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size); | 2166 | tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); |
1812 | tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) * | 2167 | tqc->ddLen = cpu_to_le32( |
1813 | tqc->txRingSize); | 2168 | sizeof(struct vmxnet3_tx_buf_info) * |
1814 | tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; | 2169 | tqc->txRingSize); |
2170 | tqc->intrIdx = tq->comp_ring.intr_idx; | ||
2171 | } | ||
1815 | 2172 | ||
1816 | /* rx queue settings */ | 2173 | /* rx queue settings */ |
1817 | devRead->misc.numRxQueues = 1; | 2174 | devRead->misc.numRxQueues = adapter->num_rx_queues; |
1818 | rqc = &adapter->rqd_start->conf; | 2175 | for (i = 0; i < adapter->num_rx_queues; i++) { |
1819 | rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA); | 2176 | struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; |
1820 | rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA); | 2177 | rqc = &adapter->rqd_start[i].conf; |
1821 | rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA); | 2178 | rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); |
1822 | rqc->ddPA = cpu_to_le64(virt_to_phys( | 2179 | rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); |
1823 | adapter->rx_queue.buf_info)); | 2180 | rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); |
1824 | rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size); | 2181 | rqc->ddPA = cpu_to_le64(virt_to_phys( |
1825 | rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size); | 2182 | rq->buf_info)); |
1826 | rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size); | 2183 | rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); |
1827 | rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) * | 2184 | rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); |
1828 | (rqc->rxRingSize[0] + rqc->rxRingSize[1])); | 2185 | rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); |
1829 | rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; | 2186 | rqc->ddLen = cpu_to_le32( |
2187 | sizeof(struct vmxnet3_rx_buf_info) * | ||
2188 | (rqc->rxRingSize[0] + | ||
2189 | rqc->rxRingSize[1])); | ||
2190 | rqc->intrIdx = rq->comp_ring.intr_idx; | ||
2191 | } | ||
2192 | |||
2193 | #ifdef VMXNET3_RSS | ||
2194 | memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf)); | ||
2195 | |||
2196 | if (adapter->rss) { | ||
2197 | struct UPT1_RSSConf *rssConf = adapter->rss_conf; | ||
2198 | devRead->misc.uptFeatures |= UPT1_F_RSS; | ||
2199 | devRead->misc.numRxQueues = adapter->num_rx_queues; | ||
2200 | rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | | ||
2201 | UPT1_RSS_HASH_TYPE_IPV4 | | ||
2202 | UPT1_RSS_HASH_TYPE_TCP_IPV6 | | ||
2203 | UPT1_RSS_HASH_TYPE_IPV6; | ||
2204 | rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; | ||
2205 | rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; | ||
2206 | rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; | ||
2207 | get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize); | ||
2208 | for (i = 0; i < rssConf->indTableSize; i++) | ||
2209 | rssConf->indTable[i] = i % adapter->num_rx_queues; | ||
2210 | |||
2211 | devRead->rssConfDesc.confVer = 1; | ||
2212 | devRead->rssConfDesc.confLen = sizeof(*rssConf); | ||
2213 | devRead->rssConfDesc.confPA = virt_to_phys(rssConf); | ||
2214 | } | ||
2215 | |||
2216 | #endif /* VMXNET3_RSS */ | ||
1830 | 2217 | ||
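The indirection table filled round-robin above is what spreads flows across rx queues: the device Toeplitz-hashes each flow with hashKey and uses the hash to index indTable. A small user-space demo of that mapping, with made-up hash values and a table size assumed to be 32:

	#include <stdio.h>

	int main(void)
	{
		unsigned char indTable[32];	/* VMXNET3_RSS_IND_TABLE_SIZE assumed 32 */
		unsigned int hashes[] = { 7, 33, 64, 100, 257 };
		int num_rx_queues = 4, i;

		for (i = 0; i < 32; i++)
			indTable[i] = i % num_rx_queues;	/* 0,1,2,3,0,1,... */

		for (i = 0; i < 5; i++)
			printf("hash %3u -> rx queue %d\n",
			       hashes[i], indTable[hashes[i] % 32]);
		return 0;
	}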
1831 | /* intr settings */ | 2218 | /* intr settings */ |
1832 | devRead->intrConf.autoMask = adapter->intr.mask_mode == | 2219 | devRead->intrConf.autoMask = adapter->intr.mask_mode == |
@@ -1841,6 +2228,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) | |||
1841 | /* rx filter settings */ | 2228 | /* rx filter settings */ |
1842 | devRead->rxFilterConf.rxMode = 0; | 2229 | devRead->rxFilterConf.rxMode = 0; |
1843 | vmxnet3_restore_vlan(adapter); | 2230 | vmxnet3_restore_vlan(adapter); |
2231 | vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); | ||
2232 | |||
1844 | /* the rest are already zeroed */ | 2233 | /* the rest are already zeroed */ |
1845 | } | 2234 | } |
1846 | 2235 | ||
@@ -1848,18 +2237,19 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) | |||
1848 | int | 2237 | int |
1849 | vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) | 2238 | vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) |
1850 | { | 2239 | { |
1851 | int err; | 2240 | int err, i; |
1852 | u32 ret; | 2241 | u32 ret; |
2242 | unsigned long flags; | ||
1853 | 2243 | ||
1854 | dev_dbg(&adapter->netdev->dev, | 2244 | dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," |
1855 | "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" | 2245 | " ring sizes %u %u %u\n", adapter->netdev->name, |
1856 | " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, | 2246 | adapter->skb_buf_size, adapter->rx_buf_per_pkt, |
1857 | adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, | 2247 | adapter->tx_queue[0].tx_ring.size, |
1858 | adapter->rx_queue.rx_ring[0].size, | 2248 | adapter->rx_queue[0].rx_ring[0].size, |
1859 | adapter->rx_queue.rx_ring[1].size); | 2249 | adapter->rx_queue[0].rx_ring[1].size); |
1860 | 2250 | ||
1861 | vmxnet3_tq_init(&adapter->tx_queue, adapter); | 2251 | vmxnet3_tq_init_all(adapter); |
1862 | err = vmxnet3_rq_init(&adapter->rx_queue, adapter); | 2252 | err = vmxnet3_rq_init_all(adapter); |
1863 | if (err) { | 2253 | if (err) { |
1864 | printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", | 2254 | printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", |
1865 | adapter->netdev->name, err); | 2255 | adapter->netdev->name, err); |
@@ -1879,9 +2269,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) | |||
1879 | adapter->shared_pa)); | 2269 | adapter->shared_pa)); |
1880 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( | 2270 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( |
1881 | adapter->shared_pa)); | 2271 | adapter->shared_pa)); |
2272 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1882 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2273 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1883 | VMXNET3_CMD_ACTIVATE_DEV); | 2274 | VMXNET3_CMD_ACTIVATE_DEV); |
1884 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 2275 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
2276 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1885 | 2277 | ||
1886 | if (ret != 0) { | 2278 | if (ret != 0) { |
1887 | printk(KERN_ERR "Failed to activate dev %s: error %u\n", | 2279 | printk(KERN_ERR "Failed to activate dev %s: error %u\n", |
@@ -1889,10 +2281,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) | |||
1889 | err = -EINVAL; | 2281 | err = -EINVAL; |
1890 | goto activate_err; | 2282 | goto activate_err; |
1891 | } | 2283 | } |
1892 | VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD, | 2284 | |
1893 | adapter->rx_queue.rx_ring[0].next2fill); | 2285 | for (i = 0; i < adapter->num_rx_queues; i++) { |
1894 | VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2, | 2286 | VMXNET3_WRITE_BAR0_REG(adapter, |
1895 | adapter->rx_queue.rx_ring[1].next2fill); | 2287 | VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN, |
2288 | adapter->rx_queue[i].rx_ring[0].next2fill); | ||
2289 | VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 + | ||
2290 | (i * VMXNET3_REG_ALIGN)), | ||
2291 | adapter->rx_queue[i].rx_ring[1].next2fill); | ||
2292 | } | ||
1896 | 2293 | ||
1897 | /* Apply the rx filter settings last. */ | 2294 | /* Apply the rx filter settings last. */ |
1898 | vmxnet3_set_mc(adapter->netdev); | 2295 | vmxnet3_set_mc(adapter->netdev); |
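The producer-update loop above relies on the per-queue RXPROD registers sitting at a fixed stride in BAR0. Expressed as illustrative macros (these names are not in the patch):

	/* queue i's rx producer registers, VMXNET3_REG_ALIGN bytes apart */
	#define VMXNET3_RXPROD_REG(i)	(VMXNET3_REG_RXPROD  + (i) * VMXNET3_REG_ALIGN)
	#define VMXNET3_RXPROD2_REG(i)	(VMXNET3_REG_RXPROD2 + (i) * VMXNET3_REG_ALIGN)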
@@ -1902,8 +2299,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) | |||
1902 | * tx queue if the link is up. | 2299 | * tx queue if the link is up. |
1903 | */ | 2300 | */ |
1904 | vmxnet3_check_link(adapter, true); | 2301 | vmxnet3_check_link(adapter, true); |
1905 | 2302 | for (i = 0; i < adapter->num_rx_queues; i++) | |
1906 | napi_enable(&adapter->napi); | 2303 | napi_enable(&adapter->rx_queue[i].napi); |
1907 | vmxnet3_enable_all_intrs(adapter); | 2304 | vmxnet3_enable_all_intrs(adapter); |
1908 | clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); | 2305 | clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); |
1909 | return 0; | 2306 | return 0; |
@@ -1915,7 +2312,7 @@ activate_err: | |||
1915 | irq_err: | 2312 | irq_err: |
1916 | rq_err: | 2313 | rq_err: |
1917 | /* free up buffers we allocated */ | 2314 | /* free up buffers we allocated */ |
1918 | vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); | 2315 | vmxnet3_rq_cleanup_all(adapter); |
1919 | return err; | 2316 | return err; |
1920 | } | 2317 | } |
1921 | 2318 | ||
@@ -1923,28 +2320,36 @@ rq_err: | |||
1923 | void | 2320 | void |
1924 | vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) | 2321 | vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) |
1925 | { | 2322 | { |
2323 | unsigned long flags; | ||
2324 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1926 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); | 2325 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); |
2326 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1927 | } | 2327 | } |
1928 | 2328 | ||
1929 | 2329 | ||
1930 | int | 2330 | int |
1931 | vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) | 2331 | vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) |
1932 | { | 2332 | { |
2333 | int i; | ||
2334 | unsigned long flags; | ||
1933 | if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) | 2335 | if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) |
1934 | return 0; | 2336 | return 0; |
1935 | 2337 | ||
1936 | 2338 | ||
2339 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1937 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2340 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1938 | VMXNET3_CMD_QUIESCE_DEV); | 2341 | VMXNET3_CMD_QUIESCE_DEV); |
2342 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1939 | vmxnet3_disable_all_intrs(adapter); | 2343 | vmxnet3_disable_all_intrs(adapter); |
1940 | 2344 | ||
1941 | napi_disable(&adapter->napi); | 2345 | for (i = 0; i < adapter->num_rx_queues; i++) |
2346 | napi_disable(&adapter->rx_queue[i].napi); | ||
1942 | netif_tx_disable(adapter->netdev); | 2347 | netif_tx_disable(adapter->netdev); |
1943 | adapter->link_speed = 0; | 2348 | adapter->link_speed = 0; |
1944 | netif_carrier_off(adapter->netdev); | 2349 | netif_carrier_off(adapter->netdev); |
1945 | 2350 | ||
1946 | vmxnet3_tq_cleanup(&adapter->tx_queue, adapter); | 2351 | vmxnet3_tq_cleanup_all(adapter); |
1947 | vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); | 2352 | vmxnet3_rq_cleanup_all(adapter); |
1948 | vmxnet3_free_irqs(adapter); | 2353 | vmxnet3_free_irqs(adapter); |
1949 | return 0; | 2354 | return 0; |
1950 | } | 2355 | } |
@@ -2066,7 +2471,9 @@ vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) | |||
2066 | static void | 2471 | static void |
2067 | vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) | 2472 | vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) |
2068 | { | 2473 | { |
2069 | size_t sz; | 2474 | size_t sz, i, ring0_size, ring1_size, comp_size; |
2475 | struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0]; | ||
2476 | |||
2070 | 2477 | ||
2071 | if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - | 2478 | if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - |
2072 | VMXNET3_MAX_ETH_HDR_SIZE) { | 2479 | VMXNET3_MAX_ETH_HDR_SIZE) { |
@@ -2088,11 +2495,19 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) | |||
2088 | * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN | 2495 | * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN |
2089 | */ | 2496 | */ |
2090 | sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; | 2497 | sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; |
2091 | adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size + | 2498 | ring0_size = adapter->rx_queue[0].rx_ring[0].size; |
2092 | sz - 1) / sz * sz; | 2499 | ring0_size = (ring0_size + sz - 1) / sz * sz; |
2093 | adapter->rx_queue.rx_ring[0].size = min_t(u32, | 2500 | ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE / |
2094 | adapter->rx_queue.rx_ring[0].size, | 2501 | sz * sz); |
2095 | VMXNET3_RX_RING_MAX_SIZE / sz * sz); | 2502 | ring1_size = adapter->rx_queue[0].rx_ring[1].size; |
2503 | comp_size = ring0_size + ring1_size; | ||
2504 | |||
2505 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
2506 | rq = &adapter->rx_queue[i]; | ||
2507 | rq->rx_ring[0].size = ring0_size; | ||
2508 | rq->rx_ring[1].size = ring1_size; | ||
2509 | rq->comp_ring.size = comp_size; | ||
2510 | } | ||
2096 | } | 2511 | } |
2097 | 2512 | ||
2098 | 2513 | ||
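The sizing arithmetic above rounds the requested ring 0 size up to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN and caps it at the ring maximum rounded down to the same multiple, then copies the result to every rx queue. A worked example with assumed values (alignment 32, ring maximum 4096, rx_buf_per_pkt 3):

	#include <stdio.h>

	int main(void)
	{
		unsigned int sz = 3 * 32;	/* rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN */
		unsigned int req = 1024, max = 4096;
		unsigned int ring0 = (req + sz - 1) / sz * sz;	/* round up: 1056 */

		if (ring0 > max / sz * sz)
			ring0 = max / sz * sz;	/* cap at 4032, a multiple of sz */
		printf("ring0 size = %u\n", ring0);	/* prints 1056 */
		return 0;
	}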
@@ -2100,29 +2515,53 @@ int | |||
2100 | vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, | 2515 | vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, |
2101 | u32 rx_ring_size, u32 rx_ring2_size) | 2516 | u32 rx_ring_size, u32 rx_ring2_size) |
2102 | { | 2517 | { |
2103 | int err; | 2518 | int err = 0, i; |
2104 | 2519 | ||
2105 | adapter->tx_queue.tx_ring.size = tx_ring_size; | 2520 | for (i = 0; i < adapter->num_tx_queues; i++) { |
2106 | adapter->tx_queue.data_ring.size = tx_ring_size; | 2521 | struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; |
2107 | adapter->tx_queue.comp_ring.size = tx_ring_size; | 2522 | tq->tx_ring.size = tx_ring_size; |
2108 | adapter->tx_queue.shared = &adapter->tqd_start->ctrl; | 2523 | tq->data_ring.size = tx_ring_size; |
2109 | adapter->tx_queue.stopped = true; | 2524 | tq->comp_ring.size = tx_ring_size; |
2110 | err = vmxnet3_tq_create(&adapter->tx_queue, adapter); | 2525 | tq->shared = &adapter->tqd_start[i].ctrl; |
2111 | if (err) | 2526 | tq->stopped = true; |
2112 | return err; | 2527 | tq->adapter = adapter; |
2528 | tq->qid = i; | ||
2529 | err = vmxnet3_tq_create(tq, adapter); | ||
2530 | /* | ||
2531 | * Too late to change num_tx_queues. We cannot make do with | ||
2532 | * fewer queues than we asked for. | ||
2533 | */ | ||
2534 | if (err) | ||
2535 | goto queue_err; | ||
2536 | } | ||
2113 | 2537 | ||
2114 | adapter->rx_queue.rx_ring[0].size = rx_ring_size; | 2538 | adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; |
2115 | adapter->rx_queue.rx_ring[1].size = rx_ring2_size; | 2539 | adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; |
2116 | vmxnet3_adjust_rx_ring_size(adapter); | 2540 | vmxnet3_adjust_rx_ring_size(adapter); |
2117 | adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size + | 2541 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2118 | adapter->rx_queue.rx_ring[1].size; | 2542 | struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; |
2119 | adapter->rx_queue.qid = 0; | 2543 | /* qid and qid2 for rx queues will be assigned later when the |
2120 | adapter->rx_queue.qid2 = 1; | 2544 | * number of rx queues is finalized after allocating intrs */ |
2121 | adapter->rx_queue.shared = &adapter->rqd_start->ctrl; | 2545 | rq->shared = &adapter->rqd_start[i].ctrl; |
2122 | err = vmxnet3_rq_create(&adapter->rx_queue, adapter); | 2546 | rq->adapter = adapter; |
2123 | if (err) | 2547 | err = vmxnet3_rq_create(rq, adapter); |
2124 | vmxnet3_tq_destroy(&adapter->tx_queue, adapter); | 2548 | if (err) { |
2125 | 2549 | if (i == 0) { | |
2550 | printk(KERN_ERR "Could not allocate any rx" | ||
2551 | "queues. Aborting.\n"); | ||
2552 | goto queue_err; | ||
2553 | } else { | ||
2554 | printk(KERN_INFO "Number of rx queues changed " | ||
2555 | "to : %d.\n", i); | ||
2556 | adapter->num_rx_queues = i; | ||
2557 | err = 0; | ||
2558 | break; | ||
2559 | } | ||
2560 | } | ||
2561 | } | ||
2562 | return err; | ||
2563 | queue_err: | ||
2564 | vmxnet3_tq_destroy_all(adapter); | ||
2126 | return err; | 2565 | return err; |
2127 | } | 2566 | } |
2128 | 2567 | ||
@@ -2130,11 +2569,12 @@ static int | |||
2130 | vmxnet3_open(struct net_device *netdev) | 2569 | vmxnet3_open(struct net_device *netdev) |
2131 | { | 2570 | { |
2132 | struct vmxnet3_adapter *adapter; | 2571 | struct vmxnet3_adapter *adapter; |
2133 | int err; | 2572 | int err, i; |
2134 | 2573 | ||
2135 | adapter = netdev_priv(netdev); | 2574 | adapter = netdev_priv(netdev); |
2136 | 2575 | ||
2137 | spin_lock_init(&adapter->tx_queue.tx_lock); | 2576 | for (i = 0; i < adapter->num_tx_queues; i++) |
2577 | spin_lock_init(&adapter->tx_queue[i].tx_lock); | ||
2138 | 2578 | ||
2139 | err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, | 2579 | err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, |
2140 | VMXNET3_DEF_RX_RING_SIZE, | 2580 | VMXNET3_DEF_RX_RING_SIZE, |
@@ -2149,8 +2589,8 @@ vmxnet3_open(struct net_device *netdev) | |||
2149 | return 0; | 2589 | return 0; |
2150 | 2590 | ||
2151 | activate_err: | 2591 | activate_err: |
2152 | vmxnet3_rq_destroy(&adapter->rx_queue, adapter); | 2592 | vmxnet3_rq_destroy_all(adapter); |
2153 | vmxnet3_tq_destroy(&adapter->tx_queue, adapter); | 2593 | vmxnet3_tq_destroy_all(adapter); |
2154 | queue_err: | 2594 | queue_err: |
2155 | return err; | 2595 | return err; |
2156 | } | 2596 | } |
@@ -2170,8 +2610,8 @@ vmxnet3_close(struct net_device *netdev) | |||
2170 | 2610 | ||
2171 | vmxnet3_quiesce_dev(adapter); | 2611 | vmxnet3_quiesce_dev(adapter); |
2172 | 2612 | ||
2173 | vmxnet3_rq_destroy(&adapter->rx_queue, adapter); | 2613 | vmxnet3_rq_destroy_all(adapter); |
2174 | vmxnet3_tq_destroy(&adapter->tx_queue, adapter); | 2614 | vmxnet3_tq_destroy_all(adapter); |
2175 | 2615 | ||
2176 | clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); | 2616 | clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); |
2177 | 2617 | ||
@@ -2183,6 +2623,8 @@ vmxnet3_close(struct net_device *netdev) | |||
2183 | void | 2623 | void |
2184 | vmxnet3_force_close(struct vmxnet3_adapter *adapter) | 2624 | vmxnet3_force_close(struct vmxnet3_adapter *adapter) |
2185 | { | 2625 | { |
2626 | int i; | ||
2627 | |||
2186 | /* | 2628 | /* |
2187 | * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise | 2629 | * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise |
2188 | * vmxnet3_close() will deadlock. | 2630 | * vmxnet3_close() will deadlock. |
@@ -2190,7 +2632,8 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter) | |||
2190 | BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); | 2632 | BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); |
2191 | 2633 | ||
2192 | /* we need to enable NAPI, otherwise dev_close will deadlock */ | 2634 | /* we need to enable NAPI, otherwise dev_close will deadlock */ |
2193 | napi_enable(&adapter->napi); | 2635 | for (i = 0; i < adapter->num_rx_queues; i++) |
2636 | napi_enable(&adapter->rx_queue[i].napi); | ||
2194 | dev_close(adapter->netdev); | 2637 | dev_close(adapter->netdev); |
2195 | } | 2638 | } |
2196 | 2639 | ||
@@ -2204,9 +2647,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) | |||
2204 | if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) | 2647 | if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) |
2205 | return -EINVAL; | 2648 | return -EINVAL; |
2206 | 2649 | ||
2207 | if (new_mtu > 1500 && !adapter->jumbo_frame) | ||
2208 | return -EINVAL; | ||
2209 | |||
2210 | netdev->mtu = new_mtu; | 2650 | netdev->mtu = new_mtu; |
2211 | 2651 | ||
2212 | /* | 2652 | /* |
@@ -2221,14 +2661,11 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) | |||
2221 | vmxnet3_reset_dev(adapter); | 2661 | vmxnet3_reset_dev(adapter); |
2222 | 2662 | ||
2223 | /* we need to re-create the rx queue based on the new mtu */ | 2663 | /* we need to re-create the rx queue based on the new mtu */ |
2224 | vmxnet3_rq_destroy(&adapter->rx_queue, adapter); | 2664 | vmxnet3_rq_destroy_all(adapter); |
2225 | vmxnet3_adjust_rx_ring_size(adapter); | 2665 | vmxnet3_adjust_rx_ring_size(adapter); |
2226 | adapter->rx_queue.comp_ring.size = | 2666 | err = vmxnet3_rq_create_all(adapter); |
2227 | adapter->rx_queue.rx_ring[0].size + | ||
2228 | adapter->rx_queue.rx_ring[1].size; | ||
2229 | err = vmxnet3_rq_create(&adapter->rx_queue, adapter); | ||
2230 | if (err) { | 2667 | if (err) { |
2231 | printk(KERN_ERR "%s: failed to re-create rx queue," | 2668 | printk(KERN_ERR "%s: failed to re-create rx queues," |
2232 | " error %d. Closing it.\n", netdev->name, err); | 2669 | " error %d. Closing it.\n", netdev->name, err); |
2233 | goto out; | 2670 | goto out; |
2234 | } | 2671 | } |
@@ -2255,28 +2692,18 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) | |||
2255 | { | 2692 | { |
2256 | struct net_device *netdev = adapter->netdev; | 2693 | struct net_device *netdev = adapter->netdev; |
2257 | 2694 | ||
2258 | netdev->features = NETIF_F_SG | | 2695 | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | |
2259 | NETIF_F_HW_CSUM | | 2696 | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX | |
2260 | NETIF_F_HW_VLAN_TX | | 2697 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_LRO; |
2261 | NETIF_F_HW_VLAN_RX | | 2698 | if (dma64) |
2262 | NETIF_F_HW_VLAN_FILTER | | ||
2263 | NETIF_F_TSO | | ||
2264 | NETIF_F_TSO6 | | ||
2265 | NETIF_F_LRO; | ||
2266 | |||
2267 | printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro"); | ||
2268 | |||
2269 | adapter->rxcsum = true; | ||
2270 | adapter->jumbo_frame = true; | ||
2271 | adapter->lro = true; | ||
2272 | |||
2273 | if (dma64) { | ||
2274 | netdev->features |= NETIF_F_HIGHDMA; | 2699 | netdev->features |= NETIF_F_HIGHDMA; |
2275 | printk(" highDMA"); | 2700 | netdev->vlan_features = netdev->hw_features & ~NETIF_F_HW_VLAN_TX; |
2276 | } | 2701 | netdev->features = netdev->hw_features | |
2702 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; | ||
2277 | 2703 | ||
2278 | netdev->vlan_features = netdev->features; | 2704 | netdev_info(adapter->netdev, |
2279 | printk("\n"); | 2705 | "features: sg csum vlan jf tso tsoIPv6 lro%s\n", |
2706 | dma64 ? " highDMA" : ""); | ||
2280 | } | 2707 | } |
2281 | 2708 | ||
2282 | 2709 | ||
@@ -2293,16 +2720,68 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) | |||
2293 | mac[5] = (tmp >> 8) & 0xff; | 2720 | mac[5] = (tmp >> 8) & 0xff; |
2294 | } | 2721 | } |
2295 | 2722 | ||
2723 | #ifdef CONFIG_PCI_MSI | ||
2724 | |||
2725 | /* | ||
2726 | * Enable MSI-X vectors. | ||
2727 | * Returns: | ||
2728 | * 0 when the requested number of vectors could be enabled, | ||
2729 | * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors | ||
2730 | * required could be enabled, | ||
2731 | * otherwise the number of vectors which can be enabled (this number is | ||
2732 | * smaller than VMXNET3_LINUX_MIN_MSIX_VECT). | ||
2733 | */ | ||
2734 | |||
2735 | static int | ||
2736 | vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, | ||
2737 | int vectors) | ||
2738 | { | ||
2739 | int err = 0, vector_threshold; | ||
2740 | vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT; | ||
2741 | |||
2742 | while (vectors >= vector_threshold) { | ||
2743 | err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, | ||
2744 | vectors); | ||
2745 | if (!err) { | ||
2746 | adapter->intr.num_intrs = vectors; | ||
2747 | return 0; | ||
2748 | } else if (err < 0) { | ||
2749 | printk(KERN_ERR "Failed to enable MSI-X for %s, error" | ||
2750 | " %d\n", adapter->netdev->name, err); | ||
2751 | vectors = 0; | ||
2752 | } else if (err < vector_threshold) { | ||
2753 | break; | ||
2754 | } else { | ||
2755 | /* If it fails to enable the required number of MSI-X | ||
2756 | * vectors, try enabling the minimum number required. | ||
2757 | */ | ||
2758 | printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" | ||
2759 | " %d instead\n", vectors, adapter->netdev->name, | ||
2760 | vector_threshold); | ||
2761 | vectors = vector_threshold; | ||
2762 | } | ||
2763 | } | ||
2764 | |||
2765 | printk(KERN_INFO "Number of MSI-X interrupts which can be allocatedi" | ||
2766 | " are lower than min threshold required.\n"); | ||
2767 | return err; | ||
2768 | } | ||
2769 | |||
2770 | |||
2771 | #endif /* CONFIG_PCI_MSI */ | ||
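vmxnet3_acquire_msix_vectors() leans on the pci_enable_msix() return convention of this era: a negative value is a hard failure, zero means all requested vectors were enabled, and a positive value is the number of vectors that could have been enabled instead. A minimal caller sketch under those semantics (the wrapper name is hypothetical):

	static int enable_msix_with_fallback(struct pci_dev *pdev,
					     struct msix_entry *entries, int nvec)
	{
		int err = pci_enable_msix(pdev, entries, nvec);

		if (err > 0)	/* only 'err' vectors available: retry with that many */
			err = pci_enable_msix(pdev, entries, err);
		return err;	/* 0 on success, < 0 means fall back to MSI/INTx */
	}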
2296 | 2772 | ||
2297 | static void | 2773 | static void |
2298 | vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) | 2774 | vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) |
2299 | { | 2775 | { |
2300 | u32 cfg; | 2776 | u32 cfg; |
2777 | unsigned long flags; | ||
2301 | 2778 | ||
2302 | /* intr settings */ | 2779 | /* intr settings */ |
2780 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2303 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2781 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2304 | VMXNET3_CMD_GET_CONF_INTR); | 2782 | VMXNET3_CMD_GET_CONF_INTR); |
2305 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 2783 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
2784 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2306 | adapter->intr.type = cfg & 0x3; | 2785 | adapter->intr.type = cfg & 0x3; |
2307 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; | 2786 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; |
2308 | 2787 | ||
@@ -2312,16 +2791,47 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) | |||
2312 | 2791 | ||
2313 | #ifdef CONFIG_PCI_MSI | 2792 | #ifdef CONFIG_PCI_MSI |
2314 | if (adapter->intr.type == VMXNET3_IT_MSIX) { | 2793 | if (adapter->intr.type == VMXNET3_IT_MSIX) { |
2315 | int err; | 2794 | int vector, err = 0; |
2316 | 2795 | ||
2317 | adapter->intr.msix_entries[0].entry = 0; | 2796 | adapter->intr.num_intrs = (adapter->share_intr == |
2318 | err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, | 2797 | VMXNET3_INTR_TXSHARE) ? 1 : |
2319 | VMXNET3_LINUX_MAX_MSIX_VECT); | 2798 | adapter->num_tx_queues; |
2320 | if (!err) { | 2799 | adapter->intr.num_intrs += (adapter->share_intr == |
2321 | adapter->intr.num_intrs = 1; | 2800 | VMXNET3_INTR_BUDDYSHARE) ? 0 : |
2322 | adapter->intr.type = VMXNET3_IT_MSIX; | 2801 | adapter->num_rx_queues; |
2802 | adapter->intr.num_intrs += 1; /* for link event */ | ||
2803 | |||
2804 | adapter->intr.num_intrs = (adapter->intr.num_intrs > | ||
2805 | VMXNET3_LINUX_MIN_MSIX_VECT | ||
2806 | ? adapter->intr.num_intrs : | ||
2807 | VMXNET3_LINUX_MIN_MSIX_VECT); | ||
2808 | |||
2809 | for (vector = 0; vector < adapter->intr.num_intrs; vector++) | ||
2810 | adapter->intr.msix_entries[vector].entry = vector; | ||
2811 | |||
2812 | err = vmxnet3_acquire_msix_vectors(adapter, | ||
2813 | adapter->intr.num_intrs); | ||
2814 | /* If we cannot allocate one MSIx vector per queue | ||
2815 | * then limit the number of rx queues to 1 | ||
2816 | */ | ||
2817 | if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { | ||
2818 | if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE | ||
2819 | || adapter->num_rx_queues != 1) { | ||
2820 | adapter->share_intr = VMXNET3_INTR_TXSHARE; | ||
2821 | printk(KERN_ERR "Number of rx queues : 1\n"); | ||
2822 | adapter->num_rx_queues = 1; | ||
2823 | adapter->intr.num_intrs = | ||
2824 | VMXNET3_LINUX_MIN_MSIX_VECT; | ||
2825 | } | ||
2323 | return; | 2826 | return; |
2324 | } | 2827 | } |
2828 | if (!err) | ||
2829 | return; | ||
2830 | |||
2831 | /* If we cannot allocate MSIx vectors use only one rx queue */ | ||
2832 | printk(KERN_INFO "Failed to enable MSI-X for %s, error %d." | ||
2833 | "#rx queues : 1, try MSI\n", adapter->netdev->name, err); | ||
2834 | |||
2325 | adapter->intr.type = VMXNET3_IT_MSI; | 2835 | adapter->intr.type = VMXNET3_IT_MSI; |
2326 | } | 2836 | } |
2327 | 2837 | ||
@@ -2329,12 +2839,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) | |||
2329 | int err; | 2839 | int err; |
2330 | err = pci_enable_msi(adapter->pdev); | 2840 | err = pci_enable_msi(adapter->pdev); |
2331 | if (!err) { | 2841 | if (!err) { |
2842 | adapter->num_rx_queues = 1; | ||
2332 | adapter->intr.num_intrs = 1; | 2843 | adapter->intr.num_intrs = 1; |
2333 | return; | 2844 | return; |
2334 | } | 2845 | } |
2335 | } | 2846 | } |
2336 | #endif /* CONFIG_PCI_MSI */ | 2847 | #endif /* CONFIG_PCI_MSI */ |
2337 | 2848 | ||
2849 | adapter->num_rx_queues = 1; | ||
2850 | printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n"); | ||
2338 | adapter->intr.type = VMXNET3_IT_INTX; | 2851 | adapter->intr.type = VMXNET3_IT_INTX; |
2339 | 2852 | ||
2340 | /* INT-X related setting */ | 2853 | /* INT-X related setting */ |
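Putting the vector accounting at the top of the MSI-X branch into numbers: one vector is reserved for link events, the tx completion vectors collapse to one under TXSHARE, and rx queues reuse the tx vectors under BUDDYSHARE. A worked example assuming 4 tx and 4 rx queues:

	#include <stdio.h>

	enum { DONTSHARE, TXSHARE, BUDDYSHARE };	/* VMXNET3_INTR_* analogues */

	static int nvec(int mode, int ntx, int nrx)
	{
		int n = (mode == TXSHARE) ? 1 : ntx;	/* tx completion vectors */

		n += (mode == BUDDYSHARE) ? 0 : nrx;	/* rx vectors (buddy rides tx) */
		return n + 1;				/* plus the link event vector */
	}

	int main(void)
	{
		printf("dontshare=%d txshare=%d buddyshare=%d\n",
		       nvec(DONTSHARE, 4, 4),	/* 9 */
		       nvec(TXSHARE, 4, 4),	/* 6 */
		       nvec(BUDDYSHARE, 4, 4));	/* 5 */
		return 0;
	}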
@@ -2362,6 +2875,7 @@ vmxnet3_tx_timeout(struct net_device *netdev) | |||
2362 | 2875 | ||
2363 | printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); | 2876 | printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); |
2364 | schedule_work(&adapter->work); | 2877 | schedule_work(&adapter->work); |
2878 | netif_wake_queue(adapter->netdev); | ||
2365 | } | 2879 | } |
2366 | 2880 | ||
2367 | 2881 | ||
@@ -2402,6 +2916,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
2402 | .ndo_start_xmit = vmxnet3_xmit_frame, | 2916 | .ndo_start_xmit = vmxnet3_xmit_frame, |
2403 | .ndo_set_mac_address = vmxnet3_set_mac_addr, | 2917 | .ndo_set_mac_address = vmxnet3_set_mac_addr, |
2404 | .ndo_change_mtu = vmxnet3_change_mtu, | 2918 | .ndo_change_mtu = vmxnet3_change_mtu, |
2919 | .ndo_set_features = vmxnet3_set_features, | ||
2405 | .ndo_get_stats = vmxnet3_get_stats, | 2920 | .ndo_get_stats = vmxnet3_get_stats, |
2406 | .ndo_tx_timeout = vmxnet3_tx_timeout, | 2921 | .ndo_tx_timeout = vmxnet3_tx_timeout, |
2407 | .ndo_set_multicast_list = vmxnet3_set_mc, | 2922 | .ndo_set_multicast_list = vmxnet3_set_mc, |
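The body of the new ndo_set_features hook is not part of this hunk; a plausible sketch of vmxnet3_set_features(), assuming only the RXCSUM and LRO toggles need to reach the device via VMXNET3_CMD_UPDATE_FEATURE (ndo_set_features still takes u32 in this era):

	static int
	vmxnet3_set_features(struct net_device *netdev, u32 features)
	{
		struct vmxnet3_adapter *adapter = netdev_priv(netdev);
		u32 changed = features ^ netdev->features;
		unsigned long flags;

		if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO)) {
			if (features & NETIF_F_RXCSUM)
				adapter->shared->devRead.misc.uptFeatures |=
					UPT1_F_RXCSUM;
			else
				adapter->shared->devRead.misc.uptFeatures &=
					~UPT1_F_RXCSUM;
			if (features & NETIF_F_LRO)
				adapter->shared->devRead.misc.uptFeatures |=
					UPT1_F_LRO;
			else
				adapter->shared->devRead.misc.uptFeatures &=
					~UPT1_F_LRO;

			spin_lock_irqsave(&adapter->cmd_lock, flags);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
		}
		return 0;
	}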
@@ -2418,8 +2933,34 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
2418 | struct net_device *netdev; | 2933 | struct net_device *netdev; |
2419 | struct vmxnet3_adapter *adapter; | 2934 | struct vmxnet3_adapter *adapter; |
2420 | u8 mac[ETH_ALEN]; | 2935 | u8 mac[ETH_ALEN]; |
2936 | int size; | ||
2937 | int num_tx_queues; | ||
2938 | int num_rx_queues; | ||
2939 | |||
2940 | if (!pci_msi_enabled()) | ||
2941 | enable_mq = 0; | ||
2942 | |||
2943 | #ifdef VMXNET3_RSS | ||
2944 | if (enable_mq) | ||
2945 | num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES, | ||
2946 | (int)num_online_cpus()); | ||
2947 | else | ||
2948 | #endif | ||
2949 | num_rx_queues = 1; | ||
2950 | num_rx_queues = rounddown_pow_of_two(num_rx_queues); | ||
2951 | |||
2952 | if (enable_mq) | ||
2953 | num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES, | ||
2954 | (int)num_online_cpus()); | ||
2955 | else | ||
2956 | num_tx_queues = 1; | ||
2957 | |||
2958 | num_tx_queues = rounddown_pow_of_two(num_tx_queues); | ||
2959 | netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter), | ||
2960 | max(num_tx_queues, num_rx_queues)); | ||
2961 | printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n", | ||
2962 | num_tx_queues, num_rx_queues); | ||
2421 | 2963 | ||
2422 | netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter)); | ||
2423 | if (!netdev) { | 2964 | if (!netdev) { |
2424 | printk(KERN_ERR "Failed to alloc ethernet device for adapter " | 2965 | printk(KERN_ERR "Failed to alloc ethernet device for adapter " |
2425 | "%s\n", pci_name(pdev)); | 2966 | "%s\n", pci_name(pdev)); |
@@ -2431,6 +2972,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
2431 | adapter->netdev = netdev; | 2972 | adapter->netdev = netdev; |
2432 | adapter->pdev = pdev; | 2973 | adapter->pdev = pdev; |
2433 | 2974 | ||
2975 | spin_lock_init(&adapter->cmd_lock); | ||
2434 | adapter->shared = pci_alloc_consistent(adapter->pdev, | 2976 | adapter->shared = pci_alloc_consistent(adapter->pdev, |
2435 | sizeof(struct Vmxnet3_DriverShared), | 2977 | sizeof(struct Vmxnet3_DriverShared), |
2436 | &adapter->shared_pa); | 2978 | &adapter->shared_pa); |
@@ -2441,9 +2983,12 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
2441 | goto err_alloc_shared; | 2983 | goto err_alloc_shared; |
2442 | } | 2984 | } |
2443 | 2985 | ||
2444 | adapter->tqd_start = pci_alloc_consistent(adapter->pdev, | 2986 | adapter->num_rx_queues = num_rx_queues; |
2445 | sizeof(struct Vmxnet3_TxQueueDesc) + | 2987 | adapter->num_tx_queues = num_tx_queues; |
2446 | sizeof(struct Vmxnet3_RxQueueDesc), | 2988 | |
2989 | size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; | ||
2990 | size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; | ||
2991 | adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size, | ||
2447 | &adapter->queue_desc_pa); | 2992 | &adapter->queue_desc_pa); |
2448 | 2993 | ||
2449 | if (!adapter->tqd_start) { | 2994 | if (!adapter->tqd_start) { |
@@ -2452,8 +2997,8 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
2452 | err = -ENOMEM; | 2997 | err = -ENOMEM; |
2453 | goto err_alloc_queue_desc; | 2998 | goto err_alloc_queue_desc; |
2454 | } | 2999 | } |
2455 | adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start | 3000 | adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + |
2456 | + 1); | 3001 | adapter->num_tx_queues); |
2457 | 3002 | ||
2458 | adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); | 3003 | adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); |
2459 | if (adapter->pm_conf == NULL) { | 3004 | if (adapter->pm_conf == NULL) { |
@@ -2463,6 +3008,17 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
2463 | goto err_alloc_pm; | 3008 | goto err_alloc_pm; |
2464 | } | 3009 | } |
2465 | 3010 | ||
3011 | #ifdef VMXNET3_RSS | ||
3012 | |||
3013 | adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL); | ||
3014 | if (adapter->rss_conf == NULL) { | ||
3015 | printk(KERN_ERR "Failed to allocate memory for %s\n", | ||
3016 | pci_name(pdev)); | ||
3017 | err = -ENOMEM; | ||
3018 | goto err_alloc_rss; | ||
3019 | } | ||
3020 | #endif /* VMXNET3_RSS */ | ||
3021 | |||
2466 | err = vmxnet3_alloc_pci_resources(adapter, &dma64); | 3022 | err = vmxnet3_alloc_pci_resources(adapter, &dma64); |
2467 | if (err < 0) | 3023 | if (err < 0) |
2468 | goto err_alloc_pci; | 3024 | goto err_alloc_pci; |
@@ -2490,18 +3046,48 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
2490 | vmxnet3_declare_features(adapter, dma64); | 3046 | vmxnet3_declare_features(adapter, dma64); |
2491 | 3047 | ||
2492 | adapter->dev_number = atomic_read(&devices_found); | 3048 | adapter->dev_number = atomic_read(&devices_found); |
3049 | |||
3050 | adapter->share_intr = irq_share_mode; | ||
3051 | if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE && | ||
3052 | adapter->num_tx_queues != adapter->num_rx_queues) | ||
3053 | adapter->share_intr = VMXNET3_INTR_DONTSHARE; | ||
3054 | |||
2493 | vmxnet3_alloc_intr_resources(adapter); | 3055 | vmxnet3_alloc_intr_resources(adapter); |
2494 | 3056 | ||
3057 | #ifdef VMXNET3_RSS | ||
3058 | if (adapter->num_rx_queues > 1 && | ||
3059 | adapter->intr.type == VMXNET3_IT_MSIX) { | ||
3060 | adapter->rss = true; | ||
3061 | printk(KERN_INFO "RSS is enabled.\n"); | ||
3062 | } else { | ||
3063 | adapter->rss = false; | ||
3064 | } | ||
3065 | #endif | ||
3066 | |||
2495 | vmxnet3_read_mac_addr(adapter, mac); | 3067 | vmxnet3_read_mac_addr(adapter, mac); |
2496 | memcpy(netdev->dev_addr, mac, netdev->addr_len); | 3068 | memcpy(netdev->dev_addr, mac, netdev->addr_len); |
2497 | 3069 | ||
2498 | netdev->netdev_ops = &vmxnet3_netdev_ops; | 3070 | netdev->netdev_ops = &vmxnet3_netdev_ops; |
2499 | netdev->watchdog_timeo = 5 * HZ; | ||
2500 | vmxnet3_set_ethtool_ops(netdev); | 3071 | vmxnet3_set_ethtool_ops(netdev); |
3072 | netdev->watchdog_timeo = 5 * HZ; | ||
2501 | 3073 | ||
2502 | INIT_WORK(&adapter->work, vmxnet3_reset_work); | 3074 | INIT_WORK(&adapter->work, vmxnet3_reset_work); |
2503 | 3075 | ||
2504 | netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64); | 3076 | if (adapter->intr.type == VMXNET3_IT_MSIX) { |
3077 | int i; | ||
3078 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
3079 | netif_napi_add(adapter->netdev, | ||
3080 | &adapter->rx_queue[i].napi, | ||
3081 | vmxnet3_poll_rx_only, 64); | ||
3082 | } | ||
3083 | } else { | ||
3084 | netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi, | ||
3085 | vmxnet3_poll, 64); | ||
3086 | } | ||
3087 | |||
3088 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); | ||
3089 | netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); | ||
3090 | |||
2505 | SET_NETDEV_DEV(netdev, &pdev->dev); | 3091 | SET_NETDEV_DEV(netdev, &pdev->dev); |
2506 | err = register_netdev(netdev); | 3092 | err = register_netdev(netdev); |
2507 | 3093 | ||
@@ -2521,11 +3107,14 @@ err_register: | |||
2521 | err_ver: | 3107 | err_ver: |
2522 | vmxnet3_free_pci_resources(adapter); | 3108 | vmxnet3_free_pci_resources(adapter); |
2523 | err_alloc_pci: | 3109 | err_alloc_pci: |
3110 | #ifdef VMXNET3_RSS | ||
3111 | kfree(adapter->rss_conf); | ||
3112 | err_alloc_rss: | ||
3113 | #endif | ||
2524 | kfree(adapter->pm_conf); | 3114 | kfree(adapter->pm_conf); |
2525 | err_alloc_pm: | 3115 | err_alloc_pm: |
2526 | pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + | 3116 | pci_free_consistent(adapter->pdev, size, adapter->tqd_start, |
2527 | sizeof(struct Vmxnet3_RxQueueDesc), | 3117 | adapter->queue_desc_pa); |
2528 | adapter->tqd_start, adapter->queue_desc_pa); | ||
2529 | err_alloc_queue_desc: | 3118 | err_alloc_queue_desc: |
2530 | pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), | 3119 | pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), |
2531 | adapter->shared, adapter->shared_pa); | 3120 | adapter->shared, adapter->shared_pa); |
@@ -2541,17 +3130,33 @@ vmxnet3_remove_device(struct pci_dev *pdev) | |||
2541 | { | 3130 | { |
2542 | struct net_device *netdev = pci_get_drvdata(pdev); | 3131 | struct net_device *netdev = pci_get_drvdata(pdev); |
2543 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 3132 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
3133 | int size = 0; | ||
3134 | int num_rx_queues; | ||
3135 | |||
3136 | #ifdef VMXNET3_RSS | ||
3137 | if (enable_mq) | ||
3138 | num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES, | ||
3139 | (int)num_online_cpus()); | ||
3140 | else | ||
3141 | #endif | ||
3142 | num_rx_queues = 1; | ||
3143 | num_rx_queues = rounddown_pow_of_two(num_rx_queues); | ||
2544 | 3144 | ||
2545 | flush_scheduled_work(); | 3145 | cancel_work_sync(&adapter->work); |
2546 | 3146 | ||
2547 | unregister_netdev(netdev); | 3147 | unregister_netdev(netdev); |
2548 | 3148 | ||
2549 | vmxnet3_free_intr_resources(adapter); | 3149 | vmxnet3_free_intr_resources(adapter); |
2550 | vmxnet3_free_pci_resources(adapter); | 3150 | vmxnet3_free_pci_resources(adapter); |
3151 | #ifdef VMXNET3_RSS | ||
3152 | kfree(adapter->rss_conf); | ||
3153 | #endif | ||
2551 | kfree(adapter->pm_conf); | 3154 | kfree(adapter->pm_conf); |
2552 | pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + | 3155 | |
2553 | sizeof(struct Vmxnet3_RxQueueDesc), | 3156 | size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; |
2554 | adapter->tqd_start, adapter->queue_desc_pa); | 3157 | size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues; |
3158 | pci_free_consistent(adapter->pdev, size, adapter->tqd_start, | ||
3159 | adapter->queue_desc_pa); | ||
2555 | pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), | 3160 | pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), |
2556 | adapter->shared, adapter->shared_pa); | 3161 | adapter->shared, adapter->shared_pa); |
2557 | free_netdev(netdev); | 3162 | free_netdev(netdev); |
@@ -2572,17 +3177,21 @@ vmxnet3_suspend(struct device *device) | |||
2572 | u8 *arpreq; | 3177 | u8 *arpreq; |
2573 | struct in_device *in_dev; | 3178 | struct in_device *in_dev; |
2574 | struct in_ifaddr *ifa; | 3179 | struct in_ifaddr *ifa; |
3180 | unsigned long flags; | ||
2575 | int i = 0; | 3181 | int i = 0; |
2576 | 3182 | ||
2577 | if (!netif_running(netdev)) | 3183 | if (!netif_running(netdev)) |
2578 | return 0; | 3184 | return 0; |
2579 | 3185 | ||
3186 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3187 | napi_disable(&adapter->rx_queue[i].napi); | ||
3188 | |||
2580 | vmxnet3_disable_all_intrs(adapter); | 3189 | vmxnet3_disable_all_intrs(adapter); |
2581 | vmxnet3_free_irqs(adapter); | 3190 | vmxnet3_free_irqs(adapter); |
2582 | vmxnet3_free_intr_resources(adapter); | 3191 | vmxnet3_free_intr_resources(adapter); |
2583 | 3192 | ||
2584 | netif_device_detach(netdev); | 3193 | netif_device_detach(netdev); |
2585 | netif_stop_queue(netdev); | 3194 | netif_tx_stop_all_queues(netdev); |
2586 | 3195 | ||
2587 | /* Create wake-up filters. */ | 3196 | /* Create wake-up filters. */ |
2588 | pmConf = adapter->pm_conf; | 3197 | pmConf = adapter->pm_conf; |
@@ -2594,7 +3203,7 @@ vmxnet3_suspend(struct device *device) | |||
2594 | memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); | 3203 | memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); |
2595 | pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ | 3204 | pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ |
2596 | 3205 | ||
2597 | set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER); | 3206 | pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; |
2598 | i++; | 3207 | i++; |
2599 | } | 3208 | } |
2600 | 3209 | ||
@@ -2636,13 +3245,13 @@ vmxnet3_suspend(struct device *device) | |||
2636 | pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ | 3245 | pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ |
2637 | in_dev_put(in_dev); | 3246 | in_dev_put(in_dev); |
2638 | 3247 | ||
2639 | set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER); | 3248 | pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; |
2640 | i++; | 3249 | i++; |
2641 | } | 3250 | } |
2642 | 3251 | ||
2643 | skip_arp: | 3252 | skip_arp: |
2644 | if (adapter->wol & WAKE_MAGIC) | 3253 | if (adapter->wol & WAKE_MAGIC) |
2645 | set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC); | 3254 | pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; |
2646 | 3255 | ||
2647 | pmConf->numFilters = i; | 3256 | pmConf->numFilters = i; |
2648 | 3257 | ||
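The switch from set_flag_le16() to a plain OR is sound only because the VMXNET3_PM_WAKEUP_* macros are defined with cpu_to_le16() elsewhere in this series (assumed here), so both operands of the OR are already in the field's byte order. Illustrative only:

	__le16 events = 0;

	/* both sides little-endian: endian-safe without a swap helper */
	events |= VMXNET3_PM_WAKEUP_FILTER;
	events |= VMXNET3_PM_WAKEUP_MAGIC;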
@@ -2652,8 +3261,10 @@ skip_arp: | |||
2652 | adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( | 3261 | adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( |
2653 | pmConf)); | 3262 | pmConf)); |
2654 | 3263 | ||
3264 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2655 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 3265 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2656 | VMXNET3_CMD_UPDATE_PMCFG); | 3266 | VMXNET3_CMD_UPDATE_PMCFG); |
3267 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2657 | 3268 | ||
2658 | pci_save_state(pdev); | 3269 | pci_save_state(pdev); |
2659 | pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), | 3270 | pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), |
@@ -2668,7 +3279,8 @@ skip_arp: | |||
2668 | static int | 3279 | static int |
2669 | vmxnet3_resume(struct device *device) | 3280 | vmxnet3_resume(struct device *device) |
2670 | { | 3281 | { |
2671 | int err; | 3282 | int err, i = 0; |
3283 | unsigned long flags; | ||
2672 | struct pci_dev *pdev = to_pci_dev(device); | 3284 | struct pci_dev *pdev = to_pci_dev(device); |
2673 | struct net_device *netdev = pci_get_drvdata(pdev); | 3285 | struct net_device *netdev = pci_get_drvdata(pdev); |
2674 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 3286 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
@@ -2684,7 +3296,7 @@ vmxnet3_resume(struct device *device) | |||
2684 | adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); | 3296 | adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); |
2685 | adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( | 3297 | adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( |
2686 | *pmConf)); | 3298 | *pmConf)); |
2687 | adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys( | 3299 | adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( |
2688 | pmConf)); | 3300 | pmConf)); |
2689 | 3301 | ||
2690 | netif_device_attach(netdev); | 3302 | netif_device_attach(netdev); |
@@ -2696,10 +3308,14 @@ vmxnet3_resume(struct device *device) | |||
2696 | 3308 | ||
2697 | pci_enable_wake(pdev, PCI_D0, 0); | 3309 | pci_enable_wake(pdev, PCI_D0, 0); |
2698 | 3310 | ||
3311 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2699 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 3312 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2700 | VMXNET3_CMD_UPDATE_PMCFG); | 3313 | VMXNET3_CMD_UPDATE_PMCFG); |
3314 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2701 | vmxnet3_alloc_intr_resources(adapter); | 3315 | vmxnet3_alloc_intr_resources(adapter); |
2702 | vmxnet3_request_irqs(adapter); | 3316 | vmxnet3_request_irqs(adapter); |
3317 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3318 | napi_enable(&adapter->rx_queue[i].napi); | ||
2703 | vmxnet3_enable_all_intrs(adapter); | 3319 | vmxnet3_enable_all_intrs(adapter); |
2704 | 3320 | ||
2705 | return 0; | 3321 | return 0; |
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 7e4b5a89165a..dc959fe27aa5 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c | |||
@@ -33,113 +33,82 @@ struct vmxnet3_stat_desc { | |||
33 | }; | 33 | }; |
34 | 34 | ||
35 | 35 | ||
36 | static u32 | ||
37 | vmxnet3_get_rx_csum(struct net_device *netdev) | ||
38 | { | ||
39 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
40 | return adapter->rxcsum; | ||
41 | } | ||
42 | |||
43 | |||
44 | static int | ||
45 | vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) | ||
46 | { | ||
47 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
48 | |||
49 | if (adapter->rxcsum != val) { | ||
50 | adapter->rxcsum = val; | ||
51 | if (netif_running(netdev)) { | ||
52 | if (val) | ||
53 | set_flag_le64( | ||
54 | &adapter->shared->devRead.misc.uptFeatures, | ||
55 | UPT1_F_RXCSUM); | ||
56 | else | ||
57 | reset_flag_le64( | ||
58 | &adapter->shared->devRead.misc.uptFeatures, | ||
59 | UPT1_F_RXCSUM); | ||
60 | |||
61 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
62 | VMXNET3_CMD_UPDATE_FEATURE); | ||
63 | } | ||
64 | } | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | |||
69 | /* per tq stats maintained by the device */ | 36 | /* per tq stats maintained by the device */ |
70 | static const struct vmxnet3_stat_desc | 37 | static const struct vmxnet3_stat_desc |
71 | vmxnet3_tq_dev_stats[] = { | 38 | vmxnet3_tq_dev_stats[] = { |
72 | /* description, offset */ | 39 | /* description, offset */ |
73 | { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, | 40 | { "Tx Queue#", 0 }, |
74 | { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, | 41 | { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, |
75 | { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, | 42 | { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, |
76 | { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, | 43 | { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, |
77 | { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, | 44 | { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, |
78 | { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, | 45 | { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, |
79 | { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, | 46 | { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, |
80 | { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, | 47 | { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, |
81 | { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, | 48 | { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, |
82 | { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, | 49 | { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, |
50 | { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, | ||
83 | }; | 51 | }; |
84 | 52 | ||
85 | /* per tq stats maintained by the driver */ | 53 | /* per tq stats maintained by the driver */ |
86 | static const struct vmxnet3_stat_desc | 54 | static const struct vmxnet3_stat_desc |
87 | vmxnet3_tq_driver_stats[] = { | 55 | vmxnet3_tq_driver_stats[] = { |
88 | /* description, offset */ | 56 | /* description, offset */ |
89 | {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, | 57 | {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, |
90 | drop_total) }, | 58 | drop_total) }, |
91 | { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, | 59 | { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, |
92 | drop_too_many_frags) }, | 60 | drop_too_many_frags) }, |
93 | { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, | 61 | { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, |
94 | drop_oversized_hdr) }, | 62 | drop_oversized_hdr) }, |
95 | { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, | 63 | { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, |
96 | drop_hdr_inspect_err) }, | 64 | drop_hdr_inspect_err) }, |
97 | { " tso", offsetof(struct vmxnet3_tq_driver_stats, | 65 | { " tso", offsetof(struct vmxnet3_tq_driver_stats, |
98 | drop_tso) }, | 66 | drop_tso) }, |
99 | { "ring full", offsetof(struct vmxnet3_tq_driver_stats, | 67 | { " ring full", offsetof(struct vmxnet3_tq_driver_stats, |
100 | tx_ring_full) }, | 68 | tx_ring_full) }, |
101 | { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, | 69 | { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, |
102 | linearized) }, | 70 | linearized) }, |
103 | { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, | 71 | { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, |
104 | copy_skb_header) }, | 72 | copy_skb_header) }, |
105 | { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats, | 73 | { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, |
106 | oversized_hdr) }, | 74 | oversized_hdr) }, |
107 | }; | 75 | }; |
108 | 76 | ||
109 | /* per rq stats maintained by the device */ | 77 | /* per rq stats maintained by the device */ |
110 | static const struct vmxnet3_stat_desc | 78 | static const struct vmxnet3_stat_desc |
111 | vmxnet3_rq_dev_stats[] = { | 79 | vmxnet3_rq_dev_stats[] = { |
112 | { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, | 80 | { "Rx Queue#", 0 }, |
113 | { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, | 81 | { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, |
114 | { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, | 82 | { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, |
115 | { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, | 83 | { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, |
116 | { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, | 84 | { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, |
117 | { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, | 85 | { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, |
118 | { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, | 86 | { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, |
119 | { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, | 87 | { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, |
120 | { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, | 88 | { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, |
121 | { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, | 89 | { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, |
90 | { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, | ||
122 | }; | 91 | }; |
123 | 92 | ||
124 | /* per rq stats maintained by the driver */ | 93 | /* per rq stats maintained by the driver */ |
125 | static const struct vmxnet3_stat_desc | 94 | static const struct vmxnet3_stat_desc |
126 | vmxnet3_rq_driver_stats[] = { | 95 | vmxnet3_rq_driver_stats[] = { |
127 | /* description, offset */ | 96 | /* description, offset */ |
128 | { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, | 97 | { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, |
129 | drop_total) }, | 98 | drop_total) }, |
130 | { " err", offsetof(struct vmxnet3_rq_driver_stats, | 99 | { " err", offsetof(struct vmxnet3_rq_driver_stats, |
131 | drop_err) }, | 100 | drop_err) }, |
132 | { " fcs", offsetof(struct vmxnet3_rq_driver_stats, | 101 | { " fcs", offsetof(struct vmxnet3_rq_driver_stats, |
133 | drop_fcs) }, | 102 | drop_fcs) }, |
134 | { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, | 103 | { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, |
135 | rx_buf_alloc_failure) }, | 104 | rx_buf_alloc_failure) }, |
136 | }; | 105 | }; |
137 | 106 | ||
138 | /* global stats maintained by the driver */ | 107 | /* global stats maintained by the driver */ |
139 | static const struct vmxnet3_stat_desc | 108 | static const struct vmxnet3_stat_desc |
140 | vmxnet3_global_stats[] = { | 109 | vmxnet3_global_stats[] = { |
141 | /* description, offset */ | 110 | /* description, offset */ |
142 | { "tx timeout count", offsetof(struct vmxnet3_adapter, | 111 | { "tx timeout count", offsetof(struct vmxnet3_adapter, |
143 | tx_timeout_count) } | 112 | tx_timeout_count) } |
144 | }; | 113 | }; |
145 | 114 | ||
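These tables drive both get_strings and get_ethtool_stats below: each entry pairs a display name with the byte offset (via offsetof()) of a 64-bit counter inside the corresponding stats struct, so one loop can emit names and another can emit values. A minimal userspace sketch of the same pattern; the stats struct and the counter values here are made up for illustration:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for a driver stats struct. */
struct rq_driver_stats {
	uint64_t drop_total;
	uint64_t drop_err;
	uint64_t drop_fcs;
	uint64_t rx_buf_alloc_failure;
};

struct stat_desc {
	const char *desc;
	size_t offset;
};

static const struct stat_desc rq_driver_stat_descs[] = {
	{ " drv dropped rx total", offsetof(struct rq_driver_stats, drop_total) },
	{ "    err",               offsetof(struct rq_driver_stats, drop_err) },
	{ "    fcs",               offsetof(struct rq_driver_stats, drop_fcs) },
	{ " rx buf alloc fail",    offsetof(struct rq_driver_stats, rx_buf_alloc_failure) },
};

int main(void)
{
	struct rq_driver_stats stats = { 7, 2, 1, 4 };
	const uint8_t *base = (const uint8_t *)&stats;
	size_t i;

	/* Same walk as the ethtool stats handler: each table entry names a
	 * 64-bit counter by its byte offset inside the stats struct. */
	for (i = 0; i < sizeof(rq_driver_stat_descs) / sizeof(rq_driver_stat_descs[0]); i++)
		printf("%s: %llu\n", rq_driver_stat_descs[i].desc,
		       (unsigned long long)*(const uint64_t *)
		       (base + rq_driver_stat_descs[i].offset));
	return 0;
}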
@@ -153,56 +122,60 @@ vmxnet3_get_stats(struct net_device *netdev) | |||
153 | struct UPT1_TxStats *devTxStats; | 122 | struct UPT1_TxStats *devTxStats; |
154 | struct UPT1_RxStats *devRxStats; | 123 | struct UPT1_RxStats *devRxStats; |
155 | struct net_device_stats *net_stats = &netdev->stats; | 124 | struct net_device_stats *net_stats = &netdev->stats; |
125 | unsigned long flags; | ||
126 | int i; | ||
156 | 127 | ||
157 | adapter = netdev_priv(netdev); | 128 | adapter = netdev_priv(netdev); |
158 | 129 | ||
159 | /* Collect the dev stats into the shared area */ | 130 | /* Collect the dev stats into the shared area */ |
131 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
160 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); | 132 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); |
161 | 133 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | |
162 | /* Assuming that we have a single queue device */ | ||
163 | devTxStats = &adapter->tqd_start->stats; | ||
164 | devRxStats = &adapter->rqd_start->stats; | ||
165 | |||
166 | /* Get access to the driver stats per queue */ | ||
167 | drvTxStats = &adapter->tx_queue.stats; | ||
168 | drvRxStats = &adapter->rx_queue.stats; | ||
169 | 134 | ||
170 | memset(net_stats, 0, sizeof(*net_stats)); | 135 | memset(net_stats, 0, sizeof(*net_stats)); |
136 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
137 | devTxStats = &adapter->tqd_start[i].stats; | ||
138 | drvTxStats = &adapter->tx_queue[i].stats; | ||
139 | net_stats->tx_packets += devTxStats->ucastPktsTxOK + | ||
140 | devTxStats->mcastPktsTxOK + | ||
141 | devTxStats->bcastPktsTxOK; | ||
142 | net_stats->tx_bytes += devTxStats->ucastBytesTxOK + | ||
143 | devTxStats->mcastBytesTxOK + | ||
144 | devTxStats->bcastBytesTxOK; | ||
145 | net_stats->tx_errors += devTxStats->pktsTxError; | ||
146 | net_stats->tx_dropped += drvTxStats->drop_total; | ||
147 | } | ||
171 | 148 | ||
172 | net_stats->rx_packets = devRxStats->ucastPktsRxOK + | 149 | for (i = 0; i < adapter->num_rx_queues; i++) { |
173 | devRxStats->mcastPktsRxOK + | 150 | devRxStats = &adapter->rqd_start[i].stats; |
174 | devRxStats->bcastPktsRxOK; | 151 | drvRxStats = &adapter->rx_queue[i].stats; |
175 | 152 | net_stats->rx_packets += devRxStats->ucastPktsRxOK + | |
176 | net_stats->tx_packets = devTxStats->ucastPktsTxOK + | 153 | devRxStats->mcastPktsRxOK + |
177 | devTxStats->mcastPktsTxOK + | 154 | devRxStats->bcastPktsRxOK; |
178 | devTxStats->bcastPktsTxOK; | ||
179 | |||
180 | net_stats->rx_bytes = devRxStats->ucastBytesRxOK + | ||
181 | devRxStats->mcastBytesRxOK + | ||
182 | devRxStats->bcastBytesRxOK; | ||
183 | |||
184 | net_stats->tx_bytes = devTxStats->ucastBytesTxOK + | ||
185 | devTxStats->mcastBytesTxOK + | ||
186 | devTxStats->bcastBytesTxOK; | ||
187 | 155 | ||
188 | net_stats->rx_errors = devRxStats->pktsRxError; | 156 | net_stats->rx_bytes += devRxStats->ucastBytesRxOK + |
189 | net_stats->tx_errors = devTxStats->pktsTxError; | 157 | devRxStats->mcastBytesRxOK + |
190 | net_stats->rx_dropped = drvRxStats->drop_total; | 158 | devRxStats->bcastBytesRxOK; |
191 | net_stats->tx_dropped = drvTxStats->drop_total; | ||
192 | net_stats->multicast = devRxStats->mcastPktsRxOK; | ||
193 | 159 | ||
160 | net_stats->rx_errors += devRxStats->pktsRxError; | ||
161 | net_stats->rx_dropped += drvRxStats->drop_total; | ||
162 | net_stats->multicast += devRxStats->mcastPktsRxOK; | ||
163 | } | ||
194 | return net_stats; | 164 | return net_stats; |
195 | } | 165 | } |
196 | 166 | ||
197 | static int | 167 | static int |
198 | vmxnet3_get_sset_count(struct net_device *netdev, int sset) | 168 | vmxnet3_get_sset_count(struct net_device *netdev, int sset) |
199 | { | 169 | { |
170 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
200 | switch (sset) { | 171 | switch (sset) { |
201 | case ETH_SS_STATS: | 172 | case ETH_SS_STATS: |
202 | return ARRAY_SIZE(vmxnet3_tq_dev_stats) + | 173 | return (ARRAY_SIZE(vmxnet3_tq_dev_stats) + |
203 | ARRAY_SIZE(vmxnet3_tq_driver_stats) + | 174 | ARRAY_SIZE(vmxnet3_tq_driver_stats)) * |
204 | ARRAY_SIZE(vmxnet3_rq_dev_stats) + | 175 | adapter->num_tx_queues + |
205 | ARRAY_SIZE(vmxnet3_rq_driver_stats) + | 176 | (ARRAY_SIZE(vmxnet3_rq_dev_stats) + |
177 | ARRAY_SIZE(vmxnet3_rq_driver_stats)) * | ||
178 | adapter->num_rx_queues + | ||
206 | ARRAY_SIZE(vmxnet3_global_stats); | 179 | ARRAY_SIZE(vmxnet3_global_stats); |
207 | default: | 180 | default: |
208 | return -EOPNOTSUPP; | 181 | return -EOPNOTSUPP; |
@@ -210,10 +183,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset) | |||
210 | } | 183 | } |
211 | 184 | ||
212 | 185 | ||
186 | /* Should be multiple of 4 */ | ||
187 | #define NUM_TX_REGS 8 | ||
188 | #define NUM_RX_REGS 12 | ||
189 | |||
213 | static int | 190 | static int |
214 | vmxnet3_get_regs_len(struct net_device *netdev) | 191 | vmxnet3_get_regs_len(struct net_device *netdev) |
215 | { | 192 | { |
216 | return 20 * sizeof(u32); | 193 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
194 | return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) + | ||
195 | adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32)); | ||
217 | } | 196 | } |
218 | 197 | ||
219 | 198 | ||
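Worked example (queue counts hypothetical): with NUM_TX_REGS = 8 and NUM_RX_REGS = 12, an adapter with 4 tx and 4 rx queues now reports 4 * 8 * sizeof(u32) + 4 * 12 * sizeof(u32) = 128 + 192 = 320 bytes, where the old single-queue code always returned 20 * sizeof(u32) = 80 bytes.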
@@ -244,29 +223,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | |||
244 | static void | 223 | static void |
245 | vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) | 224 | vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) |
246 | { | 225 | { |
226 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
247 | if (stringset == ETH_SS_STATS) { | 227 | if (stringset == ETH_SS_STATS) { |
248 | int i; | 228 | int i, j; |
249 | 229 | for (j = 0; j < adapter->num_tx_queues; j++) { | |
250 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { | 230 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { |
251 | memcpy(buf, vmxnet3_tq_dev_stats[i].desc, | 231 | memcpy(buf, vmxnet3_tq_dev_stats[i].desc, |
252 | ETH_GSTRING_LEN); | 232 | ETH_GSTRING_LEN); |
253 | buf += ETH_GSTRING_LEN; | 233 | buf += ETH_GSTRING_LEN; |
254 | } | 234 | } |
255 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { | 235 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); |
256 | memcpy(buf, vmxnet3_tq_driver_stats[i].desc, | 236 | i++) { |
257 | ETH_GSTRING_LEN); | 237 | memcpy(buf, vmxnet3_tq_driver_stats[i].desc, |
258 | buf += ETH_GSTRING_LEN; | 238 | ETH_GSTRING_LEN); |
259 | } | 239 | buf += ETH_GSTRING_LEN; |
260 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { | 240 | } |
261 | memcpy(buf, vmxnet3_rq_dev_stats[i].desc, | ||
262 | ETH_GSTRING_LEN); | ||
263 | buf += ETH_GSTRING_LEN; | ||
264 | } | 241 | } |
265 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { | 242 | |
266 | memcpy(buf, vmxnet3_rq_driver_stats[i].desc, | 243 | for (j = 0; j < adapter->num_rx_queues; j++) { |
267 | ETH_GSTRING_LEN); | 244 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { |
268 | buf += ETH_GSTRING_LEN; | 245 | memcpy(buf, vmxnet3_rq_dev_stats[i].desc, |
246 | ETH_GSTRING_LEN); | ||
247 | buf += ETH_GSTRING_LEN; | ||
248 | } | ||
249 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); | ||
250 | i++) { | ||
251 | memcpy(buf, vmxnet3_rq_driver_stats[i].desc, | ||
252 | ETH_GSTRING_LEN); | ||
253 | buf += ETH_GSTRING_LEN; | ||
254 | } | ||
269 | } | 255 | } |
256 | |||
270 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { | 257 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { |
271 | memcpy(buf, vmxnet3_global_stats[i].desc, | 258 | memcpy(buf, vmxnet3_global_stats[i].desc, |
272 | ETH_GSTRING_LEN); | 259 | ETH_GSTRING_LEN); |
@@ -275,29 +262,32 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) | |||
275 | } | 262 | } |
276 | } | 263 | } |
277 | 264 | ||
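As a rough illustration of what these per-queue rows look like to the user (queue count, counter values, and exact indentation are made up; the stat names come from the tables above):

$ ethtool -S eth0
NIC statistics:
     Rx Queue#: 0
       LRO pkts rx: 0
       LRO byte rx: 0
       ucast pkts rx: 10243
       ucast bytes rx: 1529870
     Rx Queue#: 1
       LRO pkts rx: 0
       ...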
278 | static int | 265 | int vmxnet3_set_features(struct net_device *netdev, u32 features) |
279 | vmxnet3_set_flags(struct net_device *netdev, u32 data) | ||
280 | { | 266 | { |
281 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 267 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
282 | u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; | 268 | unsigned long flags; |
283 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; | 269 | u32 changed = features ^ netdev->features; |
284 | |||
285 | if (data & ~ETH_FLAG_LRO) | ||
286 | return -EOPNOTSUPP; | ||
287 | 270 | ||
288 | if (lro_requested ^ lro_present) { | 271 | if (changed & (NETIF_F_RXCSUM|NETIF_F_LRO)) { |
289 | /* toggle the LRO feature*/ | 272 | if (features & NETIF_F_RXCSUM) |
290 | netdev->features ^= NETIF_F_LRO; | 273 | adapter->shared->devRead.misc.uptFeatures |= |
274 | UPT1_F_RXCSUM; | ||
275 | else | ||
276 | adapter->shared->devRead.misc.uptFeatures &= | ||
277 | ~UPT1_F_RXCSUM; | ||
291 | 278 | ||
292 | /* update hardware LRO capability accordingly */ | 279 | /* update hardware LRO capability accordingly */
293 | if (lro_requested) | 280 | if (features & NETIF_F_LRO) |
294 | adapter->shared->devRead.misc.uptFeatures |= | 281 | adapter->shared->devRead.misc.uptFeatures |= |
295 | cpu_to_le64(UPT1_F_LRO); | 282 | UPT1_F_LRO; |
296 | else | 283 | else |
297 | adapter->shared->devRead.misc.uptFeatures &= | 284 | adapter->shared->devRead.misc.uptFeatures &= |
298 | cpu_to_le64(~UPT1_F_LRO); | 285 | ~UPT1_F_LRO; |
286 | |||
287 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
299 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 288 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
300 | VMXNET3_CMD_UPDATE_FEATURE); | 289 | VMXNET3_CMD_UPDATE_FEATURE); |
290 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
301 | } | 291 | } |
302 | return 0; | 292 | return 0; |
303 | } | 293 | } |
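vmxnet3_set_features() follows the usual feature-toggle pattern: XOR the requested feature mask against the current one and only poke the device when a bit it cares about actually flipped. A stand-alone sketch of that test, with hypothetical feature bit values:

#include <stdio.h>
#include <stdint.h>

#define F_RXCSUM (1u << 0)	/* hypothetical bit assignments, */
#define F_LRO    (1u << 1)	/* for illustration only */

int main(void)
{
	uint32_t current   = F_RXCSUM;		/* what the device has now */
	uint32_t requested = F_RXCSUM | F_LRO;	/* what the stack asks for */
	uint32_t changed   = requested ^ current;

	/* Mirrors the changed & (NETIF_F_RXCSUM | NETIF_F_LRO) test above:
	 * touch the device only when a relevant bit actually changed. */
	if (changed & (F_RXCSUM | F_LRO))
		printf("issue VMXNET3_CMD_UPDATE_FEATURE\n");
	else
		printf("nothing to do\n");
	return 0;
}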
@@ -307,28 +297,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev, | |||
307 | struct ethtool_stats *stats, u64 *buf) | 297 | struct ethtool_stats *stats, u64 *buf) |
308 | { | 298 | { |
309 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 299 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
300 | unsigned long flags; | ||
310 | u8 *base; | 301 | u8 *base; |
311 | int i; | 302 | int i; |
303 | int j = 0; | ||
312 | 304 | ||
305 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
313 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); | 306 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); |
307 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
314 | 308 | ||
315 | /* this does assume each counter is 64-bit wide */ | 309 | /* this does assume each counter is 64-bit wide */ |
310 | for (j = 0; j < adapter->num_tx_queues; j++) { | ||
311 | base = (u8 *)&adapter->tqd_start[j].stats; | ||
312 | *buf++ = (u64)j; | ||
313 | for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) | ||
314 | *buf++ = *(u64 *)(base + | ||
315 | vmxnet3_tq_dev_stats[i].offset); | ||
316 | |||
317 | base = (u8 *)&adapter->tx_queue[j].stats; | ||
318 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) | ||
319 | *buf++ = *(u64 *)(base + | ||
320 | vmxnet3_tq_driver_stats[i].offset); | ||
321 | } | ||
316 | 322 | ||
317 | base = (u8 *)&adapter->tqd_start->stats; | 323 | for (j = 0; j < adapter->num_rx_queues; j++) {
318 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) | 324 | base = (u8 *)&adapter->rqd_start[j].stats; |
319 | *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); | 325 | *buf++ = (u64) j; |
320 | 326 | for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) | |
321 | base = (u8 *)&adapter->tx_queue.stats; | 327 | *buf++ = *(u64 *)(base + |
322 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) | 328 | vmxnet3_rq_dev_stats[i].offset); |
323 | *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); | 329 | |
324 | 330 | base = (u8 *)&adapter->rx_queue[j].stats; | |
325 | base = (u8 *)&adapter->rqd_start->stats; | 331 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) |
326 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) | 332 | *buf++ = *(u64 *)(base + |
327 | *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); | 333 | vmxnet3_rq_driver_stats[i].offset); |
328 | 334 | } | |
329 | base = (u8 *)&adapter->rx_queue.stats; | ||
330 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) | ||
331 | *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); | ||
332 | 335 | ||
333 | base = (u8 *)adapter; | 336 | base = (u8 *)adapter; |
334 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) | 337 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) |
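Note that the device-stats loops above start at i = 1: entry 0 of each dev-stats table is the synthetic queue-number row (offset 0, e.g. "Rx Queue#"), and its value is written out explicitly as the queue index j rather than read from the stats structure.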
@@ -341,6 +344,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) | |||
341 | { | 344 | { |
342 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 345 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
343 | u32 *buf = p; | 346 | u32 *buf = p; |
347 | int i = 0, j = 0; | ||
344 | 348 | ||
345 | memset(p, 0, vmxnet3_get_regs_len(netdev)); | 349 | memset(p, 0, vmxnet3_get_regs_len(netdev)); |
346 | 350 | ||
@@ -349,30 +353,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) | |||
349 | /* Update vmxnet3_get_regs_len if we want to dump more registers */ | 353 | /* Update vmxnet3_get_regs_len if we want to dump more registers */ |
350 | 354 | ||
351 | /* make each ring use multiple of 16 bytes */ | 355 | /* make each ring use multiple of 16 bytes */ |
352 | buf[0] = adapter->tx_queue.tx_ring.next2fill; | 356 | for (i = 0; i < adapter->num_tx_queues; i++) { |
353 | buf[1] = adapter->tx_queue.tx_ring.next2comp; | 357 | buf[j++] = adapter->tx_queue[i].tx_ring.next2fill; |
354 | buf[2] = adapter->tx_queue.tx_ring.gen; | 358 | buf[j++] = adapter->tx_queue[i].tx_ring.next2comp; |
355 | buf[3] = 0; | 359 | buf[j++] = adapter->tx_queue[i].tx_ring.gen; |
356 | 360 | buf[j++] = 0; | |
357 | buf[4] = adapter->tx_queue.comp_ring.next2proc; | 361 | |
358 | buf[5] = adapter->tx_queue.comp_ring.gen; | 362 | buf[j++] = adapter->tx_queue[i].comp_ring.next2proc; |
359 | buf[6] = adapter->tx_queue.stopped; | 363 | buf[j++] = adapter->tx_queue[i].comp_ring.gen; |
360 | buf[7] = 0; | 364 | buf[j++] = adapter->tx_queue[i].stopped; |
361 | 365 | buf[j++] = 0; | |
362 | buf[8] = adapter->rx_queue.rx_ring[0].next2fill; | 366 | } |
363 | buf[9] = adapter->rx_queue.rx_ring[0].next2comp; | 367 | |
364 | buf[10] = adapter->rx_queue.rx_ring[0].gen; | 368 | for (i = 0; i < adapter->num_rx_queues; i++) { |
365 | buf[11] = 0; | 369 | buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill; |
366 | 370 | buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp; | |
367 | buf[12] = adapter->rx_queue.rx_ring[1].next2fill; | 371 | buf[j++] = adapter->rx_queue[i].rx_ring[0].gen; |
368 | buf[13] = adapter->rx_queue.rx_ring[1].next2comp; | 372 | buf[j++] = 0; |
369 | buf[14] = adapter->rx_queue.rx_ring[1].gen; | 373 | |
370 | buf[15] = 0; | 374 | buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill; |
371 | 375 | buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp; | |
372 | buf[16] = adapter->rx_queue.comp_ring.next2proc; | 376 | buf[j++] = adapter->rx_queue[i].rx_ring[1].gen; |
373 | buf[17] = adapter->rx_queue.comp_ring.gen; | 377 | buf[j++] = 0; |
374 | buf[18] = 0; | 378 | |
375 | buf[19] = 0; | 379 | buf[j++] = adapter->rx_queue[i].comp_ring.next2proc; |
380 | buf[j++] = adapter->rx_queue[i].comp_ring.gen; | ||
381 | buf[j++] = 0; | ||
382 | buf[j++] = 0; | ||
383 | } | ||
384 | |||
376 | } | 385 | } |
377 | 386 | ||
378 | 387 | ||
@@ -416,10 +425,10 @@ vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
416 | ecmd->transceiver = XCVR_INTERNAL; | 425 | ecmd->transceiver = XCVR_INTERNAL; |
417 | 426 | ||
418 | if (adapter->link_speed) { | 427 | if (adapter->link_speed) { |
419 | ecmd->speed = adapter->link_speed; | 428 | ethtool_cmd_speed_set(ecmd, adapter->link_speed); |
420 | ecmd->duplex = DUPLEX_FULL; | 429 | ecmd->duplex = DUPLEX_FULL; |
421 | } else { | 430 | } else { |
422 | ecmd->speed = -1; | 431 | ethtool_cmd_speed_set(ecmd, -1); |
423 | ecmd->duplex = -1; | 432 | ecmd->duplex = -1; |
424 | } | 433 | } |
425 | return 0; | 434 | return 0; |
@@ -437,8 +446,10 @@ vmxnet3_get_ringparam(struct net_device *netdev, | |||
437 | param->rx_mini_max_pending = 0; | 446 | param->rx_mini_max_pending = 0; |
438 | param->rx_jumbo_max_pending = 0; | 447 | param->rx_jumbo_max_pending = 0; |
439 | 448 | ||
440 | param->rx_pending = adapter->rx_queue.rx_ring[0].size; | 449 | param->rx_pending = adapter->rx_queue[0].rx_ring[0].size * |
441 | param->tx_pending = adapter->tx_queue.tx_ring.size; | 450 | adapter->num_rx_queues; |
451 | param->tx_pending = adapter->tx_queue[0].tx_ring.size * | ||
452 | adapter->num_tx_queues; | ||
442 | param->rx_mini_pending = 0; | 453 | param->rx_mini_pending = 0; |
443 | param->rx_jumbo_pending = 0; | 454 | param->rx_jumbo_pending = 0; |
444 | } | 455 | } |
@@ -482,8 +493,8 @@ vmxnet3_set_ringparam(struct net_device *netdev, | |||
482 | sz) != 0) | 493 | sz) != 0) |
483 | return -EINVAL; | 494 | return -EINVAL; |
484 | 495 | ||
485 | if (new_tx_ring_size == adapter->tx_queue.tx_ring.size && | 496 | if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size && |
486 | new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) { | 497 | new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) { |
487 | return 0; | 498 | return 0; |
488 | } | 499 | } |
489 | 500 | ||
@@ -500,11 +511,12 @@ vmxnet3_set_ringparam(struct net_device *netdev, | |||
500 | 511 | ||
501 | /* recreate the rx queue and the tx queue based on the | 512 | /* recreate the rx queue and the tx queue based on the |
502 | * new sizes */ | 513 | * new sizes */ |
503 | vmxnet3_tq_destroy(&adapter->tx_queue, adapter); | 514 | vmxnet3_tq_destroy_all(adapter); |
504 | vmxnet3_rq_destroy(&adapter->rx_queue, adapter); | 515 | vmxnet3_rq_destroy_all(adapter); |
505 | 516 | ||
506 | err = vmxnet3_create_queues(adapter, new_tx_ring_size, | 517 | err = vmxnet3_create_queues(adapter, new_tx_ring_size, |
507 | new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); | 518 | new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); |
519 | |||
508 | if (err) { | 520 | if (err) { |
509 | /* failed, most likely because of OOM, try default | 521 | /* failed, most likely because of OOM, try default |
510 | * size */ | 522 | * size */ |
@@ -537,6 +549,69 @@ out: | |||
537 | } | 549 | } |
538 | 550 | ||
539 | 551 | ||
552 | static int | ||
553 | vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, | ||
554 | void *rules) | ||
555 | { | ||
556 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
557 | switch (info->cmd) { | ||
558 | case ETHTOOL_GRXRINGS: | ||
559 | info->data = adapter->num_rx_queues; | ||
560 | return 0; | ||
561 | } | ||
562 | return -EOPNOTSUPP; | ||
563 | } | ||
564 | |||
565 | #ifdef VMXNET3_RSS | ||
566 | static int | ||
567 | vmxnet3_get_rss_indir(struct net_device *netdev, | ||
568 | struct ethtool_rxfh_indir *p) | ||
569 | { | ||
570 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
571 | struct UPT1_RSSConf *rssConf = adapter->rss_conf; | ||
572 | unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize); | ||
573 | |||
574 | p->size = rssConf->indTableSize; | ||
575 | while (n--) | ||
576 | p->ring_index[n] = rssConf->indTable[n]; | ||
577 | return 0; | ||
578 | |||
579 | } | ||
580 | |||
581 | static int | ||
582 | vmxnet3_set_rss_indir(struct net_device *netdev, | ||
583 | const struct ethtool_rxfh_indir *p) | ||
584 | { | ||
585 | unsigned int i; | ||
586 | unsigned long flags; | ||
587 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
588 | struct UPT1_RSSConf *rssConf = adapter->rss_conf; | ||
589 | |||
590 | if (p->size != rssConf->indTableSize) | ||
591 | return -EINVAL; | ||
592 | for (i = 0; i < rssConf->indTableSize; i++) { | ||
593 | /* | ||
594 | * Return with error code if any of the queue indices | ||
595 | * is out of range | ||
596 | */ | ||
597 | if (p->ring_index[i] < 0 || | ||
598 | p->ring_index[i] >= adapter->num_rx_queues) | ||
599 | return -EINVAL; | ||
600 | } | ||
601 | |||
602 | for (i = 0; i < rssConf->indTableSize; i++) | ||
603 | rssConf->indTable[i] = p->ring_index[i]; | ||
604 | |||
605 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
606 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
607 | VMXNET3_CMD_UPDATE_RSSIDT); | ||
608 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
609 | |||
610 | return 0; | ||
611 | |||
612 | } | ||
613 | #endif | ||
614 | |||
540 | static struct ethtool_ops vmxnet3_ethtool_ops = { | 615 | static struct ethtool_ops vmxnet3_ethtool_ops = { |
541 | .get_settings = vmxnet3_get_settings, | 616 | .get_settings = vmxnet3_get_settings, |
542 | .get_drvinfo = vmxnet3_get_drvinfo, | 617 | .get_drvinfo = vmxnet3_get_drvinfo, |
@@ -545,21 +620,16 @@ static struct ethtool_ops vmxnet3_ethtool_ops = { | |||
545 | .get_wol = vmxnet3_get_wol, | 620 | .get_wol = vmxnet3_get_wol, |
546 | .set_wol = vmxnet3_set_wol, | 621 | .set_wol = vmxnet3_set_wol, |
547 | .get_link = ethtool_op_get_link, | 622 | .get_link = ethtool_op_get_link, |
548 | .get_rx_csum = vmxnet3_get_rx_csum, | ||
549 | .set_rx_csum = vmxnet3_set_rx_csum, | ||
550 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
551 | .set_tx_csum = ethtool_op_set_tx_hw_csum, | ||
552 | .get_sg = ethtool_op_get_sg, | ||
553 | .set_sg = ethtool_op_set_sg, | ||
554 | .get_tso = ethtool_op_get_tso, | ||
555 | .set_tso = ethtool_op_set_tso, | ||
556 | .get_strings = vmxnet3_get_strings, | 623 | .get_strings = vmxnet3_get_strings, |
557 | .get_flags = ethtool_op_get_flags, | ||
558 | .set_flags = vmxnet3_set_flags, | ||
559 | .get_sset_count = vmxnet3_get_sset_count, | 624 | .get_sset_count = vmxnet3_get_sset_count, |
560 | .get_ethtool_stats = vmxnet3_get_ethtool_stats, | 625 | .get_ethtool_stats = vmxnet3_get_ethtool_stats, |
561 | .get_ringparam = vmxnet3_get_ringparam, | 626 | .get_ringparam = vmxnet3_get_ringparam, |
562 | .set_ringparam = vmxnet3_set_ringparam, | 627 | .set_ringparam = vmxnet3_set_ringparam, |
628 | .get_rxnfc = vmxnet3_get_rxnfc, | ||
629 | #ifdef VMXNET3_RSS | ||
630 | .get_rxfh_indir = vmxnet3_get_rss_indir, | ||
631 | .set_rxfh_indir = vmxnet3_set_rss_indir, | ||
632 | #endif | ||
563 | }; | 633 | }; |
564 | 634 | ||
565 | void vmxnet3_set_ethtool_ops(struct net_device *netdev) | 635 | void vmxnet3_set_ethtool_ops(struct net_device *netdev) |
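vmxnet3_set_rss_indir() above validates a caller-supplied indirection table, copies it into rss_conf, and tells the device to reload it via VMXNET3_CMD_UPDATE_RSSIDT. For background, a minimal userspace-style sketch of what such a table does: the device hashes each flow and indexes the table with the low bits of the hash to pick the rx queue. Table size, queue count, hash value, and the round-robin default below are all hypothetical:

#include <stdio.h>
#include <stdint.h>

#define IND_TABLE_SIZE 32	/* e.g. 8 max rx queues * 4 */

int main(void)
{
	uint8_t ind_table[IND_TABLE_SIZE];
	uint32_t hash = 0xdeadbeef;		/* stand-in for the flow's RSS hash */
	unsigned int i, num_rx_queues = 4;	/* hypothetical queue count */

	/* Round-robin default: spread table slots across the rx queues. */
	for (i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = i % num_rx_queues;

	/* The device indexes the table with the low bits of the hash;
	 * keeping the size a power of two makes the mask cheap. */
	printf("flow -> rx queue %u\n", ind_table[hash & (IND_TABLE_SIZE - 1)]);
	return 0;
}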
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 2121c735cabd..e08d75e3f170 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -55,6 +55,7 @@ | |||
55 | #include <linux/if_vlan.h> | 55 | #include <linux/if_vlan.h> |
56 | #include <linux/if_arp.h> | 56 | #include <linux/if_arp.h> |
57 | #include <linux/inetdevice.h> | 57 | #include <linux/inetdevice.h> |
58 | #include <linux/log2.h> | ||
58 | 59 | ||
59 | #include "vmxnet3_defs.h" | 60 | #include "vmxnet3_defs.h" |
60 | 61 | ||
@@ -68,11 +69,15 @@ | |||
68 | /* | 69 | /* |
69 | * Version numbers | 70 | * Version numbers |
70 | */ | 71 | */ |
71 | #define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.1.18.0-k" |
72 | 73 | ||
73 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
74 | #define VMXNET3_DRIVER_VERSION_NUM 0x01000E00 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01011200 |
75 | 76 | ||
77 | #if defined(CONFIG_PCI_MSI) | ||
78 | /* RSS only makes sense if MSI-X is supported. */ | ||
79 | #define VMXNET3_RSS | ||
80 | #endif | ||
76 | 81 | ||
77 | /* | 82 | /* |
78 | * Capabilities | 83 | * Capabilities |
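The version constant packs one component per byte, so the string and the number can be cross-checked by eye: 0x01011200 decodes to 1.1.18.0, matching "1.1.18.0-k" above. A tiny self-contained decoder (plain C, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t v = 0x01011200;	/* VMXNET3_DRIVER_VERSION_NUM after this patch */

	/* One component per byte, most significant first: 1.1.18.0 */
	printf("%u.%u.%u.%u\n",
	       v >> 24, (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
	return 0;
}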
@@ -218,16 +223,19 @@ struct vmxnet3_tx_ctx { | |||
218 | }; | 223 | }; |
219 | 224 | ||
220 | struct vmxnet3_tx_queue { | 225 | struct vmxnet3_tx_queue { |
226 | char name[IFNAMSIZ+8]; /* To identify interrupt */ | ||
227 | struct vmxnet3_adapter *adapter; | ||
221 | spinlock_t tx_lock; | 228 | spinlock_t tx_lock; |
222 | struct vmxnet3_cmd_ring tx_ring; | 229 | struct vmxnet3_cmd_ring tx_ring; |
223 | struct vmxnet3_tx_buf_info *buf_info; | 230 | struct vmxnet3_tx_buf_info *buf_info; |
224 | struct vmxnet3_tx_data_ring data_ring; | 231 | struct vmxnet3_tx_data_ring data_ring; |
225 | struct vmxnet3_comp_ring comp_ring; | 232 | struct vmxnet3_comp_ring comp_ring; |
226 | struct Vmxnet3_TxQueueCtrl *shared; | 233 | struct Vmxnet3_TxQueueCtrl *shared; |
227 | struct vmxnet3_tq_driver_stats stats; | 234 | struct vmxnet3_tq_driver_stats stats; |
228 | bool stopped; | 235 | bool stopped; |
229 | int num_stop; /* # of times the queue is | 236 | int num_stop; /* # of times the queue is |
230 | * stopped */ | 237 | * stopped */ |
238 | int qid; | ||
231 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | 239 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); |
232 | 240 | ||
233 | enum vmxnet3_rx_buf_type { | 241 | enum vmxnet3_rx_buf_type { |
@@ -259,6 +267,9 @@ struct vmxnet3_rq_driver_stats { | |||
259 | }; | 267 | }; |
260 | 268 | ||
261 | struct vmxnet3_rx_queue { | 269 | struct vmxnet3_rx_queue { |
270 | char name[IFNAMSIZ + 8]; /* To identify interrupt */ | ||
271 | struct vmxnet3_adapter *adapter; | ||
272 | struct napi_struct napi; | ||
262 | struct vmxnet3_cmd_ring rx_ring[2]; | 273 | struct vmxnet3_cmd_ring rx_ring[2]; |
263 | struct vmxnet3_comp_ring comp_ring; | 274 | struct vmxnet3_comp_ring comp_ring; |
264 | struct vmxnet3_rx_ctx rx_ctx; | 275 | struct vmxnet3_rx_ctx rx_ctx; |
@@ -271,7 +282,16 @@ struct vmxnet3_rx_queue { | |||
271 | struct vmxnet3_rq_driver_stats stats; | 282 | struct vmxnet3_rq_driver_stats stats; |
272 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | 283 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); |
273 | 284 | ||
274 | #define VMXNET3_LINUX_MAX_MSIX_VECT 1 | 285 | #define VMXNET3_DEVICE_MAX_TX_QUEUES 8 |
286 | #define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */ | ||
287 | |||
288 | /* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */ | ||
289 | #define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4) | ||
290 | |||
291 | #define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ | ||
292 | VMXNET3_DEVICE_MAX_RX_QUEUES + 1) | ||
293 | #define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */ | ||
294 | |||
275 | 295 | ||
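With VMXNET3_DEVICE_MAX_TX_QUEUES and VMXNET3_DEVICE_MAX_RX_QUEUES both 8, VMXNET3_LINUX_MAX_MSIX_VECT works out to 8 + 8 + 1 = 17 vectors (one per tx queue, one per rx queue, one for events), and VMXNET3_LINUX_MIN_MSIX_VECT = 2 is the floor: a single vector shared by a tx/rx pair plus the event vector.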
276 | struct vmxnet3_intr { | 296 | struct vmxnet3_intr { |
277 | enum vmxnet3_intr_mask_mode mask_mode; | 297 | enum vmxnet3_intr_mask_mode mask_mode; |
@@ -279,35 +299,43 @@ struct vmxnet3_intr { | |||
279 | u8 num_intrs; /* # of intr vectors */ | 299 | u8 num_intrs; /* # of intr vectors */ |
280 | u8 event_intr_idx; /* idx of the intr vector for event */ | 300 | u8 event_intr_idx; /* idx of the intr vector for event */ |
281 | u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ | 301 | u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ |
302 | char event_msi_vector_name[IFNAMSIZ+11]; | ||
282 | #ifdef CONFIG_PCI_MSI | 303 | #ifdef CONFIG_PCI_MSI |
283 | struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; | 304 | struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; |
284 | #endif | 305 | #endif |
285 | }; | 306 | }; |
286 | 307 | ||
308 | /* Interrupt sharing schemes, share_intr */ | ||
309 | #define VMXNET3_INTR_BUDDYSHARE 0 /* Corresponding tx,rx queues share irq */ | ||
310 | #define VMXNET3_INTR_TXSHARE 1 /* All tx queues share one irq */ | ||
311 | #define VMXNET3_INTR_DONTSHARE 2 /* each queue has its own irq */ | ||
312 | |||
313 | |||
287 | #define VMXNET3_STATE_BIT_RESETTING 0 | 314 | #define VMXNET3_STATE_BIT_RESETTING 0 |
288 | #define VMXNET3_STATE_BIT_QUIESCED 1 | 315 | #define VMXNET3_STATE_BIT_QUIESCED 1 |
289 | struct vmxnet3_adapter { | 316 | struct vmxnet3_adapter { |
290 | struct vmxnet3_tx_queue tx_queue; | 317 | struct vmxnet3_tx_queue tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES]; |
291 | struct vmxnet3_rx_queue rx_queue; | 318 | struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES]; |
292 | struct napi_struct napi; | 319 | struct vlan_group *vlan_grp; |
293 | struct vlan_group *vlan_grp; | 320 | struct vmxnet3_intr intr; |
294 | 321 | spinlock_t cmd_lock; | |
295 | struct vmxnet3_intr intr; | 322 | struct Vmxnet3_DriverShared *shared; |
296 | 323 | struct Vmxnet3_PMConf *pm_conf; | |
297 | struct Vmxnet3_DriverShared *shared; | 324 | struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */ |
298 | struct Vmxnet3_PMConf *pm_conf; | 325 | struct Vmxnet3_RxQueueDesc *rqd_start; /* all rx queue desc */ |
299 | struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */ | 326 | struct net_device *netdev; |
300 | struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */ | 327 | struct net_device_stats net_stats; |
301 | struct net_device *netdev; | 328 | struct pci_dev *pdev; |
302 | struct pci_dev *pdev; | 329 | |
303 | 330 | u8 __iomem *hw_addr0; /* for BAR 0 */ | |
304 | u8 *hw_addr0; /* for BAR 0 */ | 331 | u8 __iomem *hw_addr1; /* for BAR 1 */ |
305 | u8 *hw_addr1; /* for BAR 1 */ | 332 | |
306 | 333 | #ifdef VMXNET3_RSS | |
307 | /* feature control */ | 334 | struct UPT1_RSSConf *rss_conf; |
308 | bool rxcsum; | 335 | bool rss; |
309 | bool lro; | 336 | #endif |
310 | bool jumbo_frame; | 337 | u32 num_rx_queues; |
338 | u32 num_tx_queues; | ||
311 | 339 | ||
312 | /* rx buffer related */ | 340 | /* rx buffer related */ |
313 | unsigned skb_buf_size; | 341 | unsigned skb_buf_size; |
@@ -327,17 +355,18 @@ struct vmxnet3_adapter { | |||
327 | unsigned long state; /* VMXNET3_STATE_BIT_xxx */ | 355 | unsigned long state; /* VMXNET3_STATE_BIT_xxx */ |
328 | 356 | ||
329 | int dev_number; | 357 | int dev_number; |
358 | int share_intr; | ||
330 | }; | 359 | }; |
331 | 360 | ||
332 | #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ | 361 | #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ |
333 | writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg)) | 362 | writel((val), (adapter)->hw_addr0 + (reg)) |
334 | #define VMXNET3_READ_BAR0_REG(adapter, reg) \ | 363 | #define VMXNET3_READ_BAR0_REG(adapter, reg) \ |
335 | le32_to_cpu(readl((adapter)->hw_addr0 + (reg))) | 364 | readl((adapter)->hw_addr0 + (reg)) |
336 | 365 | ||
337 | #define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \ | 366 | #define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \ |
338 | writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg)) | 367 | writel((val), (adapter)->hw_addr1 + (reg)) |
339 | #define VMXNET3_READ_BAR1_REG(adapter, reg) \ | 368 | #define VMXNET3_READ_BAR1_REG(adapter, reg) \ |
340 | le32_to_cpu(readl((adapter)->hw_addr1 + (reg))) | 369 | readl((adapter)->hw_addr1 + (reg)) |
341 | 370 | ||
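The cpu_to_le32()/le32_to_cpu() wrappers are dropped from the BAR accessors because readl() and writel() already perform the little-endian conversion themselves; stacking both swaps register values twice on big-endian hosts. A self-contained illustration of that double-swap hazard, using a compiler builtin in place of the kernel helpers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Value a big-endian CPU gets back from readl(), already in CPU
	 * byte order because readl() itself does the LE-to-CPU swap. */
	uint32_t from_readl = 0x12345678;

	/* Wrapping that in le32_to_cpu() adds a second swap on BE hosts. */
	uint32_t double_swapped = __builtin_bswap32(from_readl);

	printf("correct:        0x%08x\n", from_readl);
	printf("double-swapped: 0x%08x\n", double_swapped);	/* 0x78563412 */
	return 0;
}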
342 | #define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5) | 371 | #define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5) |
343 | #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ | 372 | #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ |
@@ -353,10 +382,6 @@ struct vmxnet3_adapter { | |||
353 | #define VMXNET3_MAX_ETH_HDR_SIZE 22 | 382 | #define VMXNET3_MAX_ETH_HDR_SIZE 22 |
354 | #define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) | 383 | #define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) |
355 | 384 | ||
356 | void set_flag_le16(__le16 *data, u16 flag); | ||
357 | void set_flag_le64(__le64 *data, u64 flag); | ||
358 | void reset_flag_le64(__le64 *data, u64 flag); | ||
359 | |||
360 | int | 385 | int |
361 | vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); | 386 | vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); |
362 | 387 | ||
@@ -370,12 +395,13 @@ void | |||
370 | vmxnet3_reset_dev(struct vmxnet3_adapter *adapter); | 395 | vmxnet3_reset_dev(struct vmxnet3_adapter *adapter); |
371 | 396 | ||
372 | void | 397 | void |
373 | vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, | 398 | vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter); |
374 | struct vmxnet3_adapter *adapter); | ||
375 | 399 | ||
376 | void | 400 | void |
377 | vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, | 401 | vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter); |
378 | struct vmxnet3_adapter *adapter); | 402 | |
403 | int | ||
404 | vmxnet3_set_features(struct net_device *netdev, u32 features); | ||
379 | 405 | ||
380 | int | 406 | int |
381 | vmxnet3_create_queues(struct vmxnet3_adapter *adapter, | 407 | vmxnet3_create_queues(struct vmxnet3_adapter *adapter, |