Diffstat (limited to 'drivers/net/myri10ge/myri10ge.c')
-rw-r--r--	drivers/net/myri10ge/myri10ge.c	152
1 file changed, 118 insertions(+), 34 deletions(-)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 005f2aa7501..ab7755abd20 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -102,6 +102,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
 #define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
 
+#define MYRI10GE_MAX_SLICES 32
+
 struct myri10ge_rx_buffer_state {
 	struct page *page;
 	int page_offset;
@@ -138,6 +140,8 @@ struct myri10ge_rx_buf {
 
 struct myri10ge_tx_buf {
 	struct mcp_kreq_ether_send __iomem *lanai;	/* lanai ptr for sendq */
+	__be32 __iomem *send_go;	/* "go" doorbell ptr */
+	__be32 __iomem *send_stop;	/* "stop" doorbell ptr */
 	struct mcp_kreq_ether_send *req_list;	/* host shadow of sendq */
 	char *req_bytes;
 	struct myri10ge_tx_buffer_state *info;
@@ -149,6 +153,7 @@ struct myri10ge_tx_buf {
 	int done ____cacheline_aligned;	/* transmit slots completed */
 	int pkt_done;			/* packets completed */
 	int wake_queue;
+	int queue_active;
 };
 
 struct myri10ge_rx_done {
@@ -418,6 +423,12 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
 		return -ENOSYS;
 	} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
 		return -E2BIG;
+	} else if (result == MXGEFW_CMD_ERROR_RANGE &&
+		   cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
+		   (data->
+		    data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
+		   0) {
+		return -ERANGE;
 	} else {
 		dev_err(&mgp->pdev->dev,
 			"command %d failed, result = %d\n",
@@ -947,9 +958,24 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 	 */
 
 	cmd.data0 = mgp->num_slices;
-	cmd.data1 = 1;		/* use MSI-X */
+	cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
+	if (mgp->dev->real_num_tx_queues > 1)
+		cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
 	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
 				   &cmd, 0);
+
+	/* Firmware older than 1.4.32 only supports multiple
+	 * RX queues, so if we get an error, first retry using a
+	 * single TX queue before giving up */
+	if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
+		mgp->dev->real_num_tx_queues = 1;
+		cmd.data0 = mgp->num_slices;
+		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
+		status = myri10ge_send_cmd(mgp,
+					   MXGEFW_CMD_ENABLE_RSS_QUEUES,
+					   &cmd, 0);
+	}
+
 	if (status != 0) {
 		dev_err(&mgp->pdev->dev,
 			"failed to set number of slices\n");
@@ -1317,6 +1343,7 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
 {
 	struct pci_dev *pdev = ss->mgp->pdev;
 	struct myri10ge_tx_buf *tx = &ss->tx;
+	struct netdev_queue *dev_queue;
 	struct sk_buff *skb;
 	int idx, len;
 
@@ -1350,11 +1377,31 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
 				       PCI_DMA_TODEVICE);
 		}
 	}
+
+	dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
+	/*
+	 * Make a minimal effort to prevent the NIC from polling an
+	 * idle tx queue. If we can't get the lock we leave the queue
+	 * active. In this case, either a thread was about to start
+	 * using the queue anyway, or we lost a race and the NIC will
+	 * waste some of its resources polling an inactive queue for a
+	 * while.
+	 */
+
+	if ((ss->mgp->dev->real_num_tx_queues > 1) &&
+	    __netif_tx_trylock(dev_queue)) {
+		if (tx->req == tx->done) {
+			tx->queue_active = 0;
+			put_be32(htonl(1), tx->send_stop);
+		}
+		__netif_tx_unlock(dev_queue);
+	}
+
 	/* start the queue if we've stopped it */
-	if (netif_queue_stopped(ss->dev)
+	if (netif_tx_queue_stopped(dev_queue)
 	    && tx->req - tx->done < (tx->mask >> 1)) {
 		tx->wake_queue++;
-		netif_wake_queue(ss->dev);
+		netif_tx_wake_queue(dev_queue);
 	}
 }
 
@@ -1482,9 +1529,9 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
 	u32 send_done_count;
 	int i;
 
-	/* an interrupt on a non-zero slice is implicitly valid
-	 * since MSI-X irqs are not shared */
-	if (ss != mgp->ss) {
+	/* an interrupt on a non-zero receive-only slice is implicitly
+	 * valid since MSI-X irqs are not shared */
+	if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
 		netif_rx_schedule(ss->dev, &ss->napi);
 		return (IRQ_HANDLED);
 	}
@@ -1526,7 +1573,9 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
 		barrier();
 	}
 
-	myri10ge_check_statblock(mgp);
+	/* Only slice 0 updates stats */
+	if (ss == mgp->ss)
+		myri10ge_check_statblock(mgp);
 
 	put_be32(htonl(3), ss->irq_claim + 1);
 	return (IRQ_HANDLED);
@@ -1884,6 +1933,7 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
 	/* ensure req_list entries are aligned to 8 bytes */
 	ss->tx.req_list = (struct mcp_kreq_ether_send *)
 	    ALIGN((unsigned long)ss->tx.req_bytes, 8);
+	ss->tx.queue_active = 0;
 
 	bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
 	ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
@@ -2201,11 +2251,14 @@ static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
 	int status;
 
 	ss = &mgp->ss[slice];
-	cmd.data0 = 0;		/* single slice for now */
-	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
-	ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
-	    (mgp->sram + cmd.data0);
-
+	status = 0;
+	if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
+		cmd.data0 = slice;
+		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
+					   &cmd, 0);
+		ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
+		    (mgp->sram + cmd.data0);
+	}
 	cmd.data0 = slice;
 	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
 				    &cmd, 0);
@@ -2217,6 +2270,10 @@ static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
 	ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
 	    (mgp->sram + cmd.data0);
 
+	ss->tx.send_go = (__iomem __be32 *)
+	    (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
+	ss->tx.send_stop = (__iomem __be32 *)
+	    (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
 	return status;
 
 }
@@ -2230,7 +2287,7 @@ static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
 	ss = &mgp->ss[slice];
 	cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
 	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
-	cmd.data2 = sizeof(struct mcp_irq_data);
+	cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
 	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
 	if (status == -ENOSYS) {
 		dma_addr_t bus = ss->fw_stats_bus;
@@ -2271,7 +2328,9 @@ static int myri10ge_open(struct net_device *dev)
 
 	if (mgp->num_slices > 1) {
 		cmd.data0 = mgp->num_slices;
-		cmd.data1 = 1;	/* use MSI-X */
+		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
+		if (mgp->dev->real_num_tx_queues > 1)
+			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
 		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
 					   &cmd, 0);
 		if (status != 0) {
@@ -2292,6 +2351,7 @@ static int myri10ge_open(struct net_device *dev)
 			printk(KERN_ERR
 			       "myri10ge: %s: failed to setup rss tables\n",
 			       dev->name);
+			goto abort_with_nothing;
 		}
 
 		/* just enable an identity mapping */
@@ -2362,7 +2422,11 @@ static int myri10ge_open(struct net_device *dev)
 		status = myri10ge_allocate_rings(ss);
 		if (status != 0)
 			goto abort_with_rings;
-		if (slice == 0)
+
+		/* only firmware which supports multiple TX queues
+		 * supports setting up the tx stats on non-zero
+		 * slices */
+		if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
 			status = myri10ge_set_stats(mgp, slice);
 		if (status) {
 			printk(KERN_ERR
@@ -2428,7 +2492,8 @@ static int myri10ge_open(struct net_device *dev)
 	mgp->running = MYRI10GE_ETH_RUNNING;
 	mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
 	add_timer(&mgp->watchdog_timer);
-	netif_wake_queue(dev);
+	netif_tx_wake_all_queues(dev);
+
 	return 0;
 
 abort_with_rings:
@@ -2461,7 +2526,8 @@ static int myri10ge_close(struct net_device *dev)
 		napi_disable(&mgp->ss[i].napi);
 	}
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
+
+	netif_tx_stop_all_queues(dev);
 	old_down_cnt = mgp->down_cnt;
 	mb();
 	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
@@ -2566,18 +2632,23 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct mcp_kreq_ether_send *req;
 	struct myri10ge_tx_buf *tx;
 	struct skb_frag_struct *frag;
+	struct netdev_queue *netdev_queue;
 	dma_addr_t bus;
 	u32 low;
 	__be32 high_swapped;
 	unsigned int len;
 	int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
-	u16 pseudo_hdr_offset, cksum_offset;
+	u16 pseudo_hdr_offset, cksum_offset, queue;
 	int cum_len, seglen, boundary, rdma_count;
 	u8 flags, odd_flag;
 
-	/* always transmit through slot 0 */
-	ss = mgp->ss;
+	queue = skb_get_queue_mapping(skb);
+	queue &= (mgp->num_slices - 1);
+
+	ss = &mgp->ss[queue];
+	netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
 	tx = &ss->tx;
+
 again:
 	req = tx->req_list;
 	avail = tx->mask - 1 - (tx->req - tx->done);
@@ -2593,7 +2664,7 @@ again:
 	if ((unlikely(avail < max_segments))) {
 		/* we are out of transmit resources */
 		tx->stop_queue++;
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(netdev_queue);
 		return 1;
 	}
 
@@ -2786,10 +2857,16 @@ again:
 	idx = ((count - 1) + tx->req) & tx->mask;
 	tx->info[idx].last = 1;
 	myri10ge_submit_req(tx, tx->req_list, count);
+	/* if using multiple tx queues, make sure NIC polls the
+	 * current slice */
+	if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
+		tx->queue_active = 1;
+		put_be32(htonl(1), tx->send_go);
+	}
 	tx->pkt_start++;
 	if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
 		tx->stop_queue++;
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(netdev_queue);
 	}
 	dev->trans_start = jiffies;
 	return 0;
@@ -3367,20 +3444,21 @@ static void myri10ge_watchdog(struct work_struct *work)
 		for (i = 0; i < mgp->num_slices; i++) {
 			tx = &mgp->ss[i].tx;
 			printk(KERN_INFO
-			       "myri10ge: %s: (%d): %d %d %d %d %d\n",
-			       mgp->dev->name, i, tx->req, tx->done,
-			       tx->pkt_start, tx->pkt_done,
+			       "myri10ge: %s: (%d): %d %d %d %d %d %d\n",
+			       mgp->dev->name, i, tx->queue_active, tx->req,
+			       tx->done, tx->pkt_start, tx->pkt_done,
 			       (int)ntohl(mgp->ss[i].fw_stats->
 					  send_done_count));
 			msleep(2000);
 			printk(KERN_INFO
-			       "myri10ge: %s: (%d): %d %d %d %d %d\n",
-			       mgp->dev->name, i, tx->req, tx->done,
-			       tx->pkt_start, tx->pkt_done,
+			       "myri10ge: %s: (%d): %d %d %d %d %d %d\n",
+			       mgp->dev->name, i, tx->queue_active, tx->req,
+			       tx->done, tx->pkt_start, tx->pkt_done,
 			       (int)ntohl(mgp->ss[i].fw_stats->
 					  send_done_count));
 		}
 	}
+
 	rtnl_lock();
 	myri10ge_close(mgp->dev);
 	status = myri10ge_load_firmware(mgp, 1);
@@ -3435,10 +3513,14 @@ static void myri10ge_watchdog_timer(unsigned long arg)
 		/* nic seems like it might be stuck.. */
 		if (rx_pause_cnt != mgp->watchdog_pause) {
 			if (net_ratelimit())
-				printk(KERN_WARNING "myri10ge %s:"
+				printk(KERN_WARNING
+				       "myri10ge %s slice %d:"
 				       "TX paused, check link partner\n",
-				       mgp->dev->name);
+				       mgp->dev->name, i);
 		} else {
+			printk(KERN_WARNING
+			       "myri10ge %s slice %d stuck:",
+			       mgp->dev->name, i);
 			reset_needed = 1;
 		}
 	}
@@ -3653,7 +3735,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	int status = -ENXIO;
 	int dac_enabled;
 
-	netdev = alloc_etherdev(sizeof(*mgp));
+	netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
 	if (netdev == NULL) {
 		dev_err(dev, "Could not allocate ethernet device\n");
 		return -ENOMEM;
@@ -3758,7 +3840,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dev_err(&pdev->dev, "failed to alloc slice state\n");
 		goto abort_with_firmware;
 	}
-
+	netdev->real_num_tx_queues = mgp->num_slices;
 	status = myri10ge_reset(mgp);
 	if (status != 0) {
 		dev_err(&pdev->dev, "failed reset\n");
@@ -3782,6 +3864,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	netdev->set_multicast_list = myri10ge_set_multicast_list;
 	netdev->set_mac_address = myri10ge_set_mac_address;
 	netdev->features = mgp->features;
+
 	if (dac_enabled)
 		netdev->features |= NETIF_F_HIGHDMA;
 
@@ -3937,8 +4020,7 @@ static __init int myri10ge_init_module(void)
 	printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name,
 	       MYRI10GE_VERSION_STR);
 
-	if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_SRC_PORT ||
-	    myri10ge_rss_hash < MXGEFW_RSS_HASH_TYPE_IPV4) {
+	if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
 		printk(KERN_ERR
 		       "%s: Illegal rssh hash type %d, defaulting to source port\n",
 		       myri10ge_driver.name, myri10ge_rss_hash);
@@ -3947,6 +4029,8 @@ static __init int myri10ge_init_module(void)
 #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	dca_register_notify(&myri10ge_dca_notifier);
 #endif
+	if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
+		myri10ge_max_slices = MYRI10GE_MAX_SLICES;
 
 	return pci_register_driver(&myri10ge_driver);
 }
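
For reference, below is a minimal sketch (not taken from the driver) of the multiqueue TX pattern this patch adopts: pick the slice from the skb's queue mapping, and stop or wake only that slice's netdev_queue instead of the whole device. The my_priv/my_ring structures, MY_NUM_QUEUES, and both function bodies are hypothetical scaffolding; only skb_get_queue_mapping(), netdev_get_tx_queue(), netif_tx_stop_queue(), netif_tx_queue_stopped() and netif_tx_wake_queue() are the real kernel helpers the patch switches to. The real driver additionally rings per-slice "go"/"stop" doorbells (tx->send_go / tx->send_stop) under __netif_tx_trylock(), which this sketch omits.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_NUM_QUEUES 4		/* hypothetical; myri10ge uses mgp->num_slices */

struct my_ring {
	int req;		/* send slots submitted */
	int done;		/* send slots completed */
	int mask;		/* ring size - 1 (power-of-two ring) */
};

struct my_priv {
	struct my_ring ring[MY_NUM_QUEUES];
};

/* TX path: select the ring from the skb's queue mapping and stop only
 * that queue when its ring is nearly full. */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb) & (MY_NUM_QUEUES - 1);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
	struct my_ring *ring = &priv->ring[queue];
	int avail = ring->mask - 1 - (ring->req - ring->done);

	if (avail < MAX_SKB_FRAGS + 1) {
		/* out of descriptors: stop this queue only */
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	/* ... DMA-map the skb, post descriptors to the NIC, and keep the
	 * skb around until the completion path frees it (elided) ... */
	ring->req++;
	return NETDEV_TX_OK;
}

/* Completion path: wake the queue once its ring drains below half full,
 * mirroring the test in myri10ge_tx_done() above. */
static void my_tx_done(struct net_device *dev, u16 queue, struct my_ring *ring)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	if (netif_tx_queue_stopped(txq) &&
	    ring->req - ring->done < (ring->mask >> 1))
		netif_tx_wake_queue(txq);
}

Stopping only the congested queue is what lets the other slices keep transmitting; that is why the patch replaces netif_stop_queue()/netif_wake_queue() with their per-queue counterparts throughout.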