Diffstat (limited to 'drivers/net/igbvf/netdev.c')
 -rw-r--r--  drivers/net/igbvf/netdev.c | 94
 1 file changed, 44 insertions(+), 50 deletions(-)
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index a77afd8a14bb..7012e3da3bdf 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -35,6 +35,7 @@
 #include <linux/netdevice.h>
 #include <linux/tcp.h>
 #include <linux/ipv6.h>
+#include <linux/slab.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 #include <linux/mii.h>
@@ -163,10 +164,10 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
 				buffer_info->page_offset ^= PAGE_SIZE / 2;
 			}
 			buffer_info->page_dma =
-				pci_map_page(pdev, buffer_info->page,
+				dma_map_page(&pdev->dev, buffer_info->page,
 				             buffer_info->page_offset,
 				             PAGE_SIZE / 2,
-				             PCI_DMA_FROMDEVICE);
+				             DMA_FROM_DEVICE);
 		}
 
 		if (!buffer_info->skb) {
@@ -177,9 +178,9 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
 			}
 
 			buffer_info->skb = skb;
-			buffer_info->dma = pci_map_single(pdev, skb->data,
+			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 			                                  bufsz,
-			                                  PCI_DMA_FROMDEVICE);
+			                                  DMA_FROM_DEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
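Note: the two hunks above move the receive-buffer setup from the PCI DMA wrappers (pci_map_page()/pci_map_single() with PCI_DMA_FROMDEVICE) onto the generic DMA API, which takes a struct device pointer and an explicit direction. The following is a minimal sketch of that calling convention, not code from the patch; the name example_map_rx_skb and the bufsz parameter are invented for illustration.

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Illustrative only: map one receive skb the way the converted driver does. */
static dma_addr_t example_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				     unsigned int bufsz)
{
	/* The generic API keys off the struct device embedded in the pci_dev. */
	dma_addr_t dma = dma_map_single(&pdev->dev, skb->data, bufsz,
					DMA_FROM_DEVICE);

	/* The result should be checked with dma_mapping_error(). */
	if (dma_mapping_error(&pdev->dev, dma))
		return 0;	/* caller treats 0 as "not mapped" */

	return dma;
}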
@@ -267,28 +268,28 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
 		prefetch(skb->data - NET_IP_ALIGN);
 		buffer_info->skb = NULL;
 		if (!adapter->rx_ps_hdr_size) {
-			pci_unmap_single(pdev, buffer_info->dma,
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
 			                 adapter->rx_buffer_len,
-			                 PCI_DMA_FROMDEVICE);
+			                 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 			skb_put(skb, length);
 			goto send_up;
 		}
 
 		if (!skb_shinfo(skb)->nr_frags) {
-			pci_unmap_single(pdev, buffer_info->dma,
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
 			                 adapter->rx_ps_hdr_size,
-			                 PCI_DMA_FROMDEVICE);
+			                 DMA_FROM_DEVICE);
 			skb_put(skb, hlen);
 		}
 
 		if (length) {
-			pci_unmap_page(pdev, buffer_info->page_dma,
+			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
 			               PAGE_SIZE / 2,
-			               PCI_DMA_FROMDEVICE);
+			               DMA_FROM_DEVICE);
 			buffer_info->page_dma = 0;
 
-			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 			                   buffer_info->page,
 			                   buffer_info->page_offset,
 			                   length);
@@ -368,15 +369,15 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
 {
 	if (buffer_info->dma) {
 		if (buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev,
+			dma_unmap_page(&adapter->pdev->dev,
 			               buffer_info->dma,
 			               buffer_info->length,
-			               PCI_DMA_TODEVICE);
+			               DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev,
+			dma_unmap_single(&adapter->pdev->dev,
 			                 buffer_info->dma,
 			                 buffer_info->length,
-			                 PCI_DMA_TODEVICE);
+			                 DMA_TO_DEVICE);
 		buffer_info->dma = 0;
 	}
 	if (buffer_info->skb) {
@@ -437,8 +438,8 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-	                                     &tx_ring->dma);
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+	                                   &tx_ring->dma, GFP_KERNEL);
 
 	if (!tx_ring->desc)
 		goto err;
@@ -479,8 +480,8 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
 	rx_ring->size = rx_ring->count * desc_len;
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-	                                     &rx_ring->dma);
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+	                                   &rx_ring->dma, GFP_KERNEL);
 
 	if (!rx_ring->desc)
 		goto err;
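Note: both descriptor rings are now allocated with dma_alloc_coherent(), which takes an explicit GFP flag that pci_alloc_consistent() did not. A rough sketch of the matching allocate/free pair is below; it is illustrative only, and struct example_ring and its helpers are invented here (the driver itself uses struct igbvf_ring).

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

struct example_ring {
	void *desc;		/* CPU address of the descriptor memory */
	dma_addr_t dma;		/* bus address programmed into the hardware */
	unsigned int size;	/* allocation size in bytes */
};

static int example_ring_alloc(struct device *dev, struct example_ring *ring,
			      unsigned int count, unsigned int desc_len)
{
	/* Descriptor rings are sized and 4 KB aligned, as in the hunks above. */
	ring->size = ALIGN(count * desc_len, 4096);
	ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
					GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

static void example_ring_free(struct device *dev, struct example_ring *ring)
{
	/* Must match the coherent allocation exactly (size, CPU addr, DMA addr). */
	dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
	ring->desc = NULL;
}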
@@ -548,7 +549,8 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
 	vfree(tx_ring->buffer_info);
 	tx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+	                  tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -573,13 +575,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
 			if (adapter->rx_ps_hdr_size){
-				pci_unmap_single(pdev, buffer_info->dma,
+				dma_unmap_single(&pdev->dev, buffer_info->dma,
 				                 adapter->rx_ps_hdr_size,
-				                 PCI_DMA_FROMDEVICE);
+				                 DMA_FROM_DEVICE);
 			} else {
-				pci_unmap_single(pdev, buffer_info->dma,
+				dma_unmap_single(&pdev->dev, buffer_info->dma,
 				                 adapter->rx_buffer_len,
-				                 PCI_DMA_FROMDEVICE);
+				                 DMA_FROM_DEVICE);
 			}
 			buffer_info->dma = 0;
 		}
@@ -591,9 +593,10 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
 
 		if (buffer_info->page) {
 			if (buffer_info->page_dma)
-				pci_unmap_page(pdev, buffer_info->page_dma,
+				dma_unmap_page(&pdev->dev,
+				               buffer_info->page_dma,
 				               PAGE_SIZE / 2,
-				               PCI_DMA_FROMDEVICE);
+				               DMA_FROM_DEVICE);
 			put_page(buffer_info->page);
 			buffer_info->page = NULL;
 			buffer_info->page_dma = 0;
@@ -1304,8 +1307,6 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
 
 	/* enable Report Status bit */
 	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
-
-	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
 }
 
 /**
@@ -1399,7 +1400,7 @@ static void igbvf_set_multi(struct net_device *netdev)
 {
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	u8 *mta_list = NULL;
 	int i;
 
@@ -1414,8 +1415,8 @@ static void igbvf_set_multi(struct net_device *netdev)
 
 	/* prepare a packed array of only addresses. */
 	i = 0;
-	netdev_for_each_mc_addr(mc_ptr, netdev)
-		memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+	netdev_for_each_mc_addr(ha, netdev)
+		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
 	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
 	kfree(mta_list);
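Note: the two multicast hunks replace the old struct dev_mc_list walk with struct netdev_hw_addr and the netdev_for_each_mc_addr() iterator. The sketch below shows how such a packed address array can be built; it is illustrative only, and example_pack_mc_list and the GFP_ATOMIC choice are assumptions rather than code from the driver.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: pack the device's multicast addresses into a flat array. */
static u8 *example_pack_mc_list(struct net_device *netdev, int *count)
{
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i = 0;

	*count = 0;
	if (netdev_mc_empty(netdev))
		return NULL;

	mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return NULL;

	/* ha->addr replaces the old mc_ptr->dmi_addr field. */
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	*count = i;
	return mta_list;	/* caller kfree()s the list when done */
}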
@@ -1524,7 +1525,6 @@ void igbvf_down(struct igbvf_adapter *adapter)
 
 	del_timer_sync(&adapter->watchdog_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	netif_carrier_off(netdev);
 
 	/* record the stats before reset*/
@@ -1857,21 +1857,15 @@ static void igbvf_watchdog_task(struct work_struct *work)
 			                              &adapter->link_duplex);
 			igbvf_print_link_info(adapter);
 
-			/*
-			 * tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor
-			 */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
 				txb2b = 0;
-				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
 				txb2b = 0;
-				netdev->tx_queue_len = 100;
 				/* maybe add some timeout factor ? */
 				break;
 			}
@@ -2112,9 +2106,9 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
 	buffer_info->mapped_as_page = false;
-	buffer_info->dma = pci_map_single(pdev, skb->data, len,
-	                                  PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, buffer_info->dma))
+	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
+	                                  DMA_TO_DEVICE);
+	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 		goto dma_error;
 
 
@@ -2135,12 +2129,12 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
 		buffer_info->mapped_as_page = true;
-		buffer_info->dma = pci_map_page(pdev,
+		buffer_info->dma = dma_map_page(&pdev->dev,
 		                                frag->page,
 		                                frag->page_offset,
 		                                len,
-		                                PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+		                                DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
 	}
 
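Note: the transmit-path hunks make the same conversion and also switch the failure check from pci_dma_mapping_error() to dma_mapping_error(). A compressed sketch of the map-then-check step for one paged fragment follows; struct example_tx_buf and example_map_frag are invented for this note, and the driver's full unwind logic (the dma_error label) is not shown.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/types.h>

struct example_tx_buf {
	dma_addr_t dma;
	unsigned int len;
	bool mapped_as_page;
};

static int example_map_frag(struct device *dev, struct example_tx_buf *buf,
			    struct page *page, unsigned int offset,
			    unsigned int len)
{
	buf->len = len;
	buf->mapped_as_page = true;
	buf->dma = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);

	/* On failure the caller unwinds whatever was already mapped. */
	if (dma_mapping_error(dev, buf->dma))
		return -EIO;

	return 0;
}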
@@ -2652,16 +2646,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
 		return err;
 
 	pci_using_dac = 0;
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (!err)
 			pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
+			err = dma_set_coherent_mask(&pdev->dev,
 			                            DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 				        "configuration, aborting\n");
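Note: probe now negotiates DMA masks through dma_set_mask() and dma_set_coherent_mask() rather than the pci_set_dma_mask()/pci_set_consistent_dma_mask() wrappers. Below is a slightly simplified sketch of the 64-bit-with-32-bit-fallback logic; example_set_dma_masks is not a function in the driver, and its error handling is condensed compared to the code above.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int example_set_dma_masks(struct pci_dev *pdev, int *using_dac)
{
	/* Prefer 64-bit DMA for both streaming and coherent mappings. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
		return 0;
	}

	/* Otherwise fall back to a 32-bit mask, or give up. */
	*using_dac = 0;
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;

	return 0;
}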