path: root/drivers/net/igbvf
author	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 00:04:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 00:04:44 -0400
commit	f8965467f366fd18f01feafb5db10512d7b4422c (patch)
tree	3706a9cd779859271ca61b85c63a1bc3f82d626e /drivers/net/igbvf
parent	a26272e5200765691e67d6780e52b32498fdb659 (diff)
parent	2ec8c6bb5d8f3a62a79f463525054bae1e3d4487 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1674 commits)
  qlcnic: adding co maintainer
  ixgbe: add support for active DA cables
  ixgbe: dcb, do not tag tc_prio_control frames
  ixgbe: fix ixgbe_tx_is_paused logic
  ixgbe: always enable vlan strip/insert when DCB is enabled
  ixgbe: remove some redundant code in setting FCoE FIP filter
  ixgbe: fix wrong offset to fc_frame_header in ixgbe_fcoe_ddp
  ixgbe: fix header len when unsplit packet overflows to data buffer
  ipv6: Never schedule DAD timer on dead address
  ipv6: Use POSTDAD state
  ipv6: Use state_lock to protect ifa state
  ipv6: Replace inet6_ifaddr->dead with state
  cxgb4: notify upper drivers if the device is already up when they load
  cxgb4: keep interrupts available when the ports are brought down
  cxgb4: fix initial addition of MAC address
  cnic: Return SPQ credit to bnx2x after ring setup and shutdown.
  cnic: Convert cnic_local_flags to atomic ops.
  can: Fix SJA1000 command register writes on SMP systems
  bridge: fix build for CONFIG_SYSFS disabled
  ARCNET: Limit com20020 PCI ID matches for SOHARD cards
  ...

Fix up various conflicts with pcmcia tree drivers/net/
{pcmcia/3c589_cs.c, wireless/orinoco/orinoco_cs.c and
wireless/orinoco/spectrum_cs.c} and feature removal
(Documentation/feature-removal-schedule.txt).

Also fix a non-content conflict due to pm_qos_requirement getting
renamed in the PM tree (now pm_qos_request) in net/mac80211/scan.c
Diffstat (limited to 'drivers/net/igbvf')
-rw-r--r--	drivers/net/igbvf/ethtool.c	 2
-rw-r--r--	drivers/net/igbvf/netdev.c	82
2 files changed, 42 insertions(+), 42 deletions(-)
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 8afff07ff559..103b3aa1afc2 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -390,8 +390,6 @@ static void igbvf_get_wol(struct net_device *netdev,
 {
 	wol->supported = 0;
 	wol->wolopts = 0;
-
-	return;
 }
 
 static int igbvf_set_wol(struct net_device *netdev,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index f16e981812a9..5e2b2a8c56c6 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -165,10 +165,10 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
 				buffer_info->page_offset ^= PAGE_SIZE / 2;
 			}
 			buffer_info->page_dma =
-				pci_map_page(pdev, buffer_info->page,
+				dma_map_page(&pdev->dev, buffer_info->page,
 					     buffer_info->page_offset,
 					     PAGE_SIZE / 2,
-					     PCI_DMA_FROMDEVICE);
+					     DMA_FROM_DEVICE);
 		}
 
 		if (!buffer_info->skb) {
@@ -179,9 +179,9 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
 			}
 
 			buffer_info->skb = skb;
-			buffer_info->dma = pci_map_single(pdev, skb->data,
+			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 							  bufsz,
-							  PCI_DMA_FROMDEVICE);
+							  DMA_FROM_DEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
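
[Note: the two hunks above migrate RX buffer mapping from the PCI-specific helpers (pci_map_page/pci_map_single with PCI_DMA_FROMDEVICE) to the generic DMA API, which takes a struct device * and an enum dma_data_direction. A minimal standalone sketch of that streaming-mapping pattern follows; struct rx_buffer and the hypothetical_* helpers are illustrative names, not from this patch.]

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Illustrative receive buffer; the driver itself uses struct igbvf_buffer. */
struct rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
};

/* Map an skb's data area for device-to-memory DMA via the generic API. */
static int hypothetical_map_rx(struct pci_dev *pdev, struct rx_buffer *buf,
			       unsigned int bufsz)
{
	buf->dma = dma_map_single(&pdev->dev, buf->skb->data, bufsz,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, buf->dma))
		return -ENOMEM;
	return 0;
}

/* Unmap once the hardware has written the received frame. */
static void hypothetical_unmap_rx(struct pci_dev *pdev, struct rx_buffer *buf,
				  unsigned int bufsz)
{
	dma_unmap_single(&pdev->dev, buf->dma, bufsz, DMA_FROM_DEVICE);
	buf->dma = 0;
}
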
@@ -269,28 +269,28 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
 		prefetch(skb->data - NET_IP_ALIGN);
 		buffer_info->skb = NULL;
 		if (!adapter->rx_ps_hdr_size) {
-			pci_unmap_single(pdev, buffer_info->dma,
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
 					 adapter->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 			skb_put(skb, length);
 			goto send_up;
 		}
 
 		if (!skb_shinfo(skb)->nr_frags) {
-			pci_unmap_single(pdev, buffer_info->dma,
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
 					 adapter->rx_ps_hdr_size,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			skb_put(skb, hlen);
 		}
 
 		if (length) {
-			pci_unmap_page(pdev, buffer_info->page_dma,
+			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
 				       PAGE_SIZE / 2,
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 			buffer_info->page_dma = 0;
 
-			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   buffer_info->page,
 					   buffer_info->page_offset,
 					   length);
@@ -370,15 +370,15 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
 {
 	if (buffer_info->dma) {
 		if (buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev,
+			dma_unmap_page(&adapter->pdev->dev,
 				       buffer_info->dma,
 				       buffer_info->length,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev,
+			dma_unmap_single(&adapter->pdev->dev,
 					 buffer_info->dma,
 					 buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		buffer_info->dma = 0;
 	}
 	if (buffer_info->skb) {
@@ -439,8 +439,8 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
 
 	if (!tx_ring->desc)
 		goto err;
@@ -481,8 +481,8 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
 	rx_ring->size = rx_ring->count * desc_len;
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
 
 	if (!rx_ring->desc)
 		goto err;
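
[Note: these two hunks swap pci_alloc_consistent() for dma_alloc_coherent(), which additionally takes an explicit GFP flag. A minimal sketch of allocating and freeing a descriptor ring with the coherent API follows; struct desc_ring and the hypothetical_* helpers are illustrative names, not from this patch (the driver's real structure is struct igbvf_ring).]

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/pci.h>

/* Illustrative descriptor ring holder. */
struct desc_ring {
	void *desc;		/* CPU address of the ring */
	dma_addr_t dma;		/* bus address handed to the NIC */
	unsigned int size;	/* allocation size in bytes */
};

static int hypothetical_ring_alloc(struct pci_dev *pdev,
				   struct desc_ring *ring, unsigned int size)
{
	/* Descriptor rings are page-aligned, as in the hunks above. */
	ring->size = ALIGN(size, 4096);
	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

static void hypothetical_ring_free(struct pci_dev *pdev, struct desc_ring *ring)
{
	dma_free_coherent(&pdev->dev, ring->size, ring->desc, ring->dma);
	ring->desc = NULL;
}
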
@@ -550,7 +550,8 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
 	vfree(tx_ring->buffer_info);
 	tx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -575,13 +576,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
 			if (adapter->rx_ps_hdr_size){
-				pci_unmap_single(pdev, buffer_info->dma,
+				dma_unmap_single(&pdev->dev, buffer_info->dma,
 						 adapter->rx_ps_hdr_size,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			} else {
-				pci_unmap_single(pdev, buffer_info->dma,
+				dma_unmap_single(&pdev->dev, buffer_info->dma,
 						 adapter->rx_buffer_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			}
 			buffer_info->dma = 0;
 		}
@@ -593,9 +594,10 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
 
 		if (buffer_info->page) {
 			if (buffer_info->page_dma)
-				pci_unmap_page(pdev, buffer_info->page_dma,
+				dma_unmap_page(&pdev->dev,
+					       buffer_info->page_dma,
 					       PAGE_SIZE / 2,
-					       PCI_DMA_FROMDEVICE);
+					       DMA_FROM_DEVICE);
 			put_page(buffer_info->page);
 			buffer_info->page = NULL;
 			buffer_info->page_dma = 0;
@@ -1399,7 +1401,7 @@ static void igbvf_set_multi(struct net_device *netdev)
 {
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	u8 *mta_list = NULL;
 	int i;
 
@@ -1414,8 +1416,8 @@ static void igbvf_set_multi(struct net_device *netdev)
 
 	/* prepare a packed array of only addresses. */
 	i = 0;
-	netdev_for_each_mc_addr(mc_ptr, netdev)
-		memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+	netdev_for_each_mc_addr(ha, netdev)
+		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
 	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
 	kfree(mta_list);
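
[Note: the two igbvf_set_multi() hunks above track the removal of struct dev_mc_list: multicast addresses are now walked with netdev_for_each_mc_addr() over struct netdev_hw_addr entries and read from ha->addr instead of mc_ptr->dmi_addr. A minimal sketch of packing the multicast list into a flat array with the new walker follows; hypothetical_build_mta() is an illustrative helper, not from this patch.]

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Pack the device's multicast MAC addresses into one contiguous buffer. */
static u8 *hypothetical_build_mta(struct net_device *netdev, int *count)
{
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i = 0;

	mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return NULL;

	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	*count = i;
	return mta_list;	/* caller kfree()s the buffer */
}
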
@@ -2105,9 +2107,9 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
 	buffer_info->mapped_as_page = false;
-	buffer_info->dma = pci_map_single(pdev, skb->data, len,
-					  PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, buffer_info->dma))
+	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 		goto dma_error;
 
 
@@ -2128,12 +2130,12 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
 		buffer_info->mapped_as_page = true;
-		buffer_info->dma = pci_map_page(pdev,
+		buffer_info->dma = dma_map_page(&pdev->dev,
 						frag->page,
 						frag->page_offset,
 						len,
-						PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
 	}
 
@@ -2645,16 +2647,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
 		return err;
 
 	pci_using_dac = 0;
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (!err)
 			pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
-							  DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");