author		Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 00:04:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 00:04:44 -0400
commit		f8965467f366fd18f01feafb5db10512d7b4422c (patch)
tree		3706a9cd779859271ca61b85c63a1bc3f82d626e /drivers/net/ixgbevf/ixgbevf_main.c
parent		a26272e5200765691e67d6780e52b32498fdb659 (diff)
parent		2ec8c6bb5d8f3a62a79f463525054bae1e3d4487 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1674 commits)
  qlcnic: adding co maintainer
  ixgbe: add support for active DA cables
  ixgbe: dcb, do not tag tc_prio_control frames
  ixgbe: fix ixgbe_tx_is_paused logic
  ixgbe: always enable vlan strip/insert when DCB is enabled
  ixgbe: remove some redundant code in setting FCoE FIP filter
  ixgbe: fix wrong offset to fc_frame_header in ixgbe_fcoe_ddp
  ixgbe: fix header len when unsplit packet overflows to data buffer
  ipv6: Never schedule DAD timer on dead address
  ipv6: Use POSTDAD state
  ipv6: Use state_lock to protect ifa state
  ipv6: Replace inet6_ifaddr->dead with state
  cxgb4: notify upper drivers if the device is already up when they load
  cxgb4: keep interrupts available when the ports are brought down
  cxgb4: fix initial addition of MAC address
  cnic: Return SPQ credit to bnx2x after ring setup and shutdown.
  cnic: Convert cnic_local_flags to atomic ops.
  can: Fix SJA1000 command register writes on SMP systems
  bridge: fix build for CONFIG_SYSFS disabled
  ARCNET: Limit com20020 PCI ID matches for SOHARD cards
  ...

Fix up various conflicts with the pcmcia tree drivers/net/
{pcmcia/3c589_cs.c, wireless/orinoco/orinoco_cs.c and
wireless/orinoco/spectrum_cs.c} and feature removal
(Documentation/feature-removal-schedule.txt).

Also fix a non-content conflict due to pm_qos_requirement getting
renamed in the PM tree (now pm_qos_request) in net/mac80211/scan.c
Diffstat (limited to 'drivers/net/ixgbevf/ixgbevf_main.c')
-rw-r--r--	drivers/net/ixgbevf/ixgbevf_main.c	125
1 file changed, 57 insertions(+), 68 deletions(-)
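The bulk of this diff mechanically converts the driver from the legacy
pci_* DMA wrappers to the generic DMA API: each call gains an explicit
struct device (&pdev->dev), the PCI_DMA_* direction constants become
DMA_*, and the coherent allocators take an explicit GFP flag. A minimal
sketch of the before/after pattern (the buf/len variables and helper
names here are hypothetical, not from the driver):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Old style: pci_* wrappers keyed on the struct pci_dev. */
	static dma_addr_t map_old(struct pci_dev *pdev, void *buf, size_t len)
	{
		return pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	}

	/* New style: the generic DMA API keyed on the struct device, with
	 * an explicit mapping-error check as the tx-map hunks below do. */
	static dma_addr_t map_new(struct pci_dev *pdev, void *buf, size_t len)
	{
		dma_addr_t dma = dma_map_single(&pdev->dev, buf, len,
						DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, dma))
			return 0;	/* caller unwinds, as ixgbevf_tx_map() does */
		return dma;
	}

The same substitution covers the remaining call sites in the diff:
pci_map_page -> dma_map_page, pci_alloc_consistent ->
dma_alloc_coherent(..., GFP_KERNEL), pci_free_consistent ->
dma_free_coherent, and pci_set_dma_mask / pci_set_consistent_dma_mask ->
dma_set_mask / dma_set_coherent_mask.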
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 0cd6202dfacc..a16cff7e54a3 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev,
+			dma_unmap_page(&adapter->pdev->dev,
 				       tx_buffer_info->dma,
 				       tx_buffer_info->length,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev,
+			dma_unmap_single(&adapter->pdev->dev,
 					 tx_buffer_info->dma,
 					 tx_buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		tx_buffer_info->dma = 0;
 	}
 	if (tx_buffer_info->skb) {
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 			bi->page_offset ^= (PAGE_SIZE / 2);
 		}
 
-		bi->page_dma = pci_map_page(pdev, bi->page,
+		bi->page_dma = dma_map_page(&pdev->dev, bi->page,
 					    bi->page_offset,
 					    (PAGE_SIZE / 2),
-					    PCI_DMA_FROMDEVICE);
+					    DMA_FROM_DEVICE);
 	}
 
 	skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 			bi->skb = skb;
 		}
 		if (!bi->dma) {
-			bi->dma = pci_map_single(pdev, skb->data,
+			bi->dma = dma_map_single(&pdev->dev, skb->data,
 						 rx_ring->rx_buf_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		rx_buffer_info->skb = NULL;
 
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
 		}
 
 		if (upper_len) {
-			pci_unmap_page(pdev, rx_buffer_info->page_dma,
-				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   rx_buffer_info->page,
@@ -604,14 +604,13 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		 * packets not getting split correctly
 		 */
 		if (staterr & IXGBE_RXD_STAT_LB) {
-			u32 header_fixup_len = skb->len - skb->data_len;
+			u32 header_fixup_len = skb_headlen(skb);
 			if (header_fixup_len < 14)
 				skb_push(skb, header_fixup_len);
 		}
 		skb->protocol = eth_type_trans(skb, adapter->netdev);
 
 		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
-		adapter->netdev->last_rx = jiffies;
 
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
@@ -947,8 +946,6 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
 		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
 		ixgbevf_write_eitr(adapter, v_idx, itr_reg);
 	}
-
-	return;
 }
 
 static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
@@ -962,12 +959,28 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
 	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
 
+	if (!hw->mbx.ops.check_for_ack(hw)) {
+		/*
+		 * checking for the ack clears the PFACK bit.  Place
+		 * it back in the v2p_mailbox cache so that anyone
+		 * polling for an ack will not miss it.  Also
+		 * avoid the read below because the code to read
+		 * the mailbox will also clear the ack bit.  This was
+		 * causing lost acks.  Just cache the bit and exit
+		 * the IRQ handler.
+		 */
+		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+		goto out;
+	}
+
+	/* Not an ack interrupt, go ahead and read the message */
 	hw->mbx.ops.read(hw, &msg, 1);
 
 	if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
 		mod_timer(&adapter->watchdog_timer,
 			  round_jiffies(jiffies + 1));
 
+out:
 	return IRQ_HANDLED;
 }
 
@@ -1496,22 +1509,6 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
 	}
 }
 
-static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
-				 u32 *vmdq)
-{
-	struct dev_mc_list *mc_ptr;
-	u8 *addr = *mc_addr_ptr;
-	*vmdq = 0;
-
-	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
-	if (mc_ptr->next)
-		*mc_addr_ptr = mc_ptr->next->dmi_addr;
-	else
-		*mc_addr_ptr = NULL;
-
-	return addr;
-}
-
 /**
  * ixgbevf_set_rx_mode - Multicast set
  * @netdev: network interface device structure
@@ -1524,16 +1521,10 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	u8 *addr_list = NULL;
-	int addr_count = 0;
 
 	/* reprogram multicast list */
-	addr_count = netdev_mc_count(netdev);
-	if (addr_count)
-		addr_list = netdev->mc_list->dmi_addr;
 	if (hw->mac.ops.update_mc_addr_list)
-		hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
-						ixgbevf_addr_list_itr);
+		hw->mac.ops.update_mc_addr_list(hw, netdev);
 }
 
 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1744,9 +1735,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 		}
 		if (rx_buffer_info->skb) {
@@ -1760,8 +1751,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 		}
 		if (!rx_buffer_info->page)
 			continue;
-		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 		rx_buffer_info->page_dma = 0;
 		put_page(rx_buffer_info->page);
 		rx_buffer_info->page = NULL;
@@ -2158,8 +2149,6 @@ static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
 	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
-
-	return;
 }
 
 /**
@@ -2418,9 +2407,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
 
 	if (link_up) {
 		if (!netif_carrier_ok(netdev)) {
-			hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
-			       ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-				"10 Gbps\n" : "1 Gbps\n"));
+			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
+			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+			       10 : 1);
 			netif_carrier_on(netdev);
 			netif_tx_wake_all_queues(netdev);
 		} else {
@@ -2468,7 +2457,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -2513,8 +2503,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc)
 		goto err;
 
@@ -2584,8 +2574,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
 
 	if (!rx_ring->desc) {
 		hw_dbg(&adapter->hw,
@@ -2646,7 +2636,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -2958,10 +2949,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 
 		tx_buffer_info->length = size;
 		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = pci_map_single(adapter->pdev,
+		tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
 						     skb->data + offset,
-						     size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+						     size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 			goto dma_error;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
@@ -2987,13 +2978,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
 
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = pci_map_page(adapter->pdev,
+			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
 							   frag->page,
 							   offset,
 							   size,
-							   PCI_DMA_TODEVICE);
+							   DMA_TO_DEVICE);
 			tx_buffer_info->mapped_as_page = true;
-			if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 				goto dma_error;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
@@ -3189,8 +3180,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 			 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
 			 skb->len, hdr_len);
 
-	netdev->trans_start = jiffies;
-
 	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
 	return NETDEV_TX_OK;
@@ -3334,14 +3323,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 	if (err)
 		return err;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
-							  DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");
@@ -3482,7 +3471,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 
 	hw_dbg(hw, "MAC: %d\n", hw->mac.type);
 
-	hw_dbg(hw, "LRO is disabled \n");
+	hw_dbg(hw, "LRO is disabled\n");
 
 	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
 	cards_found++;