Diffstat (limited to 'drivers/net/ixgbevf')
-rw-r--r--  drivers/net/ixgbevf/defines.h      | 12
-rw-r--r--  drivers/net/ixgbevf/ixgbevf_main.c | 84
-rw-r--r--  drivers/net/ixgbevf/vf.c           |  3
3 files changed, 60 insertions, 39 deletions
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index c44fdb05447a..ca2c81f49a05 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -41,11 +41,13 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_LINK_SPEED_1GB_FULL	0x0020
 #define IXGBE_LINK_SPEED_10GB_FULL	0x0080
 
 #define IXGBE_CTRL_RST		0x04000000 /* Reset (SW) */
 #define IXGBE_RXDCTL_ENABLE	0x02000000 /* Enable specific Rx Queue */
 #define IXGBE_TXDCTL_ENABLE	0x02000000 /* Enable specific Tx Queue */
 #define IXGBE_LINKS_UP		0x40000000
-#define IXGBE_LINKS_SPEED	0x20000000
+#define IXGBE_LINKS_SPEED_82599		0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599	0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599	0x20000000
 
 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */
 #define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE	8
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index f484161418b6..460c37fee965 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev,
+			dma_unmap_page(&adapter->pdev->dev,
 				       tx_buffer_info->dma,
 				       tx_buffer_info->length,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev,
+			dma_unmap_single(&adapter->pdev->dev,
 					 tx_buffer_info->dma,
 					 tx_buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		tx_buffer_info->dma = 0;
 	}
 	if (tx_buffer_info->skb) {
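Note: the hunk above is representative of the whole patch. The legacy pci_map_*/pci_unmap_* wrappers are replaced by the generic DMA API, which takes the underlying struct device (&pdev->dev) and an enum dma_data_direction instead of the PCI_DMA_* constants. A minimal sketch of the streaming map/unmap pairing, assuming the driver context (pdev) and hypothetical buf/len names:

	dma_addr_t dma;

	/* map buf for device reads; DMA_TO_DEVICE replaces PCI_DMA_TODEVICE */
	dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -ENOMEM;	/* never hand a failed mapping to hardware */

	/* ... hardware consumes the buffer ... */

	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);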
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 				bi->page_offset ^= (PAGE_SIZE / 2);
 			}
 
-			bi->page_dma = pci_map_page(pdev, bi->page,
+			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
 						    bi->page_offset,
 						    (PAGE_SIZE / 2),
-						    PCI_DMA_FROMDEVICE);
+						    DMA_FROM_DEVICE);
 		}
 
 		skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 			bi->skb = skb;
 		}
 		if (!bi->dma) {
-			bi->dma = pci_map_single(pdev, skb->data,
+			bi->dma = dma_map_single(&pdev->dev, skb->data,
 						 rx_ring->rx_buf_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		rx_buffer_info->skb = NULL;
 
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
 		}
 
 		if (upper_len) {
-			pci_unmap_page(pdev, rx_buffer_info->page_dma,
-				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   rx_buffer_info->page,
@@ -961,12 +961,28 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
 	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
 
+	if (!hw->mbx.ops.check_for_ack(hw)) {
+		/*
+		 * checking for the ack clears the PFACK bit. Place
+		 * it back in the v2p_mailbox cache so that anyone
+		 * polling for an ack will not miss it. Also
+		 * avoid the read below because the code to read
+		 * the mailbox will also clear the ack bit. This was
+		 * causing lost acks. Just cache the bit and exit
+		 * the IRQ handler.
+		 */
+		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+		goto out;
+	}
+
+	/* Not an ack interrupt, go ahead and read the message */
 	hw->mbx.ops.read(hw, &msg, 1);
 
 	if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
 		mod_timer(&adapter->watchdog_timer,
 			  round_jiffies(jiffies + 1));
 
+out:
 	return IRQ_HANDLED;
 }
 
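Note: the added block exists because mbx.ops.check_for_ack() clears the PFACK bit as a side effect, and reading the mailbox would clear it again, so a thread polling for the ack could miss it either way. Caching the bit in hw->mbx.v2p_mailbox and returning early keeps the ack visible to that poller. Condensed, the control flow added here amounts to (sketch only, reusing the names from the hunk):

	if (!hw->mbx.ops.check_for_ack(hw)) {
		/* PFACK was consumed by the check; re-arm it in the cache */
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
		return IRQ_HANDLED;	/* skip the mailbox read, which would clear it again */
	}

	hw->mbx.ops.read(hw, &msg, 1);	/* a real PF message is pending */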
@@ -1721,9 +1737,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 		}
 		if (rx_buffer_info->skb) {
@@ -1737,8 +1753,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 		}
 		if (!rx_buffer_info->page)
 			continue;
-		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 		rx_buffer_info->page_dma = 0;
 		put_page(rx_buffer_info->page);
 		rx_buffer_info->page = NULL;
@@ -2445,7 +2461,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -2490,8 +2507,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc)
 		goto err;
 
@@ -2561,8 +2578,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
 
 	if (!rx_ring->desc) {
 		hw_dbg(&adapter->hw,
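Note: dma_alloc_coherent() is the generic replacement for pci_alloc_consistent() and takes an explicit gfp_t, which is why GFP_KERNEL now appears at both ring-allocation call sites. A minimal allocate/free sketch for a descriptor ring, assuming a ring structure with size, desc and dma fields as in this driver:

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size,
					&ring->dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	/* ... descriptors stay shared with the device for the ring's lifetime ... */

	dma_free_coherent(&pdev->dev, ring->size, ring->desc, ring->dma);
	ring->desc = NULL;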
@@ -2623,7 +2640,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -2935,10 +2953,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 
 		tx_buffer_info->length = size;
 		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = pci_map_single(adapter->pdev,
+		tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
 						     skb->data + offset,
-						     size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+						     size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 			goto dma_error;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
@@ -2964,13 +2982,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
 
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = pci_map_page(adapter->pdev,
+			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
 							   frag->page,
 							   offset,
 							   size,
-							   PCI_DMA_TODEVICE);
+							   DMA_TO_DEVICE);
 			tx_buffer_info->mapped_as_page = true;
-			if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 				goto dma_error;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
@@ -3311,14 +3329,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 	if (err)
 		return err;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
+			err = dma_set_coherent_mask(&pdev->dev,
 							  DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index 852e9c4fd934..f6f929958ba0 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -359,7 +359,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 	else
 		*link_up = false;
 
-	if (links_reg & IXGBE_LINKS_SPEED)
+	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+	    IXGBE_LINKS_SPEED_10G_82599)
 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
 	else
 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
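Note: the vf.c change matches the new defines. The old single-bit test (links_reg & IXGBE_LINKS_SPEED, with IXGBE_LINKS_SPEED = 0x20000000) cannot distinguish the 1G encoding (0x20000000) from the 10G encoding (0x30000000), since that bit is set in both, so speed is now decoded by masking the two-bit field and comparing against the 10G value. Written as a standalone helper this would read (hypothetical function name, sketch only):

	/* Decode the 82599 LINKS speed field the way the VF now does. */
	static u32 ixgbevf_links_to_speed(u32 links_reg)
	{
		if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
		    IXGBE_LINKS_SPEED_10G_82599)
			return IXGBE_LINK_SPEED_10GB_FULL;

		return IXGBE_LINK_SPEED_1GB_FULL;
	}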