author     Alexander Duyck <alexander.h.duyck@intel.com>    2009-10-27 11:51:47 -0400
committer  David S. Miller <davem@davemloft.net>            2009-10-28 04:20:26 -0400
commit     80785298aa5b6f2005a34afb97457ae7a65af270
tree       c236ae1f3f688b816b1fa2475fcd7ccd67261ac4
parent     fce99e341524c204ef3dd3e7c5f77265a7e05ddd
igb: add pci device pointer to ring structure
This patch adds a pci device pointer to the ring structure. The main use of
this pointer is for memory mapping/unmapping of the rings.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/igb/igb.h         |  5
-rw-r--r--  drivers/net/igb/igb_ethtool.c |  4
-rw-r--r--  drivers/net/igb/igb_main.c    | 72
3 files changed, 40 insertions(+), 41 deletions(-)
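
Every hunk below follows the same pattern: the ring now carries its own struct pci_dev *, so the DMA helpers can take just a ring pointer instead of reaching back through an igb_adapter. As a rough sketch of the resulting shape (not code from this patch: the type name igb_ring_view and the helper ring_free_desc are invented for illustration, and the buffer_info bookkeeping is simplified), the teardown path ends up looking like this:

#include <linux/pci.h>
#include <linux/vmalloc.h>

/* Illustrative, trimmed-down view of the ring after this patch: the ring
 * itself records which PCI device its descriptor memory was mapped against.
 * (Field names mirror struct igb_ring; the struct name is hypothetical.) */
struct igb_ring_view {
        struct pci_dev *pdev;   /* pci device for dma mapping */
        void *desc;             /* descriptor ring memory */
        dma_addr_t dma;         /* phys address of the ring */
        unsigned int size;      /* length of desc. ring in bytes */
        void *buffer_info;      /* per-descriptor bookkeeping (vmalloc'ed) */
};

/* Free the descriptor ring using only the ring, mirroring the reworked
 * igb_free_tx_resources()/igb_free_rx_resources(): no adapter pointer needed. */
static void ring_free_desc(struct igb_ring_view *ring)
{
        vfree(ring->buffer_info);
        ring->buffer_info = NULL;

        pci_free_consistent(ring->pdev, ring->size,
                            ring->desc, ring->dma);
        ring->desc = NULL;
}

Compare with the igb_free_tx_resources() hunk further down, which does exactly this minus the renaming; the ethtool and hot-path callers then only need the ring.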
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index e52fee44aeac..de268620dd92 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -173,6 +173,7 @@ struct igb_q_vector {
 struct igb_ring {
         struct igb_q_vector *q_vector; /* backlink to q_vector */
         void *desc;                    /* descriptor ring memory */
+        struct pci_dev *pdev;          /* pci device for dma mapping */
         dma_addr_t dma;                /* phys address of the ring */
         unsigned int size;             /* length of desc. ring in bytes */
         unsigned int count;            /* number of desc. in the ring */
@@ -325,8 +326,8 @@ extern void igb_down(struct igb_adapter *);
 extern void igb_reinit_locked(struct igb_adapter *);
 extern void igb_reset(struct igb_adapter *);
 extern int igb_set_spd_dplx(struct igb_adapter *, u16);
-extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *);
+extern int igb_setup_tx_resources(struct igb_ring *);
+extern int igb_setup_rx_resources(struct igb_ring *);
 extern void igb_free_tx_resources(struct igb_ring *);
 extern void igb_free_rx_resources(struct igb_ring *);
 extern void igb_update_stats(struct igb_adapter *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 2929546115c1..c48a555bda2c 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -794,7 +794,7 @@ static int igb_set_ringparam(struct net_device *netdev,
 
         for (i = 0; i < adapter->num_tx_queues; i++) {
                 temp_ring[i].count = new_tx_count;
-                err = igb_setup_tx_resources(adapter, &temp_ring[i]);
+                err = igb_setup_tx_resources(&temp_ring[i]);
                 if (err) {
                         while (i) {
                                 i--;
@@ -819,7 +819,7 @@ static int igb_set_ringparam(struct net_device *netdev,
 
         for (i = 0; i < adapter->num_rx_queues; i++) {
                 temp_ring[i].count = new_rx_count;
-                err = igb_setup_rx_resources(adapter, &temp_ring[i]);
+                err = igb_setup_rx_resources(&temp_ring[i]);
                 if (err) {
                         while (i) {
                                 i--;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 2728f9316027..ff16b7ac0d1e 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -436,11 +436,13 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
                 struct igb_ring *ring = &(adapter->tx_ring[i]);
                 ring->count = adapter->tx_ring_count;
                 ring->queue_index = i;
+                ring->pdev = adapter->pdev;
         }
         for (i = 0; i < adapter->num_rx_queues; i++) {
                 struct igb_ring *ring = &(adapter->rx_ring[i]);
                 ring->count = adapter->rx_ring_count;
                 ring->queue_index = i;
+                ring->pdev = adapter->pdev;
         }
 
         igb_cache_ring_register(adapter);
@@ -2002,15 +2004,13 @@ static int igb_close(struct net_device *netdev)
 
 /**
  * igb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int igb_setup_tx_resources(struct igb_adapter *adapter,
-                           struct igb_ring *tx_ring)
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
-        struct pci_dev *pdev = adapter->pdev;
+        struct pci_dev *pdev = tx_ring->pdev;
         int size;
 
         size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2053,7 +2053,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
         int r_idx;
 
         for (i = 0; i < adapter->num_tx_queues; i++) {
-                err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+                err = igb_setup_tx_resources(&adapter->tx_ring[i]);
                 if (err) {
                         dev_err(&adapter->pdev->dev,
                                 "Allocation for Tx Queue %u failed\n", i);
@@ -2156,15 +2156,13 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 
 /**
  * igb_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
  * @rx_ring: rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int igb_setup_rx_resources(struct igb_adapter *adapter,
-                           struct igb_ring *rx_ring)
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
-        struct pci_dev *pdev = adapter->pdev;
+        struct pci_dev *pdev = rx_ring->pdev;
         int size, desc_len;
 
         size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2192,7 +2190,7 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 
 err:
         vfree(rx_ring->buffer_info);
-        dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
+        dev_err(&pdev->dev, "Unable to allocate memory for "
                 "the receive descriptor ring\n");
         return -ENOMEM;
 }
@@ -2209,7 +2207,7 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
         int i, err = 0;
 
         for (i = 0; i < adapter->num_rx_queues; i++) {
-                err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+                err = igb_setup_rx_resources(&adapter->rx_ring[i]);
                 if (err) {
                         dev_err(&adapter->pdev->dev,
                                 "Allocation for Rx Queue %u failed\n", i);
@@ -2497,14 +2495,13 @@ static void igb_configure_rx(struct igb_adapter *adapter)
  **/
 void igb_free_tx_resources(struct igb_ring *tx_ring)
 {
-        struct pci_dev *pdev = tx_ring->q_vector->adapter->pdev;
-
         igb_clean_tx_ring(tx_ring);
 
         vfree(tx_ring->buffer_info);
         tx_ring->buffer_info = NULL;
 
-        pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+        pci_free_consistent(tx_ring->pdev, tx_ring->size,
+                            tx_ring->desc, tx_ring->dma);
 
         tx_ring->desc = NULL;
 }
@@ -2523,12 +2520,13 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
                 igb_free_tx_resources(&adapter->tx_ring[i]);
 }
 
-static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
+static void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
                                            struct igb_buffer *buffer_info)
 {
         buffer_info->dma = 0;
         if (buffer_info->skb) {
-                skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+                skb_dma_unmap(&tx_ring->pdev->dev,
+                              buffer_info->skb,
                               DMA_TO_DEVICE);
                 dev_kfree_skb_any(buffer_info->skb);
                 buffer_info->skb = NULL;
@@ -2543,7 +2541,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
  **/
 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
-        struct igb_adapter *adapter = tx_ring->q_vector->adapter;
         struct igb_buffer *buffer_info;
         unsigned long size;
         unsigned int i;
@@ -2554,7 +2551,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 
         for (i = 0; i < tx_ring->count; i++) {
                 buffer_info = &tx_ring->buffer_info[i];
-                igb_unmap_and_free_tx_resource(adapter, buffer_info);
+                igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
         }
 
         size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2591,14 +2588,13 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
  **/
 void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
-        struct pci_dev *pdev = rx_ring->q_vector->adapter->pdev;
-
         igb_clean_rx_ring(rx_ring);
 
         vfree(rx_ring->buffer_info);
         rx_ring->buffer_info = NULL;
 
-        pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+        pci_free_consistent(rx_ring->pdev, rx_ring->size,
+                            rx_ring->desc, rx_ring->dma);
 
         rx_ring->desc = NULL;
 }
@@ -2625,7 +2621,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
         struct igb_adapter *adapter = rx_ring->q_vector->adapter;
         struct igb_buffer *buffer_info;
-        struct pci_dev *pdev = adapter->pdev;
         unsigned long size;
         unsigned int i;
 
@@ -2635,7 +2630,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
         for (i = 0; i < rx_ring->count; i++) {
                 buffer_info = &rx_ring->buffer_info[i];
                 if (buffer_info->dma) {
-                        pci_unmap_single(pdev, buffer_info->dma,
+                        pci_unmap_single(rx_ring->pdev,
+                                         buffer_info->dma,
                                          adapter->rx_buffer_len,
                                          PCI_DMA_FROMDEVICE);
                         buffer_info->dma = 0;
@@ -2646,7 +2642,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
                         buffer_info->skb = NULL;
                 }
                 if (buffer_info->page_dma) {
-                        pci_unmap_page(pdev, buffer_info->page_dma,
+                        pci_unmap_page(rx_ring->pdev,
+                                       buffer_info->page_dma,
                                        PAGE_SIZE / 2,
                                        PCI_DMA_FROMDEVICE);
                         buffer_info->page_dma = 0;
@@ -3362,9 +3359,10 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
                                    struct sk_buff *skb, u32 tx_flags)
 {
         struct e1000_adv_tx_context_desc *context_desc;
-        unsigned int i;
+        struct pci_dev *pdev = tx_ring->pdev;
         struct igb_buffer *buffer_info;
         u32 info = 0, tu_cmd = 0;
+        unsigned int i;
 
         if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
             (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3411,7 +3409,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
                                 break;
                         default:
                                 if (unlikely(net_ratelimit()))
-                                        dev_warn(&adapter->pdev->dev,
+                                        dev_warn(&pdev->dev,
                                                  "partial checksum but proto=%x!\n",
                                                  skb->protocol);
                                 break;
@@ -3443,11 +3441,11 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 #define IGB_MAX_TXD_PWR	16
 #define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
 
-static inline int igb_tx_map_adv(struct igb_adapter *adapter,
-                                 struct igb_ring *tx_ring, struct sk_buff *skb,
+static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
                                  unsigned int first)
 {
         struct igb_buffer *buffer_info;
+        struct pci_dev *pdev = tx_ring->pdev;
         unsigned int len = skb_headlen(skb);
         unsigned int count = 0, i;
         unsigned int f;
@@ -3455,8 +3453,8 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 
         i = tx_ring->next_to_use;
 
-        if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-                dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+        if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
+                dev_err(&pdev->dev, "TX DMA map failed\n");
                 return 0;
         }
 
@@ -3667,7 +3665,7 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
          * count reflects descriptors mapped, if 0 then mapping error
          * has occured and we need to rewind the descriptor queue
          */
-        count = igb_tx_map_adv(adapter, tx_ring, skb, first);
+        count = igb_tx_map_adv(tx_ring, skb, first);
 
         if (count) {
                 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
@@ -4710,7 +4708,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                                 igb_tx_hwtstamp(adapter, skb);
                         }
 
-                        igb_unmap_and_free_tx_resource(adapter, buffer_info);
+                        igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
                         tx_desc->wb.status = 0;
 
                         i++;
@@ -4748,7 +4746,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                      E1000_STATUS_TXOFF)) {
 
                         /* detected Tx unit hang */
-                        dev_err(&adapter->pdev->dev,
+                        dev_err(&tx_ring->pdev->dev,
                                 "Detected Tx Unit Hang\n"
                                 " Tx Queue <%d>\n"
                                 " TDH <%x>\n"
@@ -4851,7 +4849,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
         struct net_device *netdev = adapter->netdev;
         struct igb_ring *rx_ring = q_vector->rx_ring;
         struct e1000_hw *hw = &adapter->hw;
-        struct pci_dev *pdev = adapter->pdev;
+        struct pci_dev *pdev = rx_ring->pdev;
         union e1000_adv_rx_desc *rx_desc , *next_rxd;
         struct igb_buffer *buffer_info , *next_buffer;
         struct sk_buff *skb;
@@ -5027,7 +5025,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 {
         struct igb_adapter *adapter = rx_ring->q_vector->adapter;
         struct net_device *netdev = adapter->netdev;
-        struct pci_dev *pdev = adapter->pdev;
         union e1000_adv_rx_desc *rx_desc;
         struct igb_buffer *buffer_info;
         struct sk_buff *skb;
@@ -5054,7 +5051,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                         buffer_info->page_offset ^= PAGE_SIZE / 2;
                 }
                 buffer_info->page_dma =
-                        pci_map_page(pdev, buffer_info->page,
+                        pci_map_page(rx_ring->pdev, buffer_info->page,
                                      buffer_info->page_offset,
                                      PAGE_SIZE / 2,
                                      PCI_DMA_FROMDEVICE);
@@ -5068,7 +5065,8 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                 }
 
                 buffer_info->skb = skb;
-                buffer_info->dma = pci_map_single(pdev, skb->data,
+                buffer_info->dma = pci_map_single(rx_ring->pdev,
+                                                  skb->data,
                                                   bufsz,
                                                   PCI_DMA_FROMDEVICE);
         }