Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/igb/igb.h          |   2
-rw-r--r--  drivers/net/igb/igb_ethtool.c  |   8
-rw-r--r--  drivers/net/igb/igb_main.c     | 109
3 files changed, 61 insertions(+), 58 deletions(-)
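This patch converts igb from the legacy PCI DMA compatibility wrappers to the generic DMA API: struct igb_ring stores a struct device * instead of a struct pci_dev *, and each pci_* DMA call becomes its dma_* counterpart operating on &pdev->dev. The substitutions applied throughout the diff follow this correspondence (a sketch for orientation; the legacy wrappers are thin inlines over the same dma_* functions):

    /* legacy PCI wrapper                ->  generic DMA API call                       */
    pci_set_dma_mask(pdev, mask)             dma_set_mask(&pdev->dev, mask)
    pci_set_consistent_dma_mask(pdev, m)     dma_set_coherent_mask(&pdev->dev, m)
    pci_alloc_consistent(pdev, sz, &h)       dma_alloc_coherent(&pdev->dev, sz, &h, gfp)
    pci_free_consistent(pdev, sz, v, h)      dma_free_coherent(&pdev->dev, sz, v, h)
    pci_map_single(pdev, p, sz, dir)         dma_map_single(&pdev->dev, p, sz, dir)
    pci_unmap_page(pdev, h, sz, dir)         dma_unmap_page(&pdev->dev, h, sz, dir)
    pci_dma_mapping_error(pdev, h)           dma_mapping_error(&pdev->dev, h)
    PCI_DMA_TODEVICE / PCI_DMA_FROMDEVICE    DMA_TO_DEVICE / DMA_FROM_DEVICE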
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 7d288ccca1c..096a526e912 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -186,7 +186,7 @@ struct igb_q_vector {
 struct igb_ring {
         struct igb_q_vector *q_vector; /* backlink to q_vector */
         struct net_device *netdev;     /* back pointer to net_device */
-        struct pci_dev *pdev;          /* pci device for dma mapping */
+        struct device *dev;            /* device pointer for dma mapping */
         dma_addr_t dma;                /* phys address of the ring */
         void *desc;                    /* descriptor ring memory */
         unsigned int size;             /* length of desc. ring in bytes */
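Note: holding the bus-agnostic struct device * in the ring means the hot-path mapping code no longer depends on PCI specifics. The pointer is filled in once from the owning PCI device, as the setup hunks below do:

    /* ring setup pattern used throughout this patch */
    ring->dev = &adapter->pdev->dev;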
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 1b8fd7f4064..f2ebf927e4b 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1394,7 +1394,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
 
         /* Setup Tx descriptor ring and Tx buffers */
         tx_ring->count = IGB_DEFAULT_TXD;
-        tx_ring->pdev = adapter->pdev;
+        tx_ring->dev = &adapter->pdev->dev;
         tx_ring->netdev = adapter->netdev;
         tx_ring->reg_idx = adapter->vfs_allocated_count;
 
@@ -1408,7 +1408,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
 
         /* Setup Rx descriptor ring and Rx buffers */
         rx_ring->count = IGB_DEFAULT_RXD;
-        rx_ring->pdev = adapter->pdev;
+        rx_ring->dev = &adapter->pdev->dev;
         rx_ring->netdev = adapter->netdev;
         rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
         rx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1604,10 +1604,10 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
                 buffer_info = &rx_ring->buffer_info[rx_ntc];
 
                 /* unmap rx buffer, will be remapped by alloc_rx_buffers */
-                pci_unmap_single(rx_ring->pdev,
+                dma_unmap_single(rx_ring->dev,
                                  buffer_info->dma,
                                  rx_ring->rx_buffer_len,
-                                 PCI_DMA_FROMDEVICE);
+                                 DMA_FROM_DEVICE);
                 buffer_info->dma = 0;
 
                 /* verify contents of skb */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 919e3638667..9d042fe299c 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -350,7 +350,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
                         goto err;
                 ring->count = adapter->tx_ring_count;
                 ring->queue_index = i;
-                ring->pdev = adapter->pdev;
+                ring->dev = &adapter->pdev->dev;
                 ring->netdev = adapter->netdev;
                 /* For 82575, context index must be unique per ring. */
                 if (adapter->hw.mac.type == e1000_82575)
@@ -364,7 +364,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
                         goto err;
                 ring->count = adapter->rx_ring_count;
                 ring->queue_index = i;
-                ring->pdev = adapter->pdev;
+                ring->dev = &adapter->pdev->dev;
                 ring->netdev = adapter->netdev;
                 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -1398,15 +1398,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                 return err;
 
         pci_using_dac = 0;
-        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
         if (!err) {
-                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+                err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                 if (!err)
                         pci_using_dac = 1;
         } else {
-                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                 if (err) {
-                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+                        err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
                         if (err) {
                                 dev_err(&pdev->dev, "No usable DMA "
                                         "configuration, aborting\n");
@@ -2080,7 +2080,7 @@ static int igb_close(struct net_device *netdev)
  **/
 int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
-        struct pci_dev *pdev = tx_ring->pdev;
+        struct device *dev = tx_ring->dev;
         int size;
 
         size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2093,9 +2093,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
         tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
         tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-        tx_ring->desc = pci_alloc_consistent(pdev,
-                                             tx_ring->size,
-                                             &tx_ring->dma);
+        tx_ring->desc = dma_alloc_coherent(dev,
+                                           tx_ring->size,
+                                           &tx_ring->dma,
+                                           GFP_KERNEL);
 
         if (!tx_ring->desc)
                 goto err;
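Note: the explicit GFP_KERNEL is a behavioral change, not just a rename. pci_alloc_consistent() hard-codes GFP_ATOMIC, while dma_alloc_coherent() takes the gfp_t from the caller, so ring setup (process context) can now use a sleeping allocation. For reference, the compat wrapper of this era looks roughly like the following (quoted from memory of include/asm-generic/pci-dma-compat.h; treat as a sketch):

    static inline void *
    pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                         dma_addr_t *dma_handle)
    {
            /* forwards to the generic call with a hard-coded GFP_ATOMIC */
            return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
                                      size, dma_handle, GFP_ATOMIC);
    }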
@@ -2106,7 +2107,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 
 err:
         vfree(tx_ring->buffer_info);
-        dev_err(&pdev->dev,
+        dev_err(dev,
                 "Unable to allocate memory for the transmit descriptor ring\n");
         return -ENOMEM;
 }
@@ -2230,7 +2231,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
  **/
 int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
-        struct pci_dev *pdev = rx_ring->pdev;
+        struct device *dev = rx_ring->dev;
         int size, desc_len;
 
         size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2245,8 +2246,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
         rx_ring->size = rx_ring->count * desc_len;
         rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-        rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-                                             &rx_ring->dma);
+        rx_ring->desc = dma_alloc_coherent(dev,
+                                           rx_ring->size,
+                                           &rx_ring->dma,
+                                           GFP_KERNEL);
 
         if (!rx_ring->desc)
                 goto err;
@@ -2259,8 +2262,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 err:
         vfree(rx_ring->buffer_info);
         rx_ring->buffer_info = NULL;
-        dev_err(&pdev->dev, "Unable to allocate memory for "
-                "the receive descriptor ring\n");
+        dev_err(dev, "Unable to allocate memory for the receive descriptor"
+                " ring\n");
         return -ENOMEM;
 }
 
@@ -2636,8 +2639,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
         if (!tx_ring->desc)
                 return;
 
-        pci_free_consistent(tx_ring->pdev, tx_ring->size,
-                            tx_ring->desc, tx_ring->dma);
+        dma_free_coherent(tx_ring->dev, tx_ring->size,
+                          tx_ring->desc, tx_ring->dma);
 
         tx_ring->desc = NULL;
 }
@@ -2661,15 +2664,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
 {
         if (buffer_info->dma) {
                 if (buffer_info->mapped_as_page)
-                        pci_unmap_page(tx_ring->pdev,
+                        dma_unmap_page(tx_ring->dev,
                                        buffer_info->dma,
                                        buffer_info->length,
-                                       PCI_DMA_TODEVICE);
+                                       DMA_TO_DEVICE);
                 else
-                        pci_unmap_single(tx_ring->pdev,
+                        dma_unmap_single(tx_ring->dev,
                                          buffer_info->dma,
                                          buffer_info->length,
-                                         PCI_DMA_TODEVICE);
+                                         DMA_TO_DEVICE);
                 buffer_info->dma = 0;
         }
         if (buffer_info->skb) {
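Note: the DMA API requires each mapping to be released by the matching unmap variant with the same length and direction, which is why the driver tracks mapped_as_page. An illustrative sketch of the pairing rule (not driver code):

    dma_addr_t h = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
    if (!dma_mapping_error(dev, h)) {
            /* device owns the buffer while it is mapped */
            dma_unmap_page(dev, h, len, DMA_TO_DEVICE); /* not dma_unmap_single() */
    }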
@@ -2740,8 +2743,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
         if (!rx_ring->desc)
                 return;
 
-        pci_free_consistent(rx_ring->pdev, rx_ring->size,
-                            rx_ring->desc, rx_ring->dma);
+        dma_free_coherent(rx_ring->dev, rx_ring->size,
+                          rx_ring->desc, rx_ring->dma);
 
         rx_ring->desc = NULL;
 }
@@ -2777,10 +2780,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
         for (i = 0; i < rx_ring->count; i++) {
                 buffer_info = &rx_ring->buffer_info[i];
                 if (buffer_info->dma) {
-                        pci_unmap_single(rx_ring->pdev,
+                        dma_unmap_single(rx_ring->dev,
                                          buffer_info->dma,
                                          rx_ring->rx_buffer_len,
-                                         PCI_DMA_FROMDEVICE);
+                                         DMA_FROM_DEVICE);
                         buffer_info->dma = 0;
                 }
 
@@ -2789,10 +2792,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
                         buffer_info->skb = NULL;
                 }
                 if (buffer_info->page_dma) {
-                        pci_unmap_page(rx_ring->pdev,
+                        dma_unmap_page(rx_ring->dev,
                                        buffer_info->page_dma,
                                        PAGE_SIZE / 2,
-                                       PCI_DMA_FROMDEVICE);
+                                       DMA_FROM_DEVICE);
                         buffer_info->page_dma = 0;
                 }
                 if (buffer_info->page) {
@@ -3480,7 +3483,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
                                    struct sk_buff *skb, u32 tx_flags)
 {
         struct e1000_adv_tx_context_desc *context_desc;
-        struct pci_dev *pdev = tx_ring->pdev;
+        struct device *dev = tx_ring->dev;
         struct igb_buffer *buffer_info;
         u32 info = 0, tu_cmd = 0;
         unsigned int i;
@@ -3531,7 +3534,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
                         break;
                 default:
                         if (unlikely(net_ratelimit()))
-                                dev_warn(&pdev->dev,
+                                dev_warn(dev,
                                          "partial checksum but proto=%x!\n",
                                          skb->protocol);
                         break;
@@ -3565,7 +3568,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
                                  unsigned int first)
 {
         struct igb_buffer *buffer_info;
-        struct pci_dev *pdev = tx_ring->pdev;
+        struct device *dev = tx_ring->dev;
         unsigned int len = skb_headlen(skb);
         unsigned int count = 0, i;
         unsigned int f;
@@ -3578,9 +3581,9 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
         /* set time_stamp *before* dma to help avoid a possible race */
         buffer_info->time_stamp = jiffies;
         buffer_info->next_to_watch = i;
-        buffer_info->dma = pci_map_single(pdev, skb->data, len,
-                                          PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(pdev, buffer_info->dma))
+        buffer_info->dma = dma_map_single(dev, skb->data, len,
+                                          DMA_TO_DEVICE);
+        if (dma_mapping_error(dev, buffer_info->dma))
                 goto dma_error;
 
         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
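Note: dma_mapping_error() is the only valid way to test a handle returned by dma_map_single()/dma_map_page(); with an IOMMU the mapping itself can fail, so the check must come before the descriptor is armed. On failure the function unwinds through the dma_error label, clearing the timestamp and any fragment mappings already made (see the dma_error hunk below).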
@@ -3600,12 +3603,12 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
                 buffer_info->time_stamp = jiffies;
                 buffer_info->next_to_watch = i;
                 buffer_info->mapped_as_page = true;
-                buffer_info->dma = pci_map_page(pdev,
+                buffer_info->dma = dma_map_page(dev,
                                                 frag->page,
                                                 frag->page_offset,
                                                 len,
-                                                PCI_DMA_TODEVICE);
-                if (pci_dma_mapping_error(pdev, buffer_info->dma))
+                                                DMA_TO_DEVICE);
+                if (dma_mapping_error(dev, buffer_info->dma))
                         goto dma_error;
 
         }
@@ -3617,7 +3620,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
         return ++count;
 
 dma_error:
-        dev_err(&pdev->dev, "TX DMA map failed\n");
+        dev_err(dev, "TX DMA map failed\n");
 
         /* clear timestamp and dma mappings for failed buffer_info mapping */
         buffer_info->dma = 0;
@@ -5059,7 +5062,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
             !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
 
                 /* detected Tx unit hang */
-                dev_err(&tx_ring->pdev->dev,
+                dev_err(tx_ring->dev,
                         "Detected Tx Unit Hang\n"
                         "  Tx Queue             <%d>\n"
                         "  TDH                  <%x>\n"
@@ -5138,7 +5141,7 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
         if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-        dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
+        dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
 }
 
 static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
@@ -5193,7 +5196,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 {
         struct igb_ring *rx_ring = q_vector->rx_ring;
         struct net_device *netdev = rx_ring->netdev;
-        struct pci_dev *pdev = rx_ring->pdev;
+        struct device *dev = rx_ring->dev;
         union e1000_adv_rx_desc *rx_desc , *next_rxd;
         struct igb_buffer *buffer_info , *next_buffer;
         struct sk_buff *skb;
@@ -5233,9 +5236,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                 cleaned_count++;
 
                 if (buffer_info->dma) {
-                        pci_unmap_single(pdev, buffer_info->dma,
+                        dma_unmap_single(dev, buffer_info->dma,
                                          rx_ring->rx_buffer_len,
-                                         PCI_DMA_FROMDEVICE);
+                                         DMA_FROM_DEVICE);
                         buffer_info->dma = 0;
                         if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
                                 skb_put(skb, length);
@@ -5245,8 +5248,8 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                 }
 
                 if (length) {
-                        pci_unmap_page(pdev, buffer_info->page_dma,
-                                       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+                        dma_unmap_page(dev, buffer_info->page_dma,
+                                       PAGE_SIZE / 2, DMA_FROM_DEVICE);
                         buffer_info->page_dma = 0;
 
                         skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
@@ -5354,12 +5357,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
                         buffer_info->page_offset ^= PAGE_SIZE / 2;
                 }
                 buffer_info->page_dma =
-                        pci_map_page(rx_ring->pdev, buffer_info->page,
+                        dma_map_page(rx_ring->dev, buffer_info->page,
                                      buffer_info->page_offset,
                                      PAGE_SIZE / 2,
-                                     PCI_DMA_FROMDEVICE);
-                if (pci_dma_mapping_error(rx_ring->pdev,
-                                          buffer_info->page_dma)) {
+                                     DMA_FROM_DEVICE);
+                if (dma_mapping_error(rx_ring->dev,
+                                      buffer_info->page_dma)) {
                         buffer_info->page_dma = 0;
                         rx_ring->rx_stats.alloc_failed++;
                         goto no_buffers;
@@ -5377,12 +5380,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
                         buffer_info->skb = skb;
                 }
                 if (!buffer_info->dma) {
-                        buffer_info->dma = pci_map_single(rx_ring->pdev,
+                        buffer_info->dma = dma_map_single(rx_ring->dev,
                                                           skb->data,
                                                           bufsz,
-                                                          PCI_DMA_FROMDEVICE);
-                        if (pci_dma_mapping_error(rx_ring->pdev,
-                                                  buffer_info->dma)) {
+                                                          DMA_FROM_DEVICE);
+                        if (dma_mapping_error(rx_ring->dev,
+                                              buffer_info->dma)) {
                                 buffer_info->dma = 0;
                                 rx_ring->rx_stats.alloc_failed++;
                                 goto no_buffers;