author    Alexander Duyck <alexander.h.duyck@intel.com>  2009-12-02 11:47:18 -0500
committer David S. Miller <davem@davemloft.net>          2009-12-02 22:57:12 -0500
commit    6366ad331f436388129dfc044db871de79604e4d (patch)
tree      39e667c2191093fbb21f5cf74a538da945b32817 /drivers/net/igb
parent    e5a43549f7a58509a91b299a51337d386697b92c (diff)
igb: remove use of skb_dma_map from driver
This change removes the skb_dma_map/unmap calls from the igb driver
because that interface is incompatible with iommu-enabled kernels. To
prevent warnings about using the wrong unmap call, I have added a
mapped_as_page value to the buffer_info structure to track whether the
mapped region is a page or a buffer.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
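In short: the driver now maps the skb head with pci_map_single() and each
fragment with pci_map_page(), so teardown has to know which call produced
a given mapping; the DMA-debug code on iommu-enabled kernels warns when
the unmap call does not match the map call. Below is a minimal sketch of
the resulting pattern; the example_buffer struct and example_unmap()
helper are hypothetical names for illustration only (the driver itself
uses struct igb_buffer and igb_unmap_and_free_tx_resource(), shown in
the diff below):

#include <linux/pci.h>

/* Hypothetical descriptor mirroring the TX fields of igb_buffer. */
struct example_buffer {
	dma_addr_t dma;		/* bus address from pci_map_single/page() */
	u16 length;		/* mapped length, needed at unmap time */
	u16 mapped_as_page;	/* nonzero if mapped with pci_map_page() */
};

/* Unmap with the call that matches how the region was mapped. */
static void example_unmap(struct pci_dev *pdev, struct example_buffer *buf)
{
	if (!buf->dma)
		return;
	if (buf->mapped_as_page)
		pci_unmap_page(pdev, buf->dma, buf->length,
			       PCI_DMA_TODEVICE);
	else
		pci_unmap_single(pdev, buf->dma, buf->length,
				 PCI_DMA_TODEVICE);
	buf->dma = 0;	/* mark as unmapped so a second call is a no-op */
}

The transmit path sets mapped_as_page only when mapping a fragment; the
head mapping leaves it false, so teardown always picks the matching
unmap call.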
Diffstat (limited to 'drivers/net/igb')
-rw-r--r--  drivers/net/igb/igb.h       |  5
-rw-r--r--  drivers/net/igb/igb_main.c  | 69
2 files changed, 56 insertions(+), 18 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index c458d9b188ba..b1c1eb88893f 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -137,12 +137,13 @@ struct igb_buffer {
 			unsigned long time_stamp;
 			u16 length;
 			u16 next_to_watch;
+			u16 mapped_as_page;
 		};
 		/* RX */
 		struct {
 			struct page *page;
-			u64 page_dma;
-			unsigned int page_offset;
+			dma_addr_t page_dma;
+			u16 page_offset;
 		};
 	};
 };
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index bb1a6eeade06..e57b32d3fde4 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -2654,16 +2654,27 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
 void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
 				    struct igb_buffer *buffer_info)
 {
-	buffer_info->dma = 0;
+	if (buffer_info->dma) {
+		if (buffer_info->mapped_as_page)
+			pci_unmap_page(tx_ring->pdev,
+				       buffer_info->dma,
+				       buffer_info->length,
+				       PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(tx_ring->pdev,
+					 buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
+	}
 	if (buffer_info->skb) {
-		skb_dma_unmap(&tx_ring->pdev->dev,
-			      buffer_info->skb,
-			      DMA_TO_DEVICE);
 		dev_kfree_skb_any(buffer_info->skb);
 		buffer_info->skb = NULL;
 	}
 	buffer_info->time_stamp = 0;
-	/* buffer_info must be completely set up in the transmit path */
+	buffer_info->length = 0;
+	buffer_info->next_to_watch = 0;
+	buffer_info->mapped_as_page = false;
 }
 
 /**
@@ -3561,24 +3572,19 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 	unsigned int len = skb_headlen(skb);
 	unsigned int count = 0, i;
 	unsigned int f;
-	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
-	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&pdev->dev, "TX DMA map failed\n");
-		return 0;
-	}
-
-	map = skb_shinfo(skb)->dma_maps;
-
 	buffer_info = &tx_ring->buffer_info[i];
 	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
 	buffer_info->length = len;
 	/* set time_stamp *before* dma to help avoid a possible race */
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
-	buffer_info->dma = skb_shinfo(skb)->dma_head;
+	buffer_info->dma = pci_map_single(pdev, skb->data, len,
+					  PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, buffer_info->dma))
+		goto dma_error;
 
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
 		struct skb_frag_struct *frag;
@@ -3595,7 +3601,15 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 		buffer_info->length = len;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
-		buffer_info->dma = map[count];
+		buffer_info->mapped_as_page = true;
+		buffer_info->dma = pci_map_page(pdev,
+						frag->page,
+						frag->page_offset,
+						len,
+						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			goto dma_error;
+
 		count++;
 	}
 
@@ -3603,6 +3617,29 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return ++count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+
+	/* clear timestamp and dma mappings for failed buffer_info mapping */
+	buffer_info->dma = 0;
+	buffer_info->time_stamp = 0;
+	buffer_info->length = 0;
+	buffer_info->next_to_watch = 0;
+	buffer_info->mapped_as_page = false;
+	count--;
+
+	/* clear timestamp and dma mappings for remaining portion of packet */
+	while (count >= 0) {
+		count--;
+		i--;
+		if (i < 0)
+			i += tx_ring->count;
+		buffer_info = &tx_ring->buffer_info[i];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+	}
+
+	return 0;
 }
 
 static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
@@ -3755,7 +3792,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	 * has occured and we need to rewind the descriptor queue
 	 */
 	count = igb_tx_map_adv(tx_ring, skb, first);
-	if (count <= 0) {
+	if (!count) {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
 		tx_ring->next_to_use = first;