aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/igb
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2009-10-27 19:51:16 -0400
committerDavid S. Miller <davem@davemloft.net>2009-10-28 06:26:01 -0400
commit42d0781a1337ec5624da0657ba57b734768f489c (patch)
tree552f213eebd6be6d7a64896eda7ac1d184f0c9b8 /drivers/net/igb
parentcdfd01fcc674cc1c0c7b54084d74c2b684bf67c2 (diff)
igb: cleanup clean_rx_irq_adv and alloc_rx_buffers_adv
This patch cleans up some whitespace issues in clean_rx_irq_adv. It also adds NUMA aware page allocation and dma error handling to alloc_rx_buffers_adv.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/igb')
-rw-r--r--drivers/net/igb/igb_main.c24
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 8f8b7ccc7db5..d3e831699b47 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4952,6 +4952,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4952 i++; 4952 i++;
4953 if (i == rx_ring->count) 4953 if (i == rx_ring->count)
4954 i = 0; 4954 i = 0;
4955
4955 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); 4956 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
4956 prefetch(next_rxd); 4957 prefetch(next_rxd);
4957 next_buffer = &rx_ring->buffer_info[i]; 4958 next_buffer = &rx_ring->buffer_info[i];
@@ -4989,7 +4990,6 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4989 4990
4990 skb->len += length; 4991 skb->len += length;
4991 skb->data_len += length; 4992 skb->data_len += length;
4992
4993 skb->truesize += length; 4993 skb->truesize += length;
4994 } 4994 }
4995 4995
@@ -5071,7 +5071,7 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5071 5071
5072 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) { 5072 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
5073 if (!buffer_info->page) { 5073 if (!buffer_info->page) {
5074 buffer_info->page = alloc_page(GFP_ATOMIC); 5074 buffer_info->page = netdev_alloc_page(netdev);
5075 if (!buffer_info->page) { 5075 if (!buffer_info->page) {
5076 rx_ring->rx_stats.alloc_failed++; 5076 rx_ring->rx_stats.alloc_failed++;
5077 goto no_buffers; 5077 goto no_buffers;
@@ -5085,9 +5085,16 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5085 buffer_info->page_offset, 5085 buffer_info->page_offset,
5086 PAGE_SIZE / 2, 5086 PAGE_SIZE / 2,
5087 PCI_DMA_FROMDEVICE); 5087 PCI_DMA_FROMDEVICE);
5088 if (pci_dma_mapping_error(rx_ring->pdev,
5089 buffer_info->page_dma)) {
5090 buffer_info->page_dma = 0;
5091 rx_ring->rx_stats.alloc_failed++;
5092 goto no_buffers;
5093 }
5088 } 5094 }
5089 5095
5090 if (!buffer_info->skb) { 5096 skb = buffer_info->skb;
5097 if (!skb) {
5091 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 5098 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
5092 if (!skb) { 5099 if (!skb) {
5093 rx_ring->rx_stats.alloc_failed++; 5100 rx_ring->rx_stats.alloc_failed++;
@@ -5095,10 +5102,18 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5095 } 5102 }
5096 5103
5097 buffer_info->skb = skb; 5104 buffer_info->skb = skb;
5105 }
5106 if (!buffer_info->dma) {
5098 buffer_info->dma = pci_map_single(rx_ring->pdev, 5107 buffer_info->dma = pci_map_single(rx_ring->pdev,
5099 skb->data, 5108 skb->data,
5100 bufsz, 5109 bufsz,
5101 PCI_DMA_FROMDEVICE); 5110 PCI_DMA_FROMDEVICE);
5111 if (pci_dma_mapping_error(rx_ring->pdev,
5112 buffer_info->dma)) {
5113 buffer_info->dma = 0;
5114 rx_ring->rx_stats.alloc_failed++;
5115 goto no_buffers;
5116 }
5102 } 5117 }
5103 /* Refresh the desc even if buffer_addrs didn't change because 5118 /* Refresh the desc even if buffer_addrs didn't change because
5104 * each write-back erases this info. */ 5119 * each write-back erases this info. */
@@ -5107,8 +5122,7 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5107 cpu_to_le64(buffer_info->page_dma); 5122 cpu_to_le64(buffer_info->page_dma);
5108 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); 5123 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
5109 } else { 5124 } else {
5110 rx_desc->read.pkt_addr = 5125 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
5111 cpu_to_le64(buffer_info->dma);
5112 rx_desc->read.hdr_addr = 0; 5126 rx_desc->read.hdr_addr = 0;
5113 } 5127 }
5114 5128