author		Alexander Duyck <alexander.h.duyck@intel.com>	2012-01-30 21:59:34 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2012-02-10 18:49:14 -0500
commit		f990b79bc80ca7a23b8a6c33241c439072d0b85b (patch)
tree		96376aebd1253a17bec049178893c1f82c579fc3 /drivers/net/ethernet/intel/ixgbe
parent		1d2024f61ec14bdb0c57a97a3fe73685abc2d198 (diff)
ixgbe: Let the Rx buffer allocation clear status bits instead of cleanup
This change makes it so that we always clear the status/error bits in the Rx descriptor in the allocation path instead of the cleanup path. The advantage to this is that we spend less time modifying data. As such we can modify the data once and then let it go cold in the cache instead of writing it, reading it, and then writing it again.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
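[Editor's illustration, not part of the patch] To make the idea concrete, the stand-alone sketch below models the pattern the patch adopts: the refill (producer) path programs each descriptor and clears the write-back status of the descriptor that will be used next, so the cleanup (consumer) path only reads descriptors and stops at the first one whose status has not been set by the hardware. All names here (demo_rx_desc, demo_ring, demo_refill, demo_clean, STAT_DD) are made up for illustration and are not the driver's structures.

#include <stdint.h>

#define RING_SIZE 16
#define STAT_DD   0x1u			/* "descriptor done" bit written by the device */

struct demo_rx_desc {
	uint64_t addr;			/* buffer address handed to the device */
	uint32_t status;		/* written back by the device on completion */
};

struct demo_ring {
	struct demo_rx_desc desc[RING_SIZE];
	uint16_t next_to_use;		/* producer index (refill path) */
	uint16_t next_to_clean;		/* consumer index (cleanup path) */
};

/* Producer: program descriptors and clear the status of the descriptor
 * that will be used next, so the consumer never sees a stale DD bit. */
static void demo_refill(struct demo_ring *ring, unsigned int count,
			const uint64_t *bufs)
{
	uint16_t i = ring->next_to_use;

	while (count--) {
		struct demo_rx_desc *rxd = &ring->desc[i];

		/* Programming the descriptor also wipes the stale status
		 * from the previous completion (in ixgbe the read and
		 * write-back layouts overlap in a union). */
		rxd->addr = *bufs++;
		rxd->status = 0;

		i = (uint16_t)((i + 1) % RING_SIZE);

		/* Clear the next_to_use descriptor's status so the
		 * consumer stops here instead of reading stale data. */
		ring->desc[i].status = 0;
	}
	ring->next_to_use = i;
}

/* Consumer: read-only with respect to the descriptor ring; the write
 * that used to reset the status here is the one the patch removes from
 * the cleanup path. */
static unsigned int demo_clean(struct demo_ring *ring)
{
	unsigned int completed = 0;
	uint16_t i = ring->next_to_clean;

	while (ring->desc[i].status & STAT_DD) {
		/* ...hand ring->desc[i].addr's buffer up the stack here... */
		i = (uint16_t)((i + 1) % RING_SIZE);
		completed++;
	}
	ring->next_to_clean = i;
	return completed;
}

int main(void)
{
	static struct demo_ring ring;	/* zero-initialized ring */
	uint64_t bufs[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

	demo_refill(&ring, 4, bufs);

	/* Pretend the device completed the first two buffers. */
	ring.desc[0].status = STAT_DD;
	ring.desc[1].status = STAT_DD;

	return demo_clean(&ring) == 2 ? 0 : 1;
}

In the real driver the read and write-back descriptor formats share a union, so writing pkt_addr/hdr_addr during refill already overwrites the stale status bits; the extra hdr_addr clear on the next_to_use descriptor is what lets the cleanup loop drop its own status_error write, as the diff below shows.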
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	155
1 file changed, 93 insertions(+), 62 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 762f33777a7f..538577b08e25 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1101,8 +1101,75 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
 	writel(val, rx_ring->tail);
 }
 
+static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring,
+				   struct ixgbe_rx_buffer *bi)
+{
+	struct sk_buff *skb = bi->skb;
+	dma_addr_t dma = bi->dma;
+
+	if (dma)
+		return true;
+
+	if (likely(!skb)) {
+		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+						rx_ring->rx_buf_len);
+		bi->skb = skb;
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			return false;
+		}
+
+		/* initialize skb for ring */
+		skb_record_rx_queue(skb, rx_ring->queue_index);
+	}
+
+	dma = dma_map_single(rx_ring->dev, skb->data,
+			     rx_ring->rx_buf_len, DMA_FROM_DEVICE);
+
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		rx_ring->rx_stats.alloc_rx_buff_failed++;
+		return false;
+	}
+
+	bi->dma = dma;
+	return true;
+}
+
+static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
+				    struct ixgbe_rx_buffer *bi)
+{
+	struct page *page = bi->page;
+	dma_addr_t page_dma = bi->page_dma;
+	unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+
+	if (page_dma)
+		return true;
+
+	if (!page) {
+		page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+		bi->page = page;
+		if (unlikely(!page)) {
+			rx_ring->rx_stats.alloc_rx_page_failed++;
+			return false;
+		}
+	}
+
+	page_dma = dma_map_page(rx_ring->dev, page,
+				page_offset, PAGE_SIZE / 2,
+				DMA_FROM_DEVICE);
+
+	if (dma_mapping_error(rx_ring->dev, page_dma)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	bi->page_dma = page_dma;
+	bi->page_offset = page_offset;
+	return true;
+}
+
 /**
- * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
+ * ixgbe_alloc_rx_buffers - Replace used receive buffers
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
  **/
@@ -1110,82 +1177,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 {
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
-	struct sk_buff *skb;
 	u16 i = rx_ring->next_to_use;
 
-	/* do nothing if no valid netdev defined */
-	if (!rx_ring->netdev)
+	/* nothing to do or no valid netdev defined */
+	if (!cleaned_count || !rx_ring->netdev)
 		return;
 
-	while (cleaned_count--) {
-		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
-		bi = &rx_ring->rx_buffer_info[i];
-		skb = bi->skb;
-
-		if (!skb) {
-			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-							rx_ring->rx_buf_len);
-			if (!skb) {
-				rx_ring->rx_stats.alloc_rx_buff_failed++;
-				goto no_buffers;
-			}
-			/* initialize queue mapping */
-			skb_record_rx_queue(skb, rx_ring->queue_index);
-			bi->skb = skb;
-		}
+	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+	bi = &rx_ring->rx_buffer_info[i];
+	i -= rx_ring->count;
 
-		if (!bi->dma) {
-			bi->dma = dma_map_single(rx_ring->dev,
-						 skb->data,
-						 rx_ring->rx_buf_len,
-						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-				rx_ring->rx_stats.alloc_rx_buff_failed++;
-				bi->dma = 0;
-				goto no_buffers;
-			}
-		}
+	while (cleaned_count--) {
+		if (!ixgbe_alloc_mapped_skb(rx_ring, bi))
+			break;
 
+		/* Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info. */
 		if (ring_is_ps_enabled(rx_ring)) {
-			if (!bi->page) {
-				bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
-				if (!bi->page) {
-					rx_ring->rx_stats.alloc_rx_page_failed++;
-					goto no_buffers;
-				}
-			}
+			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 
-			if (!bi->page_dma) {
-				/* use a half page if we're re-using */
-				bi->page_offset ^= PAGE_SIZE / 2;
-				bi->page_dma = dma_map_page(rx_ring->dev,
-							    bi->page,
-							    bi->page_offset,
-							    PAGE_SIZE / 2,
-							    DMA_FROM_DEVICE);
-				if (dma_mapping_error(rx_ring->dev,
-						      bi->page_dma)) {
-					rx_ring->rx_stats.alloc_rx_page_failed++;
-					bi->page_dma = 0;
-					goto no_buffers;
-				}
-			}
+			if (!ixgbe_alloc_mapped_page(rx_ring, bi))
+				break;
 
-			/* Refresh the desc even if buffer_addrs didn't change
-			 * because each write-back erases this info. */
 			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
-			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 		} else {
 			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-			rx_desc->read.hdr_addr = 0;
 		}
 
+		rx_desc++;
+		bi++;
 		i++;
-		if (i == rx_ring->count)
-			i = 0;
+		if (unlikely(!i)) {
+			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the hdr_addr for the next_to_use descriptor */
+		rx_desc->read.hdr_addr = 0;
 	}
 
-no_buffers:
+	i += rx_ring->count;
+
 	if (rx_ring->next_to_use != i) {
 		rx_ring->next_to_use = i;
 		ixgbe_release_rx_desc(rx_ring, i);
@@ -1593,8 +1626,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 		budget--;
 next_desc:
-		rx_desc->wb.upper.status_error = 0;
-
 		if (!budget)
 			break;
 