author		Alexander Duyck <alexander.h.duyck@intel.com>	2010-11-16 22:27:00 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2010-11-16 22:27:00 -0500
commit		c267fc166a3308c45c7f0ad2ddd6fc696caaeb80 (patch)
tree		40baa71d030515319ed478928745e1962f51ad93 /drivers/net
parent		32aa77a4fc06bd1116f83c25bf0389a3e9b80533 (diff)
ixgbe: cleanup ixgbe_clean_rx_irq
The code for ixgbe_clean_rx_irq was much more tangled up than it needed to be
in terms of logic statements and unused variables. This change untangles much
of that and drops several unused variables, such as cleaned, which was being
returned but never checked.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
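The heart of the cleanup is folding the packet-split header-length math into a
single ixgbe_get_hlen() helper that clamps the hardware-reported header length
to the size of the header buffer. A standalone sketch of that calculation
follows; the mask, shift and size constants are illustrative stand-ins, not the
IXGBE_RXDADV_* values defined in the driver headers.

    /*
     * Illustration of the header-length clamping that the patch factors
     * into ixgbe_get_hlen().  The constants are example values only.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define HDRBUFLEN_MASK  0x7FE0u  /* assumed: header length in bits 5..14 */
    #define HDRBUFLEN_SHIFT 5
    #define RX_HDR_SIZE     512u     /* assumed size of the header buffer */

    static uint16_t get_hlen(uint16_t hdr_info)
    {
            uint16_t hlen = (hdr_info & HDRBUFLEN_MASK) >> HDRBUFLEN_SHIFT;

            /* hardware never DMAs more header bytes than the buffer holds */
            if (hlen > RX_HDR_SIZE)
                    hlen = RX_HDR_SIZE;
            return hlen;
    }

    int main(void)
    {
            /* a descriptor advertising a 128-byte header: 128 << 5 == 0x1000 */
            printf("hlen = %u\n", (unsigned)get_hlen(0x1000));
            return 0;
    }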
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	149
1 file changed, 78 insertions, 71 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 5dde7d63c3a3..584608d267b2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1128,14 +1128,18 @@ no_buffers:
 	}
 }
 
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
 {
-	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+	/* HW will not DMA in data larger than the given buffer, even if it
+	 * parses the (NFS, of course) header to be larger.  In that case, it
+	 * fills the header buffer and spills the rest into the page.
+	 */
+	u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+	u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+		    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+	if (hlen > IXGBE_RX_HDR_SIZE)
+		hlen = IXGBE_RX_HDR_SIZE;
+	return hlen;
 }
 
 static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
@@ -1182,7 +1186,7 @@ struct ixgbe_rsc_cb {
 
 #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
 
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *rx_ring,
 			       int *work_done, int work_to_do)
 {
@@ -1190,49 +1194,40 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
 	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
 	struct sk_buff *skb;
-	unsigned int i, rsc_count = 0;
-	u32 len, staterr;
-	u16 hdr_info;
-	bool cleaned = false;
-	int cleaned_count = 0;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	const int current_node = numa_node_id();
+	unsigned int rsc_count = 0;
 #ifdef IXGBE_FCOE
 	int ddp_bytes = 0;
 #endif /* IXGBE_FCOE */
+	u32 staterr;
+	u16 i;
+	u16 cleaned_count = 0;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-	rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
 	while (staterr & IXGBE_RXD_STAT_DD) {
 		u32 upper_len = 0;
-		if (*work_done >= work_to_do)
-			break;
-		(*work_done)++;
 
 		rmb(); /* read descriptor and rx_buffer_info after status DD */
-		if (ring_is_ps_enabled(rx_ring)) {
-			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
-			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-			if ((len > IXGBE_RX_HDR_SIZE) ||
-			    (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
-				len = IXGBE_RX_HDR_SIZE;
-		} else {
-			len = le16_to_cpu(rx_desc->wb.upper.length);
-		}
 
-		cleaned = true;
+		rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
 		skb = rx_buffer_info->skb;
-		prefetch(skb->data);
 		rx_buffer_info->skb = NULL;
+		prefetch(skb->data);
 
+		if (ring_is_rsc_enabled(rx_ring))
+			rsc_count = ixgbe_get_rsc_count(rx_desc);
+
+		/* if this is a skb from previous receive DMA will be 0 */
 		if (rx_buffer_info->dma) {
-			if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
-			    (!(staterr & IXGBE_RXD_STAT_EOP)) &&
-			    (!(skb->prev))) {
+			u16 hlen;
+			if (rsc_count &&
+			    !(staterr & IXGBE_RXD_STAT_EOP) &&
+			    !skb->prev) {
 				/*
 				 * When HWRSC is enabled, delay unmapping
 				 * of the first packet. It carries the
@@ -1249,7 +1244,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 						 DMA_FROM_DEVICE);
 			}
 			rx_buffer_info->dma = 0;
-			skb_put(skb, len);
+
+			if (ring_is_ps_enabled(rx_ring)) {
+				hlen = ixgbe_get_hlen(rx_desc);
+				upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+			} else {
+				hlen = le16_to_cpu(rx_desc->wb.upper.length);
+			}
+
+			skb_put(skb, hlen);
+		} else {
+			/* assume packet split since header is unmapped */
+			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
 		}
 
 		if (upper_len) {
@@ -1263,11 +1269,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 					   rx_buffer_info->page_offset,
 					   upper_len);
 
-			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
-			    (page_count(rx_buffer_info->page) != 1))
-				rx_buffer_info->page = NULL;
-			else
+			if ((page_count(rx_buffer_info->page) == 1) &&
+			    (page_to_nid(rx_buffer_info->page) == current_node))
 				get_page(rx_buffer_info->page);
+			else
+				rx_buffer_info->page = NULL;
 
 			skb->len += upper_len;
 			skb->data_len += upper_len;
@@ -1282,9 +1288,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		prefetch(next_rxd);
 		cleaned_count++;
 
-		if (ring_is_rsc_enabled(rx_ring))
-			rsc_count = ixgbe_get_rsc_count(rx_desc);
-
 		if (rsc_count) {
 			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
 				     IXGBE_RXDADV_NEXTP_SHIFT;
@@ -1293,31 +1296,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			next_buffer = &rx_ring->rx_buffer_info[i];
 		}
 
-		if (staterr & IXGBE_RXD_STAT_EOP) {
-			if (skb->prev)
-				skb = ixgbe_transform_rsc_queue(skb,
-						&(rx_ring->rx_stats.rsc_count));
-			if (ring_is_rsc_enabled(rx_ring)) {
-				if (IXGBE_RSC_CB(skb)->delay_unmap) {
-					dma_unmap_single(rx_ring->dev,
-							 IXGBE_RSC_CB(skb)->dma,
-							 rx_ring->rx_buf_len,
-							 DMA_FROM_DEVICE);
-					IXGBE_RSC_CB(skb)->dma = 0;
-					IXGBE_RSC_CB(skb)->delay_unmap = false;
-				}
-				if (ring_is_ps_enabled(rx_ring))
-					rx_ring->rx_stats.rsc_count +=
-						skb_shinfo(skb)->nr_frags;
-				else
-					rx_ring->rx_stats.rsc_count++;
-				rx_ring->rx_stats.rsc_flush++;
-			}
-			u64_stats_update_begin(&rx_ring->syncp);
-			rx_ring->stats.packets++;
-			rx_ring->stats.bytes += skb->len;
-			u64_stats_update_end(&rx_ring->syncp);
-		} else {
+		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
 			if (ring_is_ps_enabled(rx_ring)) {
 				rx_buffer_info->skb = next_buffer->skb;
 				rx_buffer_info->dma = next_buffer->dma;
@@ -1331,8 +1310,32 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			goto next_desc;
 		}
 
+		if (skb->prev)
+			skb = ixgbe_transform_rsc_queue(skb,
+						&(rx_ring->rx_stats.rsc_count));
+
+		if (ring_is_rsc_enabled(rx_ring)) {
+			if (IXGBE_RSC_CB(skb)->delay_unmap) {
+				dma_unmap_single(rx_ring->dev,
+						 IXGBE_RSC_CB(skb)->dma,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
+				IXGBE_RSC_CB(skb)->dma = 0;
+				IXGBE_RSC_CB(skb)->delay_unmap = false;
+			}
+			if (ring_is_ps_enabled(rx_ring))
+				rx_ring->rx_stats.rsc_count +=
+						skb_shinfo(skb)->nr_frags;
+			else
+				rx_ring->rx_stats.rsc_count++;
+			rx_ring->rx_stats.rsc_flush++;
+		}
+
+		/* ERR_MASK will only have valid bits if EOP set */
 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
-			dev_kfree_skb_irq(skb);
+			/* trim packet back to size 0 and recycle it */
+			__pskb_trim(skb, 0);
+			rx_buffer_info->skb = skb;
 			goto next_desc;
 		}
 
@@ -1356,6 +1359,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
 
+		(*work_done)++;
+		if (*work_done >= work_to_do)
+			break;
+
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
 			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -1364,8 +1371,6 @@ next_desc:
 
 		/* use prefetched values */
 		rx_desc = next_rxd;
-		rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 
@@ -1392,8 +1397,10 @@ next_desc:
 
 	rx_ring->total_packets += total_rx_packets;
 	rx_ring->total_bytes += total_rx_bytes;
-
-	return cleaned;
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	u64_stats_update_end(&rx_ring->syncp);
 }
 
 static int ixgbe_clean_rxonly(struct napi_struct *, int);