author     Alexander Duyck <alexander.h.duyck@intel.com>   2012-01-30 21:59:23 -0500
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>      2012-02-10 18:42:09 -0500
commit     4c1975d77b73feed7161999aab4cc64c1ae7155c (patch)
tree       f6668f281dcf42c16aa83f0d9e53c23e0fedc900
parent     d9dd966d7fc088a6bed991c2b1e2fba4485e0a31 (diff)
ixgbe: Minor refactor of RSC
This change addresses several issues.
First, I had left the next and prev skb pointers floating around in the
code, and they were overdue to be pulled: I rewrote the RSC code in the
out-of-tree driver some time ago to address issues David Miller brought
up with that approach.
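
The bookkeeping that replaces those pointers lives in the skb control
buffer. For reference, this is the control block introduced in the
ixgbe.h hunk below (fields quoted from the patch; the comments here are
explanatory annotations, not part of the patch):

    /* Per-skb receive state kept in skb->cb.  An skb is either the head
     * of an in-progress RSC frame (and tracks its current tail) or a
     * tail (and tracks its head), so the two pointers share a union.
     */
    struct ixgbe_cb {
        union {                   /* union defining head/tail partner */
            struct sk_buff *head;
            struct sk_buff *tail;
        };
        dma_addr_t dma;           /* address of the buffer left mapped */
        u16 append_cnt;           /* frames coalesced into this skb */
        bool delay_unmap;         /* first buffer still mapped for HW */
    };
    #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)

Since skb->cb is scratch space owned by whoever currently holds the skb,
this needs no extra allocation and leaves skb->next free for normal
queueing.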
I am also now defaulting to leaving the first buffer of any packet
unmapped and unmapping it only after we read the EOP descriptor. This
simplifies the path and reduces branching.
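
Boiled down, the per-buffer decision in the receive path below looks
like this (a condensed sketch with a hypothetical helper name, not the
driver's actual function; eop stands for the EOP bit taken from staterr):

    /* Condensed sketch: the first buffer of a frame keeps its DMA
     * mapping because hardware may still write the header; everything
     * is unmapped by the time the EOP descriptor has been processed.
     */
    static void rx_map_step(struct ixgbe_ring *rx_ring,
                            struct ixgbe_rx_buffer *rx_buffer_info,
                            struct sk_buff *skb, bool eop)
    {
        if (!IXGBE_CB(skb)->head) {
            /* first buffer of the frame: defer the unmap */
            IXGBE_CB(skb)->delay_unmap = true;
            IXGBE_CB(skb)->dma = rx_buffer_info->dma;
        } else {
            /* trailing buffer: safe to unmap right away */
            dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
                             rx_ring->rx_buf_len, DMA_FROM_DEVICE);
        }
        rx_buffer_info->dma = 0;

        if (eop) {
            /* frame complete: now unmap the deferred first buffer */
            dma_unmap_single(rx_ring->dev, IXGBE_CB(skb)->dma,
                             rx_ring->rx_buf_len, DMA_FROM_DEVICE);
            IXGBE_CB(skb)->dma = 0;
            IXGBE_CB(skb)->delay_unmap = false;
        }
    }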
Instead of counting packets received, the code was changed some time ago
to track the number of buffers received. This leads to inaccurate counts
when the packet totals reported by the hardware are compared against
what the software tracks. To correct this, I am revising things so that
the append_cnt value for RSC accurately tracks the number of frames
received.
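
The frame count is taken from the RSCCNT field of the advanced receive
descriptor. A minimal sketch of the accounting, mirroring
ixgbe_get_rsc_cnt() in the patch below (the helper name is illustrative):

    /* RSCCNT reports the coalesced-descriptor count for this
     * write-back; accumulating rsc_cnt - 1 leaves append_cnt equal to
     * the number of additional frames merged into the skb.
     */
    static void rsc_count_frames(union ixgbe_adv_rx_desc *rx_desc,
                                 struct sk_buff *skb)
    {
        u32 rsc_cnt = le32_to_cpu(rx_desc->wb.lower.lo_dword.data &
                                  cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK));

        if (!rsc_cnt)
            return; /* not an RSC descriptor */

        rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
        IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
    }

rx_stats.rsc_count is then advanced by append_cnt once per completed
frame at EOP, so the statistic counts frames rather than buffers.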
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h       |  10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  | 235
2 files changed, 147 insertions, 98 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e6aeb64105a4..fca055362847 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -535,12 +535,16 @@ enum ixbge_state_t {
 	__IXGBE_IN_SFP_INIT,
 };
 
-struct ixgbe_rsc_cb {
+struct ixgbe_cb {
+	union {				/* Union defining head/tail partner */
+		struct sk_buff *head;
+		struct sk_buff *tail;
+	};
 	dma_addr_t dma;
-	u16 skb_cnt;
+	u16 append_cnt;
 	bool delay_unmap;
 };
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
 
 enum ixgbe_boards {
 	board_82598,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ecc46ce8b2c3..18e474c25e61 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1207,40 +1207,96 @@ static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
 }
 
 /**
- * ixgbe_transform_rsc_queue - change rsc queue into a full packet
- * @skb: pointer to the last skb in the rsc queue
+ * ixgbe_merge_active_tail - merge active tail into lro skb
+ * @tail: pointer to active tail in frag_list
  *
- * This function changes a queue full of hw rsc buffers into a completed
- * packet.  It uses the ->prev pointers to find the first packet and then
- * turns it into the frag list owner.
+ * This function merges the length and data of an active tail into the
+ * skb containing the frag_list.  It resets the tail's pointer to the head,
+ * but it leaves the heads pointer to tail intact.
  **/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+static inline struct sk_buff *ixgbe_merge_active_tail(struct sk_buff *tail)
 {
-	unsigned int frag_list_size = 0;
-	unsigned int skb_cnt = 1;
+	struct sk_buff *head = IXGBE_CB(tail)->head;
 
-	while (skb->prev) {
-		struct sk_buff *prev = skb->prev;
-		frag_list_size += skb->len;
-		skb->prev = NULL;
-		skb = prev;
-		skb_cnt++;
+	if (!head)
+		return tail;
+
+	head->len += tail->len;
+	head->data_len += tail->len;
+	head->truesize += tail->len;
+
+	IXGBE_CB(tail)->head = NULL;
+
+	return head;
+}
+
+/**
+ * ixgbe_add_active_tail - adds an active tail into the skb frag_list
+ * @head: pointer to the start of the skb
+ * @tail: pointer to active tail to add to frag_list
+ *
+ * This function adds an active tail to the end of the frag list.  This tail
+ * will still be receiving data so we cannot yet ad it's stats to the main
+ * skb.  That is done via ixgbe_merge_active_tail.
+ **/
+static inline void ixgbe_add_active_tail(struct sk_buff *head,
+					 struct sk_buff *tail)
+{
+	struct sk_buff *old_tail = IXGBE_CB(head)->tail;
+
+	if (old_tail) {
+		ixgbe_merge_active_tail(old_tail);
+		old_tail->next = tail;
+	} else {
+		skb_shinfo(head)->frag_list = tail;
 	}
 
-	skb_shinfo(skb)->frag_list = skb->next;
-	skb->next = NULL;
-	skb->len += frag_list_size;
-	skb->data_len += frag_list_size;
-	skb->truesize += frag_list_size;
-	IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+	IXGBE_CB(tail)->head = head;
+	IXGBE_CB(head)->tail = tail;
+}
+
+/**
+ * ixgbe_close_active_frag_list - cleanup pointers on a frag_list skb
+ * @head: pointer to head of an active frag list
+ *
+ * This function will clear the frag_tail_tracker pointer on an active
+ * frag_list and returns true if the pointer was actually set
+ **/
+static inline bool ixgbe_close_active_frag_list(struct sk_buff *head)
+{
+	struct sk_buff *tail = IXGBE_CB(head)->tail;
+
+	if (!tail)
+		return false;
 
-	return skb;
+	ixgbe_merge_active_tail(tail);
+
+	IXGBE_CB(head)->tail = NULL;
+
+	return true;
 }
 
-static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
+			      union ixgbe_adv_rx_desc *rx_desc,
+			      struct sk_buff *skb)
 {
-	return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
-		IXGBE_RXDADV_RSCCNT_MASK);
+	__le32 rsc_enabled;
+	u32 rsc_cnt;
+
+	if (!ring_is_rsc_enabled(rx_ring))
+		return;
+
+	rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+		      cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
+
+	/* If this is an RSC frame rsc_cnt should be non-zero */
+	if (!rsc_enabled)
+		return;
+
+	rsc_cnt = le32_to_cpu(rsc_enabled);
+	rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
+
+	IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
 }
 
 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
@@ -1249,7 +1305,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
-	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
+	struct ixgbe_rx_buffer *rx_buffer_info;
 	struct sk_buff *skb;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	const int current_node = numa_node_id();
@@ -1259,7 +1315,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	u32 staterr;
 	u16 i;
 	u16 cleaned_count = 0;
-	bool pkt_is_rsc = false;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
@@ -1276,32 +1331,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		rx_buffer_info->skb = NULL;
 		prefetch(skb->data);
 
-		if (ring_is_rsc_enabled(rx_ring))
-			pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
-
 		/* linear means we are building an skb from multiple pages */
 		if (!skb_is_nonlinear(skb)) {
 			u16 hlen;
-			if (pkt_is_rsc &&
-			    !(staterr & IXGBE_RXD_STAT_EOP) &&
-			    !skb->prev) {
-				/*
-				 * When HWRSC is enabled, delay unmapping
-				 * of the first packet. It carries the
-				 * header information, HW may still
-				 * access the header after the writeback.
-				 * Only unmap it when EOP is reached
-				 */
-				IXGBE_RSC_CB(skb)->delay_unmap = true;
-				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
-			} else {
-				dma_unmap_single(rx_ring->dev,
-						 rx_buffer_info->dma,
-						 rx_ring->rx_buf_len,
-						 DMA_FROM_DEVICE);
-			}
-			rx_buffer_info->dma = 0;
-
 			if (ring_is_ps_enabled(rx_ring)) {
 				hlen = ixgbe_get_hlen(rx_desc);
 				upper_len = le16_to_cpu(rx_desc->wb.upper.length);
@@ -1310,6 +1342,23 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			}
 
 			skb_put(skb, hlen);
+
+			/*
+			 * Delay unmapping of the first packet. It carries the
+			 * header information, HW may still access the header
+			 * after writeback.  Only unmap it when EOP is reached
+			 */
+			if (!IXGBE_CB(skb)->head) {
+				IXGBE_CB(skb)->delay_unmap = true;
+				IXGBE_CB(skb)->dma = rx_buffer_info->dma;
+			} else {
+				skb = ixgbe_merge_active_tail(skb);
+				dma_unmap_single(rx_ring->dev,
+						 rx_buffer_info->dma,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
+			}
+			rx_buffer_info->dma = 0;
 		} else {
 			/* assume packet split since header is unmapped */
 			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
@@ -1337,6 +1386,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			skb->truesize += PAGE_SIZE / 2;
 		}
 
+		ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
@@ -1345,55 +1396,50 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		prefetch(next_rxd);
 		cleaned_count++;
 
-		if (pkt_is_rsc) {
-			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
-				     IXGBE_RXDADV_NEXTP_SHIFT;
+		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+			struct ixgbe_rx_buffer *next_buffer;
+			u32 nextp;
+
+			if (IXGBE_CB(skb)->append_cnt) {
+				nextp = staterr & IXGBE_RXDADV_NEXTP_MASK;
+				nextp >>= IXGBE_RXDADV_NEXTP_SHIFT;
+			} else {
+				nextp = i;
+			}
+
 			next_buffer = &rx_ring->rx_buffer_info[nextp];
-		} else {
-			next_buffer = &rx_ring->rx_buffer_info[i];
-		}
 
-		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
 			if (ring_is_ps_enabled(rx_ring)) {
 				rx_buffer_info->skb = next_buffer->skb;
 				rx_buffer_info->dma = next_buffer->dma;
 				next_buffer->skb = skb;
 				next_buffer->dma = 0;
 			} else {
-				skb->next = next_buffer->skb;
-				skb->next->prev = skb;
+				struct sk_buff *next_skb = next_buffer->skb;
+				ixgbe_add_active_tail(skb, next_skb);
+				IXGBE_CB(next_skb)->head = skb;
 			}
 			rx_ring->rx_stats.non_eop_descs++;
 			goto next_desc;
 		}
 
-		if (skb->prev) {
-			skb = ixgbe_transform_rsc_queue(skb);
+		dma_unmap_single(rx_ring->dev,
+				 IXGBE_CB(skb)->dma,
+				 rx_ring->rx_buf_len,
+				 DMA_FROM_DEVICE);
+		IXGBE_CB(skb)->dma = 0;
+		IXGBE_CB(skb)->delay_unmap = false;
+
+		if (ixgbe_close_active_frag_list(skb) &&
+		    !IXGBE_CB(skb)->append_cnt) {
 			/* if we got here without RSC the packet is invalid */
-			if (!pkt_is_rsc) {
-				__pskb_trim(skb, 0);
-				rx_buffer_info->skb = skb;
-				goto next_desc;
-			}
+			dev_kfree_skb_any(skb);
+			goto next_desc;
 		}
 
-		if (ring_is_rsc_enabled(rx_ring)) {
-			if (IXGBE_RSC_CB(skb)->delay_unmap) {
-				dma_unmap_single(rx_ring->dev,
-						 IXGBE_RSC_CB(skb)->dma,
-						 rx_ring->rx_buf_len,
-						 DMA_FROM_DEVICE);
-				IXGBE_RSC_CB(skb)->dma = 0;
-				IXGBE_RSC_CB(skb)->delay_unmap = false;
-			}
-		}
-		if (pkt_is_rsc) {
-			if (ring_is_ps_enabled(rx_ring))
-				rx_ring->rx_stats.rsc_count +=
-					 skb_shinfo(skb)->nr_frags;
-			else
-				rx_ring->rx_stats.rsc_count +=
-					 IXGBE_RSC_CB(skb)->skb_cnt;
+		if (IXGBE_CB(skb)->append_cnt) {
+			rx_ring->rx_stats.rsc_count +=
+				IXGBE_CB(skb)->append_cnt;
 			rx_ring->rx_stats.rsc_flush++;
 		}
 
@@ -3881,19 +3927,18 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 		if (rx_buffer_info->skb) {
 			struct sk_buff *skb = rx_buffer_info->skb;
 			rx_buffer_info->skb = NULL;
-			do {
-				struct sk_buff *this = skb;
-				if (IXGBE_RSC_CB(this)->delay_unmap) {
-					dma_unmap_single(dev,
-							 IXGBE_RSC_CB(this)->dma,
-							 rx_ring->rx_buf_len,
-							 DMA_FROM_DEVICE);
-					IXGBE_RSC_CB(this)->dma = 0;
-					IXGBE_RSC_CB(skb)->delay_unmap = false;
-				}
-				skb = skb->prev;
-				dev_kfree_skb(this);
-			} while (skb);
+			/* We need to clean up RSC frag lists */
+			skb = ixgbe_merge_active_tail(skb);
+			ixgbe_close_active_frag_list(skb);
+			if (IXGBE_CB(skb)->delay_unmap) {
+				dma_unmap_single(dev,
+						 IXGBE_CB(skb)->dma,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
+				IXGBE_CB(skb)->dma = 0;
+				IXGBE_CB(skb)->delay_unmap = false;
+			}
+			dev_kfree_skb(skb);
 		}
 		if (!rx_buffer_info->page)
 			continue;