author | Emil Tantilov <emil.s.tantilov@intel.com> | 2014-11-07 20:39:20 -0500 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2014-11-20 17:21:31 -0500 |
commit | ec62fe264110a021336de20e400bc778a4111f60 (patch) | |
tree | c8c5d77f114aa87c375dd2b3b5c240caf160ac4f /drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |
parent | bafa578fdfb2e9861dcaf7d9863e1265aff226c9 (diff) |
ixgbevf: Test Rx status bits directly out of the descriptor
Instead of keeping a local copy of the status bits from the descriptor,
we can just read them directly; this is accomplished with the addition
of ixgbevf_test_staterr().
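The helper itself is added in ixgbevf.h, which is outside the diffstat
shown below (limited to ixgbevf_main.c). A minimal sketch of the pattern,
assuming it mirrors the sibling ixgbe driver's ixgbe_test_staterr():

```c
/* Sketch only: the real helper lands in ixgbevf.h, not in the diff
 * below. It masks the descriptor's little-endian status_error word
 * in place instead of first converting it to CPU byte order.
 */
static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					  const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
```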
In addition, instead of doing a byteswap on the status bits value, we
can byteswap the constant values we are testing against, since that can
be done at compile time; this should help to improve performance on
big-endian systems.
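As a simplified before/after sketch (not the literal driver code):
because cpu_to_le32() applied to a constant mask folds at compile time,
the per-packet byteswap on big-endian goes away.

```c
/* Before: convert the descriptor word to CPU order on every test;
 * on big-endian systems this is a runtime byteswap per packet.
 */
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
if (staterr & IXGBE_RXD_STAT_DD) {
	/* ... process the completed packet ... */
}

/* After: byteswap the constant mask instead; cpu_to_le32() on a
 * constant is evaluated at compile time, so the little-endian
 * descriptor word is tested as-is with no runtime swap.
 */
if (rx_desc->wb.upper.status_error & cpu_to_le32(IXGBE_RXD_STAT_DD)) {
	/* ... process the completed packet ... */
}
```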
CC: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 59 |
1 file changed, 26 insertions, 33 deletions
```diff
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index deda74d24075..19062dcf1e80 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -331,15 +331,14 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
  * ixgbevf_receive_skb - Send a completed packet up the stack
  * @q_vector: structure containing interrupt and ring information
  * @skb: packet to send up
- * @status: hardware indication of status of receive
  * @rx_desc: rx descriptor
  **/
 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
-				struct sk_buff *skb, u8 status,
+				struct sk_buff *skb,
 				union ixgbe_adv_rx_desc *rx_desc)
 {
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
-	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+	bool is_vlan = !!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP);
 	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
 	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
@@ -355,11 +354,10 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
  * ixgbevf_rx_skb - Helper function to determine proper Rx method
  * @q_vector: structure containing interrupt and ring information
  * @skb: packet to send up
- * @status: hardware indication of status of receive
  * @rx_desc: rx descriptor
  **/
 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
-			   struct sk_buff *skb, u8 status,
+			   struct sk_buff *skb,
 			   union ixgbe_adv_rx_desc *rx_desc)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -372,17 +370,17 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
 	}
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
-	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+	ixgbevf_receive_skb(q_vector, skb, rx_desc);
 }
 
-/**
- * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
- * @ring: pointer to Rx descriptor ring structure
- * @status_err: hardware indication of status of receive
+/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
  * @skb: skb currently being received and modified
- **/
+ */
 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
-				       u32 status_err, struct sk_buff *skb)
+				       union ixgbe_adv_rx_desc *rx_desc,
+				       struct sk_buff *skb)
 {
 	skb_checksum_none_assert(skb);
 
@@ -391,16 +389,16 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
 		return;
 
 	/* if IP and error */
-	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
-	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
+	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
+	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
 		ring->rx_stats.csum_err++;
 		return;
 	}
 
-	if (!(status_err & IXGBE_RXD_STAT_L4CS))
+	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
 		return;
 
-	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
 		ring->rx_stats.csum_err++;
 		return;
 	}
@@ -520,33 +518,29 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
 	struct sk_buff *skb;
 	unsigned int i;
-	u32 len, staterr;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
 
 	i = rx_ring->next_to_clean;
 	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
-	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
-	while (staterr & IXGBE_RXD_STAT_DD) {
+	while (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
 		if (!budget)
 			break;
 		budget--;
 
 		rmb(); /* read descriptor and rx_buffer_info after status DD */
-		len = le16_to_cpu(rx_desc->wb.upper.length);
+
 		skb = rx_buffer_info->skb;
 		prefetch(skb->data - NET_IP_ALIGN);
 		rx_buffer_info->skb = NULL;
 
-		if (rx_buffer_info->dma) {
-			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
-					 rx_ring->rx_buf_len,
-					 DMA_FROM_DEVICE);
-			rx_buffer_info->dma = 0;
-			skb_put(skb, len);
-		}
+		dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
+				 rx_ring->rx_buf_len,
+				 DMA_FROM_DEVICE);
+		rx_buffer_info->dma = 0;
+		skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));
 
 		i++;
 		if (i == rx_ring->count)
@@ -558,7 +552,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 
 		next_buffer = &rx_ring->rx_buffer_info[i];
 
-		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+		if (!(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) {
 			skb->next = next_buffer->skb;
 			IXGBE_CB(skb->next)->prev = skb;
 			rx_ring->rx_stats.non_eop_descs++;
@@ -576,12 +570,13 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+		if (unlikely(ixgbevf_test_staterr(rx_desc,
+						  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
 
-		ixgbevf_rx_checksum(rx_ring, staterr, skb);
+		ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
@@ -600,7 +595,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 			goto next_desc;
 		}
 
-		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
+		ixgbevf_rx_skb(q_vector, skb, rx_desc);
 
 next_desc:
 		/* return some buffers to hardware, one at a time is too slow */
@@ -612,8 +607,6 @@ next_desc:
 		/* use prefetched values */
 		rx_desc = next_rxd;
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
-		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 
 	rx_ring->next_to_clean = i;
```