author	Emil Tantilov <emil.s.tantilov@intel.com>	2014-11-07 20:39:15 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2014-11-20 17:21:30 -0500
commit	bafa578fdfb2e9861dcaf7d9863e1265aff226c9 (patch)
tree	a195ab53f08bbe34109610616bc663b0d8732cb3 /drivers/net/ethernet/intel/ixgbevf
parent	daaf427c6ab392bedcd018e326b2ffa1e1110cd6 (diff)
ixgbevf: Update ixgbevf_alloc_rx_buffers to handle clearing of status bits
Instead of clearing the status bits in the cleanup it makes more sense to
just clear the status bits on allocation. This way we can leave the Rx
descriptor rings as a read only memory block until we actually have
buffers to give back to the hardware.

CC: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
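The trick behind the change is that the advanced Rx descriptor is a union: the
"read" layout (pkt_addr/hdr_addr) handed to hardware overlaps the "write-back"
layout (which carries the status/DD bits) returned by hardware. Zeroing
hdr_addr while refilling a buffer therefore also clears the status bits, so the
cleanup path only ever reads the ring. The following is a minimal userspace
sketch of that idea, not driver code; the ring size, DD bit value, and names
such as desc_ring and refill_rx_ring are illustrative assumptions.

	/* Userspace model of refill-time status clearing: cleanup only reads
	 * write-back descriptors, refill rewrites pkt_addr and zeroes the
	 * overlapping status word before handing the slot back to "hardware".
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define RING_SIZE 8
	#define DD_BIT    0x1ULL            /* "descriptor done" status bit */

	union rx_desc {
		struct { uint64_t pkt_addr; uint64_t hdr_addr; } read; /* to hw */
		struct { uint64_t lower;    uint64_t status;   } wb;   /* from hw */
	};

	static union rx_desc desc_ring[RING_SIZE];

	/* Refill: overwrite the write-back contents with a fresh read descriptor.
	 * Clearing hdr_addr also clears wb.status (they overlap in the union),
	 * so the cleanup loop never has to write the ring.
	 */
	static void refill_rx_ring(uint16_t first, uint16_t count)
	{
		uint16_t i = first;

		while (count--) {
			desc_ring[i].read.pkt_addr = 0x1000 + i; /* fake DMA address */
			desc_ring[i].read.hdr_addr = 0;          /* clears status/DD */
			i = (i + 1) % RING_SIZE;
		}
	}

	int main(void)
	{
		/* pretend hardware completed descriptor 0 */
		desc_ring[0].wb.status = DD_BIT;

		/* cleanup only tests DD; it does not zero status_error itself */
		if (desc_ring[0].wb.status & DD_BIT)
			printf("descriptor 0 done, status=%#llx\n",
			       (unsigned long long)desc_ring[0].wb.status);

		refill_rx_ring(0, 1);
		printf("after refill, status=%#llx\n",
		       (unsigned long long)desc_ring[0].wb.status);
		return 0;
	}

Running the sketch prints a non-zero status after the simulated write-back and
zero after the refill, which mirrors why ixgbevf_clean_rx_irq no longer needs
to clear status_error for every descriptor.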
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf')
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c	132
1 file changed, 80 insertions, 52 deletions
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 030a219c85e3..deda74d24075 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -143,21 +143,6 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
 	return value;
 }
 
-static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
-					   u32 val)
-{
-	rx_ring->next_to_use = val;
-
-	/*
-	 * Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch. (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-	ixgbevf_write_tail(rx_ring, val);
-}
-
 /**
  * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
  * @adapter: pointer to adapter struct
@@ -424,52 +409,99 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
+static bool ixgbevf_alloc_mapped_skb(struct ixgbevf_ring *rx_ring,
+				     struct ixgbevf_rx_buffer *bi)
+{
+	struct sk_buff *skb = bi->skb;
+	dma_addr_t dma = bi->dma;
+
+	if (unlikely(skb))
+		return true;
+
+	skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+					rx_ring->rx_buf_len);
+	if (unlikely(!skb)) {
+		rx_ring->rx_stats.alloc_rx_buff_failed++;
+		return false;
+	}
+
+	dma = dma_map_single(rx_ring->dev, skb->data,
+			     rx_ring->rx_buf_len, DMA_FROM_DEVICE);
+
+	/* if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		dev_kfree_skb_any(skb);
+
+		rx_ring->rx_stats.alloc_rx_buff_failed++;
+		return false;
+	}
+
+	bi->skb = skb;
+	bi->dma = dma;
+
+	return true;
+}
+
 /**
  * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
  * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
+ * @cleaned_count: number of buffers to replace
  **/
 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
-				     int cleaned_count)
+				     u16 cleaned_count)
 {
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbevf_rx_buffer *bi;
 	unsigned int i = rx_ring->next_to_use;
 
-	while (cleaned_count--) {
-		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
-		bi = &rx_ring->rx_buffer_info[i];
-
-		if (!bi->skb) {
-			struct sk_buff *skb;
+	/* nothing to do or no valid netdev defined */
+	if (!cleaned_count || !rx_ring->netdev)
+		return;
 
-			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-							rx_ring->rx_buf_len);
-			if (!skb)
-				goto no_buffers;
+	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
+	bi = &rx_ring->rx_buffer_info[i];
+	i -= rx_ring->count;
 
-			bi->skb = skb;
+	do {
+		if (!ixgbevf_alloc_mapped_skb(rx_ring, bi))
+			break;
 
-			bi->dma = dma_map_single(rx_ring->dev, skb->data,
-						 rx_ring->rx_buf_len,
-						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-				dev_kfree_skb(skb);
-				bi->skb = NULL;
-				dev_err(rx_ring->dev, "Rx DMA map failed\n");
-				break;
-			}
-		}
+		/* Refresh the desc even if pkt_addr didn't change
+		 * because each write-back erases this info.
+		 */
 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
 
+		rx_desc++;
+		bi++;
 		i++;
-		if (i == rx_ring->count)
-			i = 0;
-	}
+		if (unlikely(!i)) {
+			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the hdr_addr for the next_to_use descriptor */
+		rx_desc->read.hdr_addr = 0;
+
+		cleaned_count--;
+	} while (cleaned_count);
 
-no_buffers:
-	rx_ring->rx_stats.alloc_rx_buff_failed++;
-	if (rx_ring->next_to_use != i)
-		ixgbevf_release_rx_desc(rx_ring, i);
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i) {
+		/* record the next descriptor to use */
+		rx_ring->next_to_use = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch. (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		ixgbevf_write_tail(rx_ring, i);
+	}
 }
 
 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -489,8 +521,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	struct sk_buff *skb;
 	unsigned int i;
 	u32 len, staterr;
-	int cleaned_count = 0;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
 
 	i = rx_ring->next_to_clean;
 	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
@@ -571,8 +603,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
-		rx_desc->wb.upper.status_error = 0;
-
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
 			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -587,11 +617,6 @@ next_desc:
 	}
 
 	rx_ring->next_to_clean = i;
-	cleaned_count = ixgbevf_desc_unused(rx_ring);
-
-	if (cleaned_count)
-		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
-
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
 	rx_ring->stats.bytes += total_rx_bytes;
@@ -599,6 +624,9 @@ next_desc:
 	q_vector->rx.total_packets += total_rx_packets;
 	q_vector->rx.total_bytes += total_rx_bytes;
 
+	if (cleaned_count)
+		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
+
 	return total_rx_packets;
 }
 