aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIvan Vecera <ivecera@redhat.com>2016-01-15 07:45:28 -0500
committerDavid S. Miller <davem@davemloft.net>2016-01-15 21:49:25 -0500
commit6c3f5aef1159a278b54642ebc0bbb5cdab7630cf (patch)
treeb65167bb24f1b50d78ed06008e448205ddf2ed33
parent4e5448a31d73d0e944b7adb9049438a09bc332cb (diff)
bna: fix Rx data corruption with VLAN stripping enabled and MTU > 4096
The multi-buffer Rx mode implemented in the past introduced a regression that causes a data corruption for received VLAN traffic when VLAN tag stripping is enabled. This mode is supported only by newer chipsets (1860) and is enabled when MTU > 4096. When this mode is enabled Rx queue contains buffers with fixed size 2048 bytes. Any incoming packet larger than 2048 is divided into multiple buffers that are attached as skb frags in polling routine. The driver assumes that all buffers associated with a packet except the last one are fully used (e.g. a packet with size 5000 is divided into 3 buffers 2048 + 2048 + 904 bytes) and ignores true size reported in completions. This assumption is usually true but not when VLAN packet is received and VLAN tag stripping is enabled. In this case the first buffer is 2044 bytes long but as the driver always assumes 2048 bytes then 4 extra random bytes are included between the first and the second frag. Additionally the driver sets checksum as correct so the packet is properly processed by the core. The driver needs to check the size of used space in each Rx buffer reported by FW and not blindly use the fixed value. Cc: Rasesh Mody <rasesh.mody@qlogic.com> Signed-off-by: Ivan Vecera <ivecera@redhat.com> Reviewed-by: Rasesh Mody <rasesh.mody@qlogic.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c37
1 file changed, 24 insertions, 13 deletions
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 21a0cfc3e7ec..771cc267f217 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -542,39 +542,50 @@ bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
542} 542}
543 543
544static void 544static void
545bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb, 545bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
546 u32 sop_ci, u32 nvecs, u32 last_fraglen)
547{ 546{
547 struct bna_rcb *rcb;
548 struct bnad *bnad; 548 struct bnad *bnad;
549 u32 ci, vec, len, totlen = 0;
550 struct bnad_rx_unmap_q *unmap_q; 549 struct bnad_rx_unmap_q *unmap_q;
551 struct bnad_rx_unmap *unmap; 550 struct bna_cq_entry *cq, *cmpl;
551 u32 ci, pi, totlen = 0;
552
553 cq = ccb->sw_q;
554 pi = ccb->producer_index;
555 cmpl = &cq[pi];
552 556
557 rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
553 unmap_q = rcb->unmap_q; 558 unmap_q = rcb->unmap_q;
554 bnad = rcb->bnad; 559 bnad = rcb->bnad;
560 ci = rcb->consumer_index;
555 561
556 /* prefetch header */ 562 /* prefetch header */
557 prefetch(page_address(unmap_q->unmap[sop_ci].page) + 563 prefetch(page_address(unmap_q->unmap[ci].page) +
558 unmap_q->unmap[sop_ci].page_offset); 564 unmap_q->unmap[ci].page_offset);
565
566 while (nvecs--) {
567 struct bnad_rx_unmap *unmap;
568 u32 len;
559 569
560 for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
561 unmap = &unmap_q->unmap[ci]; 570 unmap = &unmap_q->unmap[ci];
562 BNA_QE_INDX_INC(ci, rcb->q_depth); 571 BNA_QE_INDX_INC(ci, rcb->q_depth);
563 572
564 dma_unmap_page(&bnad->pcidev->dev, 573 dma_unmap_page(&bnad->pcidev->dev,
565 dma_unmap_addr(&unmap->vector, dma_addr), 574 dma_unmap_addr(&unmap->vector, dma_addr),
566 unmap->vector.len, DMA_FROM_DEVICE); 575 unmap->vector.len, DMA_FROM_DEVICE);
567 576
568 len = (vec == nvecs) ? 577 len = ntohs(cmpl->length);
569 last_fraglen : unmap->vector.len;
570 skb->truesize += unmap->vector.len; 578 skb->truesize += unmap->vector.len;
571 totlen += len; 579 totlen += len;
572 580
573 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 581 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
574 unmap->page, unmap->page_offset, len); 582 unmap->page, unmap->page_offset, len);
575 583
576 unmap->page = NULL; 584 unmap->page = NULL;
577 unmap->vector.len = 0; 585 unmap->vector.len = 0;
586
587 BNA_QE_INDX_INC(pi, ccb->q_depth);
588 cmpl = &cq[pi];
578 } 589 }
579 590
580 skb->len += totlen; 591 skb->len += totlen;
@@ -704,7 +715,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
704 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) 715 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
705 bnad_cq_setup_skb(bnad, skb, unmap, len); 716 bnad_cq_setup_skb(bnad, skb, unmap, len);
706 else 717 else
707 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); 718 bnad_cq_setup_skb_frags(ccb, skb, nvecs);
708 719
709 rcb->rxq->rx_packets++; 720 rcb->rxq->rx_packets++;
710 rcb->rxq->rx_bytes += totlen; 721 rcb->rxq->rx_bytes += totlen;