author	Scott Feldman <scofeldm@cisco.com>	2009-09-03 13:02:03 -0400
committer	David S. Miller <davem@davemloft.net>	2009-09-03 23:19:11 -0400
commit	ea0d7d9181b0831f1b570b02fa02a15f25520c12 (patch)
tree	8ec141dc43dd2a8999a3dd69231854e90e11f309 /drivers/net/enic
parent	4badc385d1a9e140ad0992537237fc22211adad0 (diff)
enic: bug fix: split TSO fragments larger than 16K into multiple descs
enic WQ desc supports a maximum 16K buf size, so split any send fragments
larger than 16K into several descs.
Signed-off-by: Scott Feldman <scofeldm@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/enic')
-rw-r--r--	drivers/net/enic/enic_main.c	87
1 file changed, 69 insertions(+), 18 deletions(-)
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 58cae6e6a59c..bc5cb225ddac 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -44,10 +44,15 @@
 #include "enic.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
+#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
+#define MAX_TSO				(1 << 16)
+#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
+#define PCI_DEVICE_ID_CISCO_VIC_ENET	0x0043	/* ethernet vnic */
 
 /* Supported devices */
 static struct pci_device_id enic_id_table[] = {
-	{ PCI_VDEVICE(CISCO, 0x0043) },
+	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
 	{ 0, }	/* end of table */
 };
 
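
WQ_ENET_LEN_BITS itself is defined elsewhere in the driver, but the 16K maximum buf size cited in the commit message implies a value of 14. Under that assumption, a minimal stand-alone sketch (not driver code) of what the new macros evaluate to:

#include <stdio.h>

/* Assumption: WQ_ENET_LEN_BITS == 14, matching the 16K descriptor
 * buf limit described in the commit message. */
#define WQ_ENET_LEN_BITS	14
#define WQ_ENET_MAX_DESC_LEN	(1 << WQ_ENET_LEN_BITS)		/* 16384 */
#define MAX_TSO			(1 << 16)			/* 65536 */
#define ENIC_DESC_MAX_SPLITS	(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

int main(void)
{
	/* 65536 / 16384 + 1 == 5: one fragment costs at most 5 descs */
	printf("max descs per fragment: %d\n", ENIC_DESC_MAX_SPLITS);
	return 0;
}
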
@@ -310,7 +315,8 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 		opaque);
 
 	if (netif_queue_stopped(enic->netdev) &&
-	    vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1)
+	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
+	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
 		netif_wake_queue(enic->netdev);
 
 	spin_unlock(&enic->wq_lock[q_number]);
@@ -525,7 +531,11 @@ static inline void enic_queue_wq_skb_vlan(struct enic *enic,
 	unsigned int len_left = skb->len - head_len;
 	int eop = (len_left == 0);
 
-	/* Queue the main skb fragment */
+	/* Queue the main skb fragment. The fragments are no larger
+	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
+	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
+	 * per fragment is queued.
+	 */
 	enic_queue_wq_desc(wq, skb,
 		pci_map_single(enic->pdev, skb->data,
 			head_len, PCI_DMA_TODEVICE),
@@ -547,7 +557,11 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
 	unsigned int csum_offset = hdr_len + skb->csum_offset;
 	int eop = (len_left == 0);
 
-	/* Queue the main skb fragment */
+	/* Queue the main skb fragment. The fragments are no larger
+	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
+	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
+	 * per fragment is queued.
+	 */
 	enic_queue_wq_desc_csum_l4(wq, skb,
 		pci_map_single(enic->pdev, skb->data,
 			head_len, PCI_DMA_TODEVICE),
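
A quick stand-alone check of the bound those two comments rely on, again assuming WQ_ENET_LEN_BITS is 14 (the 16K limit); the local macro names here just mirror the comment's wording:

#include <assert.h>

#define WQ_ENET_LEN_BITS	14	/* assumed, per the 16K limit */
#define WQ_ENET_MAX_DESC_LEN	(1 << WQ_ENET_LEN_BITS)
#define ENIC_MAX_MTU		9000	/* max MTU cited in the comments */
#define ETH_HDR_LEN		14

int main(void)
{
	/* 9000 + 14 = 9014 < 16384, so one descriptor per fragment
	 * is always enough on the non-TSO paths above */
	assert(ENIC_MAX_MTU + ETH_HDR_LEN < WQ_ENET_MAX_DESC_LEN);
	return 0;
}
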
@@ -565,10 +579,14 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
 	int vlan_tag_insert, unsigned int vlan_tag)
 {
-	unsigned int head_len = skb_headlen(skb);
-	unsigned int len_left = skb->len - head_len;
+	unsigned int frag_len_left = skb_headlen(skb);
+	unsigned int len_left = skb->len - frag_len_left;
 	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	int eop = (len_left == 0);
+	unsigned int len;
+	dma_addr_t dma_addr;
+	unsigned int offset = 0;
+	skb_frag_t *frag;
 
 	/* Preload TCP csum field with IP pseudo hdr calculated
 	 * with IP length set to zero. HW will later add in length
@@ -584,17 +602,49 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
 	}
 
-	/* Queue the main skb fragment */
-	enic_queue_wq_desc_tso(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			head_len, PCI_DMA_TODEVICE),
-		head_len,
-		mss, hdr_len,
-		vlan_tag_insert, vlan_tag,
-		eop);
+	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
+	 * for the main skb fragment
+	 */
+	while (frag_len_left) {
+		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
+		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
+				len, PCI_DMA_TODEVICE);
+		enic_queue_wq_desc_tso(wq, skb,
+			dma_addr,
+			len,
+			mss, hdr_len,
+			vlan_tag_insert, vlan_tag,
+			eop && (len == frag_len_left));
+		frag_len_left -= len;
+		offset += len;
+	}
 
-	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+	if (eop)
+		return;
+
+	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
+	 * for additional data fragments
+	 */
+	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
+		len_left -= frag->size;
+		frag_len_left = frag->size;
+		offset = frag->page_offset;
+
+		while (frag_len_left) {
+			len = min(frag_len_left,
+				(unsigned int)WQ_ENET_MAX_DESC_LEN);
+			dma_addr = pci_map_page(enic->pdev, frag->page,
+				offset, len,
+				PCI_DMA_TODEVICE);
+			enic_queue_wq_desc_cont(wq, skb,
+				dma_addr,
+				len,
+				(len_left == 0) &&
+				(len == frag_len_left));	/* EOP? */
+			frag_len_left -= len;
+			offset += len;
+		}
+	}
 }
 
 static inline void enic_queue_wq_skb(struct enic *enic,
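
The shape of the new split loops, extracted into a minimal user-space sketch: queue_desc() is a hypothetical stand-in for enic_queue_wq_desc_tso()/enic_queue_wq_desc_cont(), and the 16K descriptor limit is assumed as above:

#include <stdio.h>

#define WQ_ENET_MAX_DESC_LEN	16384	/* assumed 16K limit */

/* Stand-in for the enic_queue_wq_desc_*() calls */
static void queue_desc(unsigned int offset, unsigned int len, int eop)
{
	printf("desc: offset=%u len=%u eop=%d\n", offset, len, eop);
}

/* Chop one fragment into <= 16K pieces, as the patch does; EOP is
 * set only on the last piece of the last fragment */
static void split_fragment(unsigned int frag_len_left, int last_frag)
{
	unsigned int offset = 0, len;

	while (frag_len_left) {
		len = frag_len_left < WQ_ENET_MAX_DESC_LEN ?
			frag_len_left : WQ_ENET_MAX_DESC_LEN;
		queue_desc(offset, len, last_frag && len == frag_len_left);
		frag_len_left -= len;
		offset += len;
	}
}

int main(void)
{
	/* e.g. a 40000-byte fragment -> 16384 + 16384 + 7232 */
	split_fragment(40000, 1);
	return 0;
}
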
@@ -648,7 +698,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	spin_lock_irqsave(&enic->wq_lock[0], flags);
 
-	if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) {
+	if (vnic_wq_desc_avail(wq) <
+	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
 		netif_stop_queue(netdev);
 		/* This is a hard error, log it */
 		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
@@ -659,7 +710,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	enic_queue_wq_skb(enic, wq, skb);
 
-	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
+	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
 		netif_stop_queue(netdev);
 
 	spin_unlock_irqrestore(&enic->wq_lock[0], flags);
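
The stop/wake thresholds above reserve ring headroom for the worst-case skb: one descriptor per page fragment plus up to ENIC_DESC_MAX_SPLITS for the split linear area. A stand-alone sketch of that accounting, assuming a 4K-page kernel of this era (MAX_SKB_FRAGS of 18) and ENIC_DESC_MAX_SPLITS of 5 as derived earlier:

#include <stdio.h>

/* Assumed values: MAX_SKB_FRAGS on a 4K-page kernel,
 * ENIC_DESC_MAX_SPLITS from the 16K descriptor limit. */
#define MAX_SKB_FRAGS		18
#define ENIC_DESC_MAX_SPLITS	5

/* Mirrors the checks in enic_hard_start_xmit()/enic_wq_service():
 * keep the queue running only while a worst-case skb still fits */
static int ring_has_room(unsigned int desc_avail)
{
	return desc_avail >= MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS;
}

int main(void)
{
	printf("22 free descs: %s\n", ring_has_room(22) ? "run" : "stop");
	printf("23 free descs: %s\n", ring_has_room(23) ? "run" : "stop");
	return 0;
}
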