Diffstat (limited to 'drivers/net/cxgb3/sge.c')

 drivers/net/cxgb3/sge.c | 112 +++++++++++++++++++---------------------------
 1 file changed, 46 insertions(+), 66 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 1b0861d73ab7..c6480be0bc1f 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -351,7 +351,8 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
                pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
                                 q->buf_size, PCI_DMA_FROMDEVICE);
                if (q->use_pages) {
-                       put_page(d->pg_chunk.page);
+                       if (d->pg_chunk.page)
+                               put_page(d->pg_chunk.page);
                        d->pg_chunk.page = NULL;
                } else {
                        kfree_skb(d->skb);
@@ -583,7 +584,7 @@ static void t3_reset_qset(struct sge_qset *q)
        memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
        memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
        q->txq_stopped = 0;
-       memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
+       q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
        kfree(q->lro_frag_tbl);
        q->lro_nfrags = q->lro_frag_len = 0;
 }
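
Note: clearing only .function (rather than memset()ing the whole struct
timer_list, which would also wipe a pending timer's internal list linkage)
leaves a sentinel that the new t3_stop_sge_timers() below tests before
touching the timer. The convention, in brief:

        /* reset path: mark the timer "not armed" */
        q->tx_reclaim_timer.function = NULL;

        /* stop path: only sync timers that were actually set up */
        if (q->tx_reclaim_timer.function)
                del_timer_sync(&q->tx_reclaim_timer);
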
@@ -603,9 +604,6 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
        int i;
        struct pci_dev *pdev = adapter->pdev;
 
-       if (q->tx_reclaim_timer.function)
-               del_timer_sync(&q->tx_reclaim_timer);
-
        for (i = 0; i < SGE_RXQ_PER_SET; ++i)
                if (q->fl[i].desc) {
                        spin_lock_irq(&adapter->sge.reg_lock);
@@ -1704,16 +1702,15 @@ int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
  */
 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
 {
-       skb->next = skb->prev = NULL;
-       if (q->rx_tail)
-               q->rx_tail->next = skb;
-       else {
+       int was_empty = skb_queue_empty(&q->rx_queue);
+
+       __skb_queue_tail(&q->rx_queue, skb);
+
+       if (was_empty) {
                struct sge_qset *qs = rspq_to_qset(q);
 
                napi_schedule(&qs->napi);
-               q->rx_head = skb;
        }
-       q->rx_tail = skb;
 }
 
 /**
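
Note: the hand-rolled rx_head/rx_tail singly linked list becomes a standard
struct sk_buff_head. This assumes a matching change to struct sge_rspq in
the adapter header, which this diffstat-limited view does not show; roughly:

        struct sge_rspq {
                ...
                struct sk_buff_head rx_queue; /* replaces rx_head/rx_tail */
                ...
        };

__skb_queue_tail() is the unlocked variant, so the enqueue path keeps
whatever serialization the old open-coded list relied on instead of adding
the sk_buff_head's internal spinlock on top.
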
@@ -1754,26 +1751,29 @@ static int ofld_poll(struct napi_struct *napi, int budget)
        int work_done = 0;
 
        while (work_done < budget) {
-               struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
+               struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+               struct sk_buff_head queue;
                int ngathered;
 
                spin_lock_irq(&q->lock);
-               head = q->rx_head;
-               if (!head) {
+               __skb_queue_head_init(&queue);
+               skb_queue_splice_init(&q->rx_queue, &queue);
+               if (skb_queue_empty(&queue)) {
                        napi_complete(napi);
                        spin_unlock_irq(&q->lock);
                        return work_done;
                }
-
-               tail = q->rx_tail;
-               q->rx_head = q->rx_tail = NULL;
                spin_unlock_irq(&q->lock);
 
-               for (ngathered = 0; work_done < budget && head; work_done++) {
-                       prefetch(head->data);
-                       skbs[ngathered] = head;
-                       head = head->next;
-                       skbs[ngathered]->next = NULL;
+               ngathered = 0;
+               skb_queue_walk_safe(&queue, skb, tmp) {
+                       if (work_done >= budget)
+                               break;
+                       work_done++;
+
+                       __skb_unlink(skb, &queue);
+                       prefetch(skb->data);
+                       skbs[ngathered] = skb;
                        if (++ngathered == RX_BUNDLE_SIZE) {
                                q->offload_bundles++;
                                adapter->tdev.recv(&adapter->tdev, skbs,
@@ -1781,12 +1781,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
                                ngathered = 0;
                        }
                }
-               if (head) { /* splice remaining packets back onto Rx queue */
+               if (!skb_queue_empty(&queue)) {
+                       /* splice remaining packets back onto Rx queue */
                        spin_lock_irq(&q->lock);
-                       tail->next = q->rx_head;
-                       if (!q->rx_head)
-                               q->rx_tail = tail;
-                       q->rx_head = head;
+                       skb_queue_splice(&queue, &q->rx_queue);
                        spin_unlock_irq(&q->lock);
                }
                deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
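
Note: the poll loop now follows the common splice-out/process/splice-back
pattern: drain the shared queue onto a private list in one locked operation,
work on it lock-free, then return any leftovers. A minimal sketch of the
pattern in isolation (drain_rx_queue is a made-up name; the fields and
helpers match this patch):

        static void drain_rx_queue(struct sge_rspq *q, int budget)
        {
                struct sk_buff_head local;
                struct sk_buff *skb, *tmp;

                __skb_queue_head_init(&local);

                spin_lock_irq(&q->lock);
                skb_queue_splice_init(&q->rx_queue, &local); /* take all */
                spin_unlock_irq(&q->lock);

                skb_queue_walk_safe(&local, skb, tmp) {
                        if (budget-- == 0)      /* leave the rest queued */
                                break;
                        __skb_unlink(skb, &local);
                        /* ... process skb without holding q->lock ... */
                }

                if (!skb_queue_empty(&local)) {
                        spin_lock_irq(&q->lock);
                        skb_queue_splice(&local, &q->rx_queue);
                        spin_unlock_irq(&q->lock);
                }
        }

skb_queue_splice() (as opposed to skb_queue_splice_tail()) puts the
unprocessed packets back at the head of rx_queue, keeping them ahead of
anything that arrived while the lock was dropped, so delivery order is
preserved.
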
@@ -1937,38 +1935,6 @@ static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
               eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
 }
 
-#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
-                      TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
-                      TCP_FLAG_SYN | TCP_FLAG_FIN)
-#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
-                    (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
-
-/**
- * lro_segment_ok - check if a TCP segment is eligible for LRO
- * @tcph: the TCP header of the packet
- *
- * Returns true if a TCP packet is eligible for LRO. This requires that
- * the packet have only the ACK flag set and no TCP options besides
- * time stamps.
- */
-static inline int lro_segment_ok(const struct tcphdr *tcph)
-{
-       int optlen;
-
-       if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
-               return 0;
-
-       optlen = (tcph->doff << 2) - sizeof(*tcph);
-       if (optlen) {
-               const u32 *opt = (const u32 *)(tcph + 1);
-
-               if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
-                   *opt != htonl(TSTAMP_WORD) || !opt[2])
-                       return 0;
-       }
-       return 1;
-}
-
 static int t3_get_lro_header(void **eh, void **iph, void **tcph,
                             u64 *hdr_flags, void *priv)
 {
@@ -1981,9 +1947,6 @@ static int t3_get_lro_header(void **eh, void **iph, void **tcph,
        *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
        *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
 
-       if (!lro_segment_ok(*tcph))
-               return -1;
-
        *hdr_flags = LRO_IPV4 | LRO_TCP;
        return 0;
 }
@@ -2878,9 +2841,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
        struct net_lro_mgr *lro_mgr = &q->lro_mgr;
 
        init_qset_cntxt(q, id);
-       init_timer(&q->tx_reclaim_timer);
-       q->tx_reclaim_timer.data = (unsigned long)q;
-       q->tx_reclaim_timer.function = sge_timer_cb;
+       setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
 
        q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
                                   sizeof(struct rx_desc),
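
Note: setup_timer() is the standard one-line replacement for the open-coded
three-step initialization; it performs exactly the same assignments:

        /* before */
        init_timer(&q->tx_reclaim_timer);
        q->tx_reclaim_timer.data = (unsigned long)q;
        q->tx_reclaim_timer.function = sge_timer_cb;

        /* after: identical effect */
        setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);

Like init_timer(), it only initializes the timer; arming it still happens
later via mod_timer().
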
@@ -2934,6 +2895,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
        q->rspq.gen = 1;
        q->rspq.size = p->rspq_size;
        spin_lock_init(&q->rspq.lock);
+       skb_queue_head_init(&q->rspq.rx_queue);
 
        q->txq[TXQ_ETH].stop_thres = nports *
                                     flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
@@ -3043,6 +3005,24 @@ err:
 }
 
 /**
+ * t3_stop_sge_timers - stop SGE timer callbacks
+ * @adap: the adapter
+ *
+ * Stops each SGE queue set's timer callback
+ */
+void t3_stop_sge_timers(struct adapter *adap)
+{
+       int i;
+
+       for (i = 0; i < SGE_QSETS; ++i) {
+               struct sge_qset *q = &adap->sge.qs[i];
+
+               if (q->tx_reclaim_timer.function)
+                       del_timer_sync(&q->tx_reclaim_timer);
+       }
+}
+
+/**
  * t3_free_sge_resources - free SGE resources
  * @adap: the adapter
  *
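
Note: with del_timer_sync() moved out of t3_free_qset(), callers are
expected to quiesce the timers explicitly before tearing down the queue
sets. A sketch of the intended ordering at the (hypothetical, not shown
in this file) call site in the driver's unload path:

        t3_stop_sge_timers(adap);       /* no more sge_timer_cb invocations */
        t3_free_sge_resources(adap);    /* now safe to free rings/queues */
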