author		David S. Miller <davem@davemloft.net>	2008-09-22 04:29:52 -0400
committer	David S. Miller <davem@davemloft.net>	2008-09-22 04:29:52 -0400
commit		147e70e62fdd5af6263106ad634b03c5154c1e56 (patch)
tree		e56fdeb0b035149e157952bffbf8c04b04f0d7a3
parent		38783e671399b5405f1fd177d602c400a9577ae6 (diff)
cxgb3: Use SKB list interfaces instead of home-grown implementation.
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/cxgb3/adapter.h	 3
-rw-r--r--	drivers/net/cxgb3/l2t.c	39
-rw-r--r--	drivers/net/cxgb3/l2t.h	 3
-rw-r--r--	drivers/net/cxgb3/sge.c	45
4 files changed, 42 insertions, 48 deletions
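
The patch below consistently replaces hand-rolled head/tail skb pointer pairs with the generic struct sk_buff_head helpers from <linux/skbuff.h>. As a minimal sketch of that pattern (the demo_* names are illustrative only and do not appear in the driver):

#include <linux/skbuff.h>

/* Illustrative queue; in the driver the list lives in sge_rspq / l2t_entry. */
static struct sk_buff_head demo_queue;

static void demo_init(void)
{
	skb_queue_head_init(&demo_queue);	/* init list head and its internal spinlock */
}

static void demo_enqueue(struct sk_buff *skb)
{
	/* double-underscore variants assume the caller provides locking,
	 * which is what the driver does with e->lock and q->lock */
	__skb_queue_tail(&demo_queue, skb);
}

static void demo_drain(void (*deliver)(struct sk_buff *skb))
{
	struct sk_buff *skb, *tmp;

	/* the _safe walker tolerates unlinking the current element */
	skb_queue_walk_safe(&demo_queue, skb, tmp) {
		__skb_unlink(skb, &demo_queue);
		deliver(skb);
	}
}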
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 271140433b09..4f5cc6987ec1 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -124,8 +124,7 @@ struct sge_rspq { /* state for an SGE response queue */
 	dma_addr_t phys_addr;	/* physical address of the ring */
 	unsigned int cntxt_id;	/* SGE context id for the response q */
 	spinlock_t lock;	/* guards response processing */
-	struct sk_buff *rx_head;	/* offload packet receive queue head */
-	struct sk_buff *rx_tail;	/* offload packet receive queue tail */
+	struct sk_buff_head rx_queue; /* offload packet receive queue */
 	struct sk_buff *pg_skb; /* used to build frag list in napi handler */
 
 	unsigned long offload_pkts;
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 825e510bd9ed..b2c5314582aa 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -86,6 +86,7 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
 				  struct l2t_entry *e)
 {
 	struct cpl_l2t_write_req *req;
+	struct sk_buff *tmp;
 
 	if (!skb) {
 		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
@@ -103,13 +104,11 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
 	skb->priority = CPL_PRIORITY_CONTROL;
 	cxgb3_ofld_send(dev, skb);
-	while (e->arpq_head) {
-		skb = e->arpq_head;
-		e->arpq_head = skb->next;
-		skb->next = NULL;
+
+	skb_queue_walk_safe(&e->arpq, skb, tmp) {
+		__skb_unlink(skb, &e->arpq);
 		cxgb3_ofld_send(dev, skb);
 	}
-	e->arpq_tail = NULL;
 	e->state = L2T_STATE_VALID;
 
 	return 0;
@@ -121,12 +120,7 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
  */
 static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
 {
-	skb->next = NULL;
-	if (e->arpq_head)
-		e->arpq_tail->next = skb;
-	else
-		e->arpq_head = skb;
-	e->arpq_tail = skb;
+	__skb_queue_tail(&e->arpq, skb);
 }
 
 int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
@@ -167,7 +161,7 @@ again:
 			break;
 
 		spin_lock_bh(&e->lock);
-		if (e->arpq_head)
+		if (!skb_queue_empty(&e->arpq))
 			setup_l2e_send_pending(dev, skb, e);
 		else	/* we lost the race */
 			__kfree_skb(skb);
@@ -357,14 +351,14 @@ EXPORT_SYMBOL(t3_l2t_get);
  * XXX: maybe we should abandon the latter behavior and just require a failure
  * handler.
  */
-static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
+static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
 {
-	while (arpq) {
-		struct sk_buff *skb = arpq;
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(arpq, skb, tmp) {
 		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
 
-		arpq = skb->next;
-		skb->next = NULL;
+		__skb_unlink(skb, arpq);
 		if (cb->arp_failure_handler)
 			cb->arp_failure_handler(dev, skb);
 		else
@@ -378,8 +372,8 @@ static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
  */
 void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
 {
+	struct sk_buff_head arpq;
 	struct l2t_entry *e;
-	struct sk_buff *arpq = NULL;
 	struct l2t_data *d = L2DATA(dev);
 	u32 addr = *(u32 *) neigh->primary_key;
 	int ifidx = neigh->dev->ifindex;
@@ -395,6 +389,8 @@ void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
 		return;
 
 found:
+	__skb_queue_head_init(&arpq);
+
 	read_unlock(&d->lock);
 	if (atomic_read(&e->refcnt)) {
 		if (neigh != e->neigh)
@@ -402,8 +398,7 @@ found:
 
 		if (e->state == L2T_STATE_RESOLVING) {
 			if (neigh->nud_state & NUD_FAILED) {
-				arpq = e->arpq_head;
-				e->arpq_head = e->arpq_tail = NULL;
+				skb_queue_splice_init(&e->arpq, &arpq);
 			} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
 				setup_l2e_send_pending(dev, NULL, e);
 		} else {
@@ -415,8 +410,8 @@ found:
 	}
 	spin_unlock_bh(&e->lock);
 
-	if (arpq)
-		handle_failed_resolution(dev, arpq);
+	if (!skb_queue_empty(&arpq))
+		handle_failed_resolution(dev, &arpq);
 }
 
 struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
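
For the two l2t.c call sites above, the conversion also switches to a splice-then-process idiom: pending skbs are moved off e->arpq in one operation while the entry lock is held, and only afterwards handed to the failure path. A rough sketch of that idiom, using a simplified hypothetical entry type (demo_entry and demo_fail_pending are stand-ins, not driver code):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_entry {
	spinlock_t lock;
	struct sk_buff_head arpq;	/* packets awaiting ARP resolution */
};

static void demo_fail_pending(struct demo_entry *e)
{
	struct sk_buff_head failq;
	struct sk_buff *skb, *tmp;

	__skb_queue_head_init(&failq);		/* on-stack list, no locking needed */

	spin_lock_bh(&e->lock);
	skb_queue_splice_init(&e->arpq, &failq);	/* take everything, leave arpq empty */
	spin_unlock_bh(&e->lock);

	/* process outside the lock; the driver invokes cb->arp_failure_handler here */
	skb_queue_walk_safe(&failq, skb, tmp) {
		__skb_unlink(skb, &failq);
		kfree_skb(skb);
	}
}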
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index d79001336cfd..42ce65f76a87 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -64,8 +64,7 @@ struct l2t_entry {
 	struct neighbour *neigh;	/* associated neighbour */
 	struct l2t_entry *first;	/* start of hash chain */
 	struct l2t_entry *next;	/* next l2t_entry on chain */
-	struct sk_buff *arpq_head;	/* queue of packets awaiting resolution */
-	struct sk_buff *arpq_tail;
+	struct sk_buff_head arpq;	/* queue of packets awaiting resolution */
 	spinlock_t lock;
 	atomic_t refcnt;	/* entry reference count */
 	u8 dmac[6];	/* neighbour's MAC address */
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 1b0861d73ab7..6990c0ddc854 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1704,16 +1704,15 @@ int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
  */
 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
 {
-	skb->next = skb->prev = NULL;
-	if (q->rx_tail)
-		q->rx_tail->next = skb;
-	else {
+	int was_empty = skb_queue_empty(&q->rx_queue);
+
+	__skb_queue_tail(&q->rx_queue, skb);
+
+	if (was_empty) {
 		struct sge_qset *qs = rspq_to_qset(q);
 
 		napi_schedule(&qs->napi);
-		q->rx_head = skb;
 	}
-	q->rx_tail = skb;
 }
 
 /**
@@ -1754,26 +1753,29 @@ static int ofld_poll(struct napi_struct *napi, int budget)
 	int work_done = 0;
 
 	while (work_done < budget) {
-		struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
+		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+		struct sk_buff_head queue;
 		int ngathered;
 
 		spin_lock_irq(&q->lock);
-		head = q->rx_head;
-		if (!head) {
+		__skb_queue_head_init(&queue);
+		skb_queue_splice_init(&q->rx_queue, &queue);
+		if (skb_queue_empty(&queue)) {
 			napi_complete(napi);
 			spin_unlock_irq(&q->lock);
 			return work_done;
 		}
-
-		tail = q->rx_tail;
-		q->rx_head = q->rx_tail = NULL;
 		spin_unlock_irq(&q->lock);
 
-		for (ngathered = 0; work_done < budget && head; work_done++) {
-			prefetch(head->data);
-			skbs[ngathered] = head;
-			head = head->next;
-			skbs[ngathered]->next = NULL;
+		ngathered = 0;
+		skb_queue_walk_safe(&queue, skb, tmp) {
+			if (work_done >= budget)
+				break;
+			work_done++;
+
+			__skb_unlink(skb, &queue);
+			prefetch(skb->data);
+			skbs[ngathered] = skb;
 			if (++ngathered == RX_BUNDLE_SIZE) {
 				q->offload_bundles++;
 				adapter->tdev.recv(&adapter->tdev, skbs,
@@ -1781,12 +1783,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
 				ngathered = 0;
 			}
 		}
-		if (head) {	/* splice remaining packets back onto Rx queue */
+		if (!skb_queue_empty(&queue)) {
+			/* splice remaining packets back onto Rx queue */
 			spin_lock_irq(&q->lock);
-			tail->next = q->rx_head;
-			if (!q->rx_head)
-				q->rx_tail = tail;
-			q->rx_head = head;
+			skb_queue_splice(&queue, &q->rx_queue);
 			spin_unlock_irq(&q->lock);
 		}
 		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
@@ -2934,6 +2934,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	q->rspq.gen = 1;
 	q->rspq.size = p->rspq_size;
 	spin_lock_init(&q->rspq.lock);
+	skb_queue_head_init(&q->rspq.rx_queue);
 
 	q->txq[TXQ_ETH].stop_thres = nports *
 		flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
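
The ofld_poll() change above follows the same shape: detach the whole rx_queue under the response-queue lock, consume up to the NAPI budget without holding the lock, and splice any remainder back onto the front so ordering is preserved. A standalone sketch of that loop, with demo_poll, rxq and lock as stand-in names rather than driver symbols:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static int demo_poll(struct sk_buff_head *rxq, spinlock_t *lock, int budget)
{
	struct sk_buff_head work;
	struct sk_buff *skb, *tmp;
	int done = 0;

	__skb_queue_head_init(&work);

	spin_lock_irq(lock);
	skb_queue_splice_init(rxq, &work);	/* grab the whole backlog at once */
	spin_unlock_irq(lock);

	skb_queue_walk_safe(&work, skb, tmp) {
		if (done >= budget)
			break;
		done++;
		__skb_unlink(skb, &work);
		kfree_skb(skb);			/* stand-in for delivery to the offload layer */
	}

	if (!skb_queue_empty(&work)) {
		spin_lock_irq(lock);
		skb_queue_splice(&work, rxq);	/* put leftovers back at the head */
		spin_unlock_irq(lock);
	}

	return done;
}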