about summary refs log tree commit diff stats
path: root/drivers/net/cxgb3/l2t.c
diff options
context:
space:
mode:
author    David S. Miller <davem@davemloft.net> 2008-09-22 04:29:52 -0400
committer David S. Miller <davem@davemloft.net> 2008-09-22 04:29:52 -0400
commit 147e70e62fdd5af6263106ad634b03c5154c1e56 (patch)
tree   e56fdeb0b035149e157952bffbf8c04b04f0d7a3 /drivers/net/cxgb3/l2t.c
parent 38783e671399b5405f1fd177d602c400a9577ae6 (diff)
cxgb3: Use SKB list interfaces instead of home-grown implementation.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/cxgb3/l2t.c')
-rw-r--r-- drivers/net/cxgb3/l2t.c | 39
1 files changed, 17 insertions, 22 deletions
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 825e510bd9ed..b2c5314582aa 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -86,6 +86,7 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
86 struct l2t_entry *e) 86 struct l2t_entry *e)
87{ 87{
88 struct cpl_l2t_write_req *req; 88 struct cpl_l2t_write_req *req;
89 struct sk_buff *tmp;
89 90
90 if (!skb) { 91 if (!skb) {
91 skb = alloc_skb(sizeof(*req), GFP_ATOMIC); 92 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
@@ -103,13 +104,11 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
103 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); 104 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
104 skb->priority = CPL_PRIORITY_CONTROL; 105 skb->priority = CPL_PRIORITY_CONTROL;
105 cxgb3_ofld_send(dev, skb); 106 cxgb3_ofld_send(dev, skb);
106 while (e->arpq_head) { 107
107 skb = e->arpq_head; 108 skb_queue_walk_safe(&e->arpq, skb, tmp) {
108 e->arpq_head = skb->next; 109 __skb_unlink(skb, &e->arpq);
109 skb->next = NULL;
110 cxgb3_ofld_send(dev, skb); 110 cxgb3_ofld_send(dev, skb);
111 } 111 }
112 e->arpq_tail = NULL;
113 e->state = L2T_STATE_VALID; 112 e->state = L2T_STATE_VALID;
114 113
115 return 0; 114 return 0;
@@ -121,12 +120,7 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
121 */ 120 */
122static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) 121static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
123{ 122{
124 skb->next = NULL; 123 __skb_queue_tail(&e->arpq, skb);
125 if (e->arpq_head)
126 e->arpq_tail->next = skb;
127 else
128 e->arpq_head = skb;
129 e->arpq_tail = skb;
130} 124}
131 125
132int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, 126int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
@@ -167,7 +161,7 @@ again:
167 break; 161 break;
168 162
169 spin_lock_bh(&e->lock); 163 spin_lock_bh(&e->lock);
170 if (e->arpq_head) 164 if (!skb_queue_empty(&e->arpq))
171 setup_l2e_send_pending(dev, skb, e); 165 setup_l2e_send_pending(dev, skb, e);
172 else /* we lost the race */ 166 else /* we lost the race */
173 __kfree_skb(skb); 167 __kfree_skb(skb);
@@ -357,14 +351,14 @@ EXPORT_SYMBOL(t3_l2t_get);
357 * XXX: maybe we should abandon the latter behavior and just require a failure 351 * XXX: maybe we should abandon the latter behavior and just require a failure
358 * handler. 352 * handler.
359 */ 353 */
360static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq) 354static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
361{ 355{
362 while (arpq) { 356 struct sk_buff *skb, *tmp;
363 struct sk_buff *skb = arpq; 357
358 skb_queue_walk_safe(arpq, skb, tmp) {
364 struct l2t_skb_cb *cb = L2T_SKB_CB(skb); 359 struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
365 360
366 arpq = skb->next; 361 __skb_unlink(skb, arpq);
367 skb->next = NULL;
368 if (cb->arp_failure_handler) 362 if (cb->arp_failure_handler)
369 cb->arp_failure_handler(dev, skb); 363 cb->arp_failure_handler(dev, skb);
370 else 364 else
@@ -378,8 +372,8 @@ static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
378 */ 372 */
379void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh) 373void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
380{ 374{
375 struct sk_buff_head arpq;
381 struct l2t_entry *e; 376 struct l2t_entry *e;
382 struct sk_buff *arpq = NULL;
383 struct l2t_data *d = L2DATA(dev); 377 struct l2t_data *d = L2DATA(dev);
384 u32 addr = *(u32 *) neigh->primary_key; 378 u32 addr = *(u32 *) neigh->primary_key;
385 int ifidx = neigh->dev->ifindex; 379 int ifidx = neigh->dev->ifindex;
@@ -395,6 +389,8 @@ void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
395 return; 389 return;
396 390
397found: 391found:
392 __skb_queue_head_init(&arpq);
393
398 read_unlock(&d->lock); 394 read_unlock(&d->lock);
399 if (atomic_read(&e->refcnt)) { 395 if (atomic_read(&e->refcnt)) {
400 if (neigh != e->neigh) 396 if (neigh != e->neigh)
@@ -402,8 +398,7 @@ found:
402 398
403 if (e->state == L2T_STATE_RESOLVING) { 399 if (e->state == L2T_STATE_RESOLVING) {
404 if (neigh->nud_state & NUD_FAILED) { 400 if (neigh->nud_state & NUD_FAILED) {
405 arpq = e->arpq_head; 401 skb_queue_splice_init(&e->arpq, &arpq);
406 e->arpq_head = e->arpq_tail = NULL;
407 } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE)) 402 } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
408 setup_l2e_send_pending(dev, NULL, e); 403 setup_l2e_send_pending(dev, NULL, e);
409 } else { 404 } else {
@@ -415,8 +410,8 @@ found:
415 } 410 }
416 spin_unlock_bh(&e->lock); 411 spin_unlock_bh(&e->lock);
417 412
418 if (arpq) 413 if (!skb_queue_empty(&arpq))
419 handle_failed_resolution(dev, arpq); 414 handle_failed_resolution(dev, &arpq);
420} 415}
421 416
422struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) 417struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)