Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib.h           |  6
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_cm.c        | 20
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c        | 61
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_main.c      | 44
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 10
5 files changed, 66 insertions(+), 75 deletions(-)
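
This patch moves IPoIB from the old dev->poll/dev->weight polling interface to the napi_struct-based NAPI API, and moves the packet counters from the driver-private struct into the stats field embedded in struct net_device. A minimal sketch of the NAPI contract being adopted, using only calls that appear in the hunks below (my_priv, my_poll and my_setup are hypothetical names, not part of the patch):

	/* the driver embeds a napi_struct in its private data ... */
	struct my_priv {
		struct net_device *dev;
		struct napi_struct napi;	/* replaces dev->poll and dev->weight */
	};

	/* ... and the poll routine takes the napi_struct plus a work budget */
	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		int done = 0;

		/* process at most 'budget' received packets, counting them in 'done' */

		if (done < budget)	/* drained early: leave polling mode */
			netif_rx_complete(priv->dev, napi);

		return done;		/* the poll routine now returns work performed */
	}

	/* in the driver's setup routine: */
	static void my_setup(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		netif_napi_add(dev, &priv->napi, my_poll, 100);	/* 100 = poll weight */
	}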
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index a198ce8371d..6545fa798b1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -249,6 +249,8 @@ struct ipoib_dev_priv {
 
 	struct net_device *dev;
 
+	struct napi_struct napi;
+
 	unsigned long flags;
 
 	struct mutex mcast_mutex;
@@ -299,8 +301,6 @@ struct ipoib_dev_priv {
 
 	struct ib_event_handler event_handler;
 
-	struct net_device_stats stats;
-
 	struct net_device *parent;
 	struct list_head child_intfs;
 	struct list_head list;
@@ -372,7 +372,7 @@ extern struct workqueue_struct *ipoib_workqueue;
 
 /* functions */
 
-int ipoib_poll(struct net_device *dev, int *budget);
+int ipoib_poll(struct napi_struct *napi, int budget);
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
 
 struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
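
The struct net_device_stats member removed above gets no replacement in the private struct: struct net_device itself carries an embedded stats field, and the rest of the patch increments that copy directly. The change in usage, sketched:

	/* before: counters lived in the driver-private struct */
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	++priv->stats.rx_packets;

	/* after: the copy embedded in struct net_device is used instead */
	++dev->stats.rx_packets;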
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 23addb3a6f4..0a0dcb8fdfd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -430,7 +430,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		ipoib_dbg(priv, "cm recv error "
 			  "(status=%d, wrid=%d vend_err %x)\n",
 			  wc->status, wr_id, wc->vendor_err);
-		++priv->stats.rx_dropped;
+		++dev->stats.rx_dropped;
 		goto repost;
 	}
 
@@ -457,7 +457,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		 * this packet and reuse the old buffer.
 		 */
 		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
-		++priv->stats.rx_dropped;
+		++dev->stats.rx_dropped;
 		goto repost;
 	}
 
@@ -474,8 +474,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb_pull(skb, IPOIB_ENCAP_LEN);
 
 	dev->last_rx = jiffies;
-	++priv->stats.rx_packets;
-	priv->stats.rx_bytes += skb->len;
+	++dev->stats.rx_packets;
+	dev->stats.rx_bytes += skb->len;
 
 	skb->dev = dev;
 	/* XXX get correct PACKET_ type here */
@@ -512,8 +512,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	if (unlikely(skb->len > tx->mtu)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
 			   skb->len, tx->mtu);
-		++priv->stats.tx_dropped;
-		++priv->stats.tx_errors;
+		++dev->stats.tx_dropped;
+		++dev->stats.tx_errors;
 		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
 		return;
 	}
@@ -532,7 +532,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	tx_req->skb = skb;
 	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
 	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		dev_kfree_skb_any(skb);
 		return;
 	}
@@ -542,7 +542,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
 			       addr, skb->len))) {
 		ipoib_warn(priv, "post_send failed\n");
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
 	} else {
@@ -580,8 +580,8 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx
 	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
 
 	/* FIXME: is this right? Shouldn't we only increment on success? */
-	++priv->stats.tx_packets;
-	priv->stats.tx_bytes += tx_req->skb->len;
+	++dev->stats.tx_packets;
+	dev->stats.tx_bytes += tx_req->skb->len;
 
 	dev_kfree_skb_any(tx_req->skb);
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5a70e287f25..1a77e79f6b4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -208,7 +208,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	 * this packet and reuse the old buffer.
 	 */
 	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-		++priv->stats.rx_dropped;
+		++dev->stats.rx_dropped;
 		goto repost;
 	}
 
@@ -225,8 +225,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb_pull(skb, IPOIB_ENCAP_LEN);
 
 	dev->last_rx = jiffies;
-	++priv->stats.rx_packets;
-	priv->stats.rx_bytes += skb->len;
+	++dev->stats.rx_packets;
+	dev->stats.rx_bytes += skb->len;
 
 	skb->dev = dev;
 	/* XXX get correct PACKET_ type here */
@@ -260,8 +260,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	ib_dma_unmap_single(priv->ca, tx_req->mapping,
 			    tx_req->skb->len, DMA_TO_DEVICE);
 
-	++priv->stats.tx_packets;
-	priv->stats.tx_bytes += tx_req->skb->len;
+	++dev->stats.tx_packets;
+	dev->stats.tx_bytes += tx_req->skb->len;
 
 	dev_kfree_skb_any(tx_req->skb);
 
@@ -281,63 +281,58 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 		   wc->status, wr_id, wc->vendor_err);
 }
 
-int ipoib_poll(struct net_device *dev, int *budget)
+int ipoib_poll(struct napi_struct *napi, int budget)
 {
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	int max = min(*budget, dev->quota);
+	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
+	struct net_device *dev = priv->dev;
 	int done;
 	int t;
-	int empty;
 	int n, i;
 
 	done  = 0;
-	empty = 0;
 
-	while (max) {
+poll_more:
+	while (done < budget) {
+		int max = (budget - done);
+
 		t = min(IPOIB_NUM_WC, max);
 		n = ib_poll_cq(priv->cq, t, priv->ibwc);
 
-		for (i = 0; i < n; ++i) {
+		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = priv->ibwc + i;
 
 			if (wc->wr_id & IPOIB_CM_OP_SRQ) {
 				++done;
-				--max;
 				ipoib_cm_handle_rx_wc(dev, wc);
 			} else if (wc->wr_id & IPOIB_OP_RECV) {
 				++done;
-				--max;
 				ipoib_ib_handle_rx_wc(dev, wc);
 			} else
 				ipoib_ib_handle_tx_wc(dev, wc);
 		}
 
-		if (n != t) {
-			empty = 1;
+		if (n != t)
 			break;
-		}
 	}
 
-	dev->quota -= done;
-	*budget    -= done;
-
-	if (empty) {
-		netif_rx_complete(dev);
+	if (done < budget) {
+		netif_rx_complete(dev, napi);
 		if (unlikely(ib_req_notify_cq(priv->cq,
 					      IB_CQ_NEXT_COMP |
 					      IB_CQ_REPORT_MISSED_EVENTS)) &&
-		    netif_rx_reschedule(dev, 0))
-			return 1;
-
-		return 0;
+		    netif_rx_reschedule(dev, napi))
+			goto poll_more;
 	}
 
-	return 1;
+	return done;
 }
 
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 {
-	netif_rx_schedule(dev_ptr);
+	struct net_device *dev = dev_ptr;
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	netif_rx_schedule(dev, &priv->napi);
 }
 
 static inline int post_send(struct ipoib_dev_priv *priv,
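
The rewritten ipoib_poll() follows the new NAPI contract: consume at most budget receive completions, return the amount of work done, and leave polling mode only when the CQ drains early. The poll_more loop closes the re-arm race; as a summary (reasoning about the code above, not patch code):

	/*
	 * done == budget: quota exhausted; return and let the softirq
	 * call ipoib_poll() again.
	 * done <  budget: the CQ drained, so netif_rx_complete() leaves
	 * polling mode and ib_req_notify_cq() re-arms the interrupt.
	 * IB_CQ_REPORT_MISSED_EVENTS makes ib_req_notify_cq() return a
	 * positive value if completions were queued between the last
	 * ib_poll_cq() and the re-arm; no interrupt will fire for those,
	 * so netif_rx_reschedule() reclaims polling mode and the code
	 * jumps back to poll_more to consume the stragglers.
	 */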
@@ -367,8 +362,8 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
 			   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
-		++priv->stats.tx_dropped;
-		++priv->stats.tx_errors;
+		++dev->stats.tx_dropped;
+		++dev->stats.tx_errors;
 		ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
 		return;
 	}
@@ -388,7 +383,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
 				 DMA_TO_DEVICE);
 	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		dev_kfree_skb_any(skb);
 		return;
 	}
@@ -397,7 +392,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
 			       address->ah, qpn, addr, skb->len))) {
 		ipoib_warn(priv, "post_send failed\n");
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
 	} else {
@@ -585,7 +580,6 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 	int i;
 
 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
-	netif_poll_disable(dev);
 
 	ipoib_cm_dev_stop(dev);
 
@@ -668,7 +662,6 @@ timeout:
 		msleep(1);
 	}
 
-	netif_poll_enable(dev);
 	ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP);
 
 	return 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ff17fe3c765..e072f3c32ce 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -98,16 +98,20 @@ int ipoib_open(struct net_device *dev)
 
 	ipoib_dbg(priv, "bringing up interface\n");
 
+	napi_enable(&priv->napi);
 	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 
 	if (ipoib_pkey_dev_delay_open(dev))
 		return 0;
 
-	if (ipoib_ib_dev_open(dev))
+	if (ipoib_ib_dev_open(dev)) {
+		napi_disable(&priv->napi);
 		return -EINVAL;
+	}
 
 	if (ipoib_ib_dev_up(dev)) {
 		ipoib_ib_dev_stop(dev, 1);
+		napi_disable(&priv->napi);
 		return -EINVAL;
 	}
 
@@ -140,6 +144,7 @@ static int ipoib_stop(struct net_device *dev)
 	ipoib_dbg(priv, "stopping interface\n");
 
 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+	napi_disable(&priv->napi);
 
 	netif_stop_queue(dev);
 
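
napi_enable()/napi_disable() here take over from the netif_poll_enable()/netif_poll_disable() calls dropped out of ipoib_ib.c above. Every failure path in ipoib_open() now has to undo the enable, since a NAPI instance must be disabled before it can be enabled again; the pattern, sketched for a hypothetical driver (my_open and my_hw_up are made-up names):

	static int my_open(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		napi_enable(&priv->napi);	/* before any completion can schedule us */

		if (my_hw_up(dev)) {		/* hypothetical bring-up step */
			napi_disable(&priv->napi);	/* unwind on every error path */
			return -EINVAL;
		}
		return 0;
	}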
@@ -514,7 +519,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 
 	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
 	if (!neigh) {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		return;
 	}
@@ -579,7 +584,7 @@ err_list:
 err_path:
 	ipoib_neigh_free(dev, neigh);
 err_drop:
-	++priv->stats.tx_dropped;
+	++dev->stats.tx_dropped;
 	dev_kfree_skb_any(skb);
 
 	spin_unlock(&priv->lock);
@@ -628,7 +633,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		} else
 			__path_add(dev, path);
 	} else {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 	}
 
@@ -647,7 +652,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		skb_push(skb, sizeof *phdr);
 		__skb_queue_tail(&path->queue, skb);
 	} else {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 	}
 
@@ -715,7 +720,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			__skb_queue_tail(&neigh->queue, skb);
 			spin_unlock(&priv->lock);
 		} else {
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 		}
 	} else {
@@ -741,7 +746,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				   IPOIB_QPN(phdr->hwaddr),
 				   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
 			dev_kfree_skb_any(skb);
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			goto out;
 		}
 
@@ -755,13 +760,6 @@ out:
 	return NETDEV_TX_OK;
 }
 
-static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-
-	return &priv->stats;
-}
-
 static void ipoib_timeout(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
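
ipoib_get_stats() goes away because the driver no longer has private counters to hand back; with dev->get_stats left unset, the core falls back to a default that, to a first approximation, does no more than the deleted method did (sketch with a hypothetical name, not the exact core code):

	/* roughly what the core's fallback does when a driver sets no get_stats */
	static struct net_device_stats *default_get_stats(struct net_device *dev)
	{
		return &dev->stats;	/* the same counters this patch increments */
	}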
@@ -777,7 +775,7 @@ static void ipoib_timeout(struct net_device *dev)
 static int ipoib_hard_header(struct sk_buff *skb,
 			     struct net_device *dev,
 			     unsigned short type,
-			     void *daddr, void *saddr, unsigned len)
+			     const void *daddr, const void *saddr, unsigned len)
 {
 	struct ipoib_header *header;
 
@@ -858,11 +856,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
 
 void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
 {
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
 	*to_ipoib_neigh(neigh->neighbour) = NULL;
 	while ((skb = __skb_dequeue(&neigh->queue))) {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 	}
 	if (ipoib_cm_get(neigh))
@@ -937,6 +934,10 @@ void ipoib_dev_cleanup(struct net_device *dev)
 	priv->tx_ring = NULL;
 }
 
+static const struct header_ops ipoib_header_ops = {
+	.create	= ipoib_hard_header,
+};
+
 static void ipoib_setup(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -945,13 +946,12 @@ static void ipoib_setup(struct net_device *dev)
 	dev->stop		 = ipoib_stop;
 	dev->change_mtu		 = ipoib_change_mtu;
 	dev->hard_start_xmit	 = ipoib_start_xmit;
-	dev->get_stats		 = ipoib_get_stats;
 	dev->tx_timeout		 = ipoib_timeout;
-	dev->hard_header	 = ipoib_hard_header;
+	dev->header_ops		 = &ipoib_header_ops;
 	dev->set_multicast_list	 = ipoib_set_mcast_list;
 	dev->neigh_setup	 = ipoib_neigh_setup_dev;
-	dev->poll		 = ipoib_poll;
-	dev->weight		 = 100;
+
+	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
 
 	dev->watchdog_timeo	 = HZ;
 
@@ -975,8 +975,6 @@ static void ipoib_setup(struct net_device *dev)
 
 	netif_carrier_off(dev);
 
-	SET_MODULE_OWNER(dev);
-
 	priv->dev = dev;
 
 	spin_lock_init(&priv->lock);
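
The dev->hard_header pointer is replaced by the const header_ops table registered above, which is also why ipoib_hard_header() grows const qualifiers on daddr/saddr: the signature must match header_ops->create. The shape of the interface being adopted, with hooks ipoib does not use elided:

	struct header_ops {
		int (*create)(struct sk_buff *skb, struct net_device *dev,
			      unsigned short type, const void *daddr,
			      const void *saddr, unsigned len);
		/* .parse, .rebuild, ... left NULL by ipoib */
	};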
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 62abfb6f35c..827820ec66d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -103,7 +103,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	}
 
 	spin_lock_irqsave(&priv->tx_lock, flags);
-	priv->stats.tx_dropped += tx_dropped;
+	dev->stats.tx_dropped += tx_dropped;
 	spin_unlock_irqrestore(&priv->tx_lock, flags);
 
 	kfree(mcast);
@@ -298,7 +298,7 @@ ipoib_mcast_sendonly_join_complete(int status,
 		/* Flush out any queued packets */
 		spin_lock_irq(&priv->tx_lock);
 		while (!skb_queue_empty(&mcast->pkt_queue)) {
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 		}
 		spin_unlock_irq(&priv->tx_lock);
@@ -653,7 +653,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 	if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) ||
 	    !priv->broadcast ||
 	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		goto unlock;
 	}
@@ -668,7 +668,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 		if (!mcast) {
 			ipoib_warn(priv, "unable to allocate memory for "
 				   "multicast structure\n");
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 			goto out;
 		}
@@ -683,7 +683,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
 			skb_queue_tail(&mcast->pkt_queue, skb);
 		else {
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 		}
 
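
In ipoib_mcast_free() (first hunk above), drops are tallied into a local variable while the packet queue is torn down, then folded into dev->stats.tx_dropped in one step under priv->tx_lock, the same lock the other tx_dropped updates in this file run under. The batching pattern, sketched:

	/* count drops outside the lock, account for them once under it */
	unsigned long flags;
	int tx_dropped = 0;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mcast->pkt_queue))) {
		++tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_lock_irqsave(&priv->tx_lock, flags);
	dev->stats.tx_dropped += tx_dropped;
	spin_unlock_irqrestore(&priv->tx_lock, flags);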