path: root/drivers/infiniband/ulp/ipoib
author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:40:14 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:40:14 -0400
commit	038a5008b2f395c85e6e71d6ddf3c684e7c405b0
tree	4735eab577e97e5a22c3141e3f60071c8065585e /drivers/infiniband/ulp/ipoib
parent	dd6d1844af33acb4edd0a40b1770d091a22c94be
parent	266918303226cceac7eca38ced30f15f277bd89c
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (867 commits)
  [SKY2]: status polling loop (post merge)
  [NET]: Fix NAPI completion handling in some drivers.
  [TCP]: Limit processing lost_retrans loop to work-to-do cases
  [TCP]: Fix lost_retrans loop vs fastpath problems
  [TCP]: No need to re-count fackets_out/sacked_out at RTO
  [TCP]: Extract tcp_match_queue_to_sack from sacktag code
  [TCP]: Kill almost unused variable pcount from sacktag
  [TCP]: Fix mark_head_lost to ignore R-bit when trying to mark L
  [TCP]: Add bytes_acked (ABC) clearing to FRTO too
  [IPv6]: Update setsockopt(IPV6_MULTICAST_IF) to support RFC 3493, try2
  [NETFILTER]: x_tables: add missing ip6t_modulename aliases
  [NETFILTER]: nf_conntrack_tcp: fix connection reopening
  [QETH]: fix qeth_main.c
  [NETLINK]: fib_frontend build fixes
  [IPv6]: Export userland ND options through netlink (RDNSS support)
  [9P]: build fix with !CONFIG_SYSCTL
  [NET]: Fix dev_put() and dev_hold() comments
  [NET]: make netlink user -> kernel interface synchronious
  [NET]: unify netlink kernel socket recognition
  [NET]: cleanup 3rd argument in netlink_sendskb
  ...

Fix up conflicts manually in Documentation/feature-removal-schedule.txt and my new least favourite crap, the "mod_devicetable" support in the files include/linux/mod_devicetable.h and scripts/mod/file2alias.c. (The latter files seem to be explicitly _designed_ to get conflicts when different subsystems work with them - that have an absolutely horrid lack of subsystem separation!)

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/infiniband/ulp/ipoib')
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib.h		6
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_cm.c		20
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c		61
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_main.c	44
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_multicast.c	10
5 files changed, 66 insertions, 75 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 285c143115cc..34c6128d2a34 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -228,6 +228,8 @@ struct ipoib_dev_priv {
 
 	struct net_device *dev;
 
+	struct napi_struct napi;
+
 	unsigned long flags;
 
 	struct mutex mcast_mutex;
@@ -278,8 +280,6 @@ struct ipoib_dev_priv {
 
 	struct ib_event_handler event_handler;
 
-	struct net_device_stats stats;
-
 	struct net_device *parent;
 	struct list_head child_intfs;
 	struct list_head list;
@@ -351,7 +351,7 @@ extern struct workqueue_struct *ipoib_workqueue;
 
 /* functions */
 
-int ipoib_poll(struct net_device *dev, int *budget);
+int ipoib_poll(struct napi_struct *napi, int budget);
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
 
 struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
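The signature change in the last hunk is the 2.6.24 NAPI rework: the poll entry point now receives the napi_struct embedded in the device's private data plus a plain budget, and returns the number of completions processed instead of a done/not-done flag. Roughly, a converted handler takes this shape (a sketch distilled from the ipoib_poll() rewrite further down in this diff, with CQ re-arming omitted):

/* Sketch only: context comes from container_of() on the embedded
 * napi_struct rather than netdev_priv(); the return value is the
 * amount of work done, where done < budget signals the device went idle. */
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv =
		container_of(napi, struct ipoib_dev_priv, napi);
	int done = 0;

	/* ... poll priv->cq, handling at most 'budget' receive
	 * completions and bumping 'done' for each one ... */

	return done;
}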
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 08b4676a3820..1afd93cdd6bb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -430,7 +430,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		ipoib_dbg(priv, "cm recv error "
 			   "(status=%d, wrid=%d vend_err %x)\n",
 			   wc->status, wr_id, wc->vendor_err);
-		++priv->stats.rx_dropped;
+		++dev->stats.rx_dropped;
 		goto repost;
 	}
 
@@ -457,7 +457,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		 * this packet and reuse the old buffer.
 		 */
 		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
-		++priv->stats.rx_dropped;
+		++dev->stats.rx_dropped;
 		goto repost;
 	}
 
@@ -474,8 +474,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb_pull(skb, IPOIB_ENCAP_LEN);
 
 	dev->last_rx = jiffies;
-	++priv->stats.rx_packets;
-	priv->stats.rx_bytes += skb->len;
+	++dev->stats.rx_packets;
+	dev->stats.rx_bytes += skb->len;
 
 	skb->dev = dev;
 	/* XXX get correct PACKET_ type here */
@@ -512,8 +512,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	if (unlikely(skb->len > tx->mtu)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
 			   skb->len, tx->mtu);
-		++priv->stats.tx_dropped;
-		++priv->stats.tx_errors;
+		++dev->stats.tx_dropped;
+		++dev->stats.tx_errors;
 		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
 		return;
 	}
@@ -532,7 +532,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	tx_req->skb = skb;
 	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
 	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		dev_kfree_skb_any(skb);
 		return;
 	}
@@ -542,7 +542,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
 			       addr, skb->len))) {
 		ipoib_warn(priv, "post_send failed\n");
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
 	} else {
@@ -580,8 +580,8 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx
 	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
 
 	/* FIXME: is this right? Shouldn't we only increment on success? */
-	++priv->stats.tx_packets;
-	priv->stats.tx_bytes += tx_req->skb->len;
+	++dev->stats.tx_packets;
+	dev->stats.tx_bytes += tx_req->skb->len;
 
 	dev_kfree_skb_any(tx_req->skb);
 
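Every hunk in this file is the same mechanical substitution: the counters move from the net_device_stats copy the driver kept in its private structure to the one embedded in struct net_device itself, which the networking core returns by default (this is also why ipoib_get_stats() disappears from ipoib_main.c below). The pattern, as a minimal sketch (the helper name is illustrative, not part of the driver):

static void example_count_rx(struct net_device *dev, struct sk_buff *skb)
{
	++dev->stats.rx_packets;		/* was ++priv->stats.rx_packets */
	dev->stats.rx_bytes += skb->len;	/* was priv->stats.rx_bytes += skb->len */
}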
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 10944888cffd..0ec28c302fbf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -208,7 +208,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	 * this packet and reuse the old buffer.
 	 */
 	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-		++priv->stats.rx_dropped;
+		++dev->stats.rx_dropped;
 		goto repost;
 	}
 
@@ -225,8 +225,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb_pull(skb, IPOIB_ENCAP_LEN);
 
 	dev->last_rx = jiffies;
-	++priv->stats.rx_packets;
-	priv->stats.rx_bytes += skb->len;
+	++dev->stats.rx_packets;
+	dev->stats.rx_bytes += skb->len;
 
 	skb->dev = dev;
 	/* XXX get correct PACKET_ type here */
@@ -260,8 +260,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	ib_dma_unmap_single(priv->ca, tx_req->mapping,
 			    tx_req->skb->len, DMA_TO_DEVICE);
 
-	++priv->stats.tx_packets;
-	priv->stats.tx_bytes += tx_req->skb->len;
+	++dev->stats.tx_packets;
+	dev->stats.tx_bytes += tx_req->skb->len;
 
 	dev_kfree_skb_any(tx_req->skb);
 
@@ -281,63 +281,58 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 			   wc->status, wr_id, wc->vendor_err);
 }
 
-int ipoib_poll(struct net_device *dev, int *budget)
+int ipoib_poll(struct napi_struct *napi, int budget)
 {
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	int max = min(*budget, dev->quota);
+	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
+	struct net_device *dev = priv->dev;
 	int done;
 	int t;
-	int empty;
 	int n, i;
 
 	done = 0;
-	empty = 0;
 
-	while (max) {
+poll_more:
+	while (done < budget) {
+		int max = (budget - done);
+
 		t = min(IPOIB_NUM_WC, max);
 		n = ib_poll_cq(priv->cq, t, priv->ibwc);
 
-		for (i = 0; i < n; ++i) {
+		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = priv->ibwc + i;
 
 			if (wc->wr_id & IPOIB_CM_OP_SRQ) {
 				++done;
-				--max;
 				ipoib_cm_handle_rx_wc(dev, wc);
 			} else if (wc->wr_id & IPOIB_OP_RECV) {
 				++done;
-				--max;
 				ipoib_ib_handle_rx_wc(dev, wc);
 			} else
 				ipoib_ib_handle_tx_wc(dev, wc);
 		}
 
-		if (n != t) {
-			empty = 1;
+		if (n != t)
 			break;
-		}
 	}
 
-	dev->quota -= done;
-	*budget -= done;
-
-	if (empty) {
-		netif_rx_complete(dev);
+	if (done < budget) {
+		netif_rx_complete(dev, napi);
 		if (unlikely(ib_req_notify_cq(priv->cq,
 					      IB_CQ_NEXT_COMP |
 					      IB_CQ_REPORT_MISSED_EVENTS)) &&
-		    netif_rx_reschedule(dev, 0))
-			return 1;
-
-		return 0;
+		    netif_rx_reschedule(dev, napi))
			goto poll_more;
 	}
 
-	return 1;
+	return done;
 }
 
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 {
-	netif_rx_schedule(dev_ptr);
+	struct net_device *dev = dev_ptr;
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	netif_rx_schedule(dev, &priv->napi);
 }
 
 static inline int post_send(struct ipoib_dev_priv *priv,
@@ -367,8 +362,8 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
 			   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
-		++priv->stats.tx_dropped;
-		++priv->stats.tx_errors;
+		++dev->stats.tx_dropped;
+		++dev->stats.tx_errors;
 		ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
 		return;
 	}
@@ -388,7 +383,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
 				 DMA_TO_DEVICE);
 	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		dev_kfree_skb_any(skb);
 		return;
 	}
@@ -397,7 +392,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
 			       address->ah, qpn, addr, skb->len))) {
 		ipoib_warn(priv, "post_send failed\n");
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
 	} else {
@@ -577,7 +572,6 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 	int i;
 
 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
-	netif_poll_disable(dev);
 
 	ipoib_cm_dev_stop(dev);
 
@@ -660,7 +654,6 @@ timeout:
 		msleep(1);
 	}
 
-	netif_poll_enable(dev);
 	ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP);
 
 	return 0;
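The ipoib_poll() rewrite above drops the old *budget/dev->quota bookkeeping (and the max/empty locals) in favour of counting done against the budget argument. The subtle part is the idle path, restated here with comments (the same code as the hunk above, not new logic): completion must not race with events arriving between the last CQ poll and the re-arm, so the CQ is re-armed with IB_CQ_REPORT_MISSED_EVENTS, and if that reports missed completions and the poll can be rescheduled, the handler jumps back and keeps draining.

	if (done < budget) {
		netif_rx_complete(dev, napi);	/* leave polled mode */
		if (unlikely(ib_req_notify_cq(priv->cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    netif_rx_reschedule(dev, napi))
			goto poll_more;		/* events raced in: keep polling */
	}
	return done;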
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 894b1dcdf3eb..855c9deca8b7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -98,16 +98,20 @@ int ipoib_open(struct net_device *dev)
 
 	ipoib_dbg(priv, "bringing up interface\n");
 
+	napi_enable(&priv->napi);
 	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 
 	if (ipoib_pkey_dev_delay_open(dev))
 		return 0;
 
-	if (ipoib_ib_dev_open(dev))
+	if (ipoib_ib_dev_open(dev)) {
+		napi_disable(&priv->napi);
 		return -EINVAL;
+	}
 
 	if (ipoib_ib_dev_up(dev)) {
 		ipoib_ib_dev_stop(dev, 1);
+		napi_disable(&priv->napi);
 		return -EINVAL;
 	}
 
@@ -140,6 +144,7 @@ static int ipoib_stop(struct net_device *dev)
 	ipoib_dbg(priv, "stopping interface\n");
 
 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+	napi_disable(&priv->napi);
 
 	netif_stop_queue(dev);
 
@@ -512,7 +517,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 
 	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
 	if (!neigh) {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		return;
 	}
@@ -577,7 +582,7 @@ err_list:
 err_path:
 	ipoib_neigh_free(dev, neigh);
 err_drop:
-	++priv->stats.tx_dropped;
+	++dev->stats.tx_dropped;
 	dev_kfree_skb_any(skb);
 
 	spin_unlock(&priv->lock);
@@ -626,7 +631,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		} else
 			__path_add(dev, path);
 	} else {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 	}
 
@@ -645,7 +650,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 			skb_push(skb, sizeof *phdr);
 			__skb_queue_tail(&path->queue, skb);
 		} else {
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 		}
 
@@ -713,7 +718,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			__skb_queue_tail(&neigh->queue, skb);
 			spin_unlock(&priv->lock);
 		} else {
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 		}
 	} else {
@@ -739,7 +744,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				   IPOIB_QPN(phdr->hwaddr),
 				   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
 			dev_kfree_skb_any(skb);
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			goto out;
 		}
 
@@ -753,13 +758,6 @@ out:
 	return NETDEV_TX_OK;
 }
 
-static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-
-	return &priv->stats;
-}
-
 static void ipoib_timeout(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -775,7 +773,7 @@ static void ipoib_timeout(struct net_device *dev)
 static int ipoib_hard_header(struct sk_buff *skb,
 			     struct net_device *dev,
 			     unsigned short type,
-			     void *daddr, void *saddr, unsigned len)
+			     const void *daddr, const void *saddr, unsigned len)
 {
 	struct ipoib_header *header;
 
@@ -856,11 +854,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
 
 void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
 {
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
 	*to_ipoib_neigh(neigh->neighbour) = NULL;
 	while ((skb = __skb_dequeue(&neigh->queue))) {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 	}
 	if (ipoib_cm_get(neigh))
@@ -935,6 +932,10 @@ void ipoib_dev_cleanup(struct net_device *dev)
 	priv->tx_ring = NULL;
 }
 
+static const struct header_ops ipoib_header_ops = {
+	.create = ipoib_hard_header,
+};
+
 static void ipoib_setup(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -943,13 +944,12 @@ static void ipoib_setup(struct net_device *dev)
 	dev->stop = ipoib_stop;
 	dev->change_mtu = ipoib_change_mtu;
 	dev->hard_start_xmit = ipoib_start_xmit;
-	dev->get_stats = ipoib_get_stats;
 	dev->tx_timeout = ipoib_timeout;
-	dev->hard_header = ipoib_hard_header;
+	dev->header_ops = &ipoib_header_ops;
 	dev->set_multicast_list = ipoib_set_mcast_list;
 	dev->neigh_setup = ipoib_neigh_setup_dev;
-	dev->poll = ipoib_poll;
-	dev->weight = 100;
+
+	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
 
 	dev->watchdog_timeo = HZ;
 
@@ -973,8 +973,6 @@ static void ipoib_setup(struct net_device *dev)
 
 	netif_carrier_off(dev);
 
-	SET_MODULE_OWNER(dev);
-
 	priv->dev = dev;
 
 	spin_lock_init(&priv->lock);
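Two registration-time changes round out this file: link-layer header construction moves behind a const header_ops table (dev->hard_header becomes dev->header_ops), and the poll callback with its weight is registered through netif_napi_add() instead of the removed dev->poll/dev->weight fields; the napi_enable()/napi_disable() calls added to ipoib_open() and ipoib_stop() replace the netif_poll_disable()/netif_poll_enable() pair dropped from ipoib_ib.c. Pulled together as a sketch (the setup-function name is illustrative; the contents match ipoib_setup() above):

static const struct header_ops ipoib_header_ops = {
	.create = ipoib_hard_header,
};

static void example_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->header_ops = &ipoib_header_ops;
	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);	/* weight 100 */
}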
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aae367057a56..98e904a7f3e8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -125,7 +125,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	}
 
 	spin_lock_irqsave(&priv->tx_lock, flags);
-	priv->stats.tx_dropped += tx_dropped;
+	dev->stats.tx_dropped += tx_dropped;
 	spin_unlock_irqrestore(&priv->tx_lock, flags);
 
 	kfree(mcast);
@@ -320,7 +320,7 @@ ipoib_mcast_sendonly_join_complete(int status,
 		/* Flush out any queued packets */
 		spin_lock_irq(&priv->tx_lock);
 		while (!skb_queue_empty(&mcast->pkt_queue)) {
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 		}
 		spin_unlock_irq(&priv->tx_lock);
@@ -675,7 +675,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 	if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) ||
 	    !priv->broadcast ||
 	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		goto unlock;
 	}
@@ -690,7 +690,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 		if (!mcast) {
 			ipoib_warn(priv, "unable to allocate memory for "
 				   "multicast structure\n");
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 			goto out;
 		}
@@ -705,7 +705,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
 			skb_queue_tail(&mcast->pkt_queue, skb);
 		else {
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 		}
 