Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--   include/linux/netdevice.h   506
1 file changed, 391 insertions(+), 115 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index da7a13c97eb8..5a11f889e56a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -31,6 +31,7 @@
 
 #ifdef __KERNEL__
 #include <linux/timer.h>
+#include <linux/delay.h>
 #include <asm/atomic.h>
 #include <asm/cache.h>
 #include <asm/byteorder.h>
@@ -38,6 +39,9 @@
 #include <linux/device.h>
 #include <linux/percpu.h>
 #include <linux/dmaengine.h>
+#include <linux/workqueue.h>
+
+#include <net/net_namespace.h>
 
 struct vlan_group;
 struct ethtool_ops;
@@ -246,6 +250,19 @@ struct hh_cache
 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
         ((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 
+struct header_ops {
+        int     (*create) (struct sk_buff *skb, struct net_device *dev,
+                           unsigned short type, const void *daddr,
+                           const void *saddr, unsigned len);
+        int     (*parse)(const struct sk_buff *skb, unsigned char *haddr);
+        int     (*rebuild)(struct sk_buff *skb);
+#define HAVE_HEADER_CACHE
+        int     (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
+        void    (*cache_update)(struct hh_cache *hh,
+                                const struct net_device *dev,
+                                const unsigned char *haddr);
+};
+
 /* These flag bits are private to the generic network queueing
  * layer, they may not be explicitly referenced by any other
  * code.
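
For illustration, a minimal sketch of how a driver adopts header_ops: the
per-field net_device pointers (hard_header, rebuild_header, ...) removed
later in this patch are replaced by one const table. All example_* names
here are hypothetical:

        static const struct header_ops example_header_ops = {
                .create  = example_header_create,  /* build the hardware header */
                .parse   = example_header_parse,   /* extract source hw address */
                .rebuild = example_header_rebuild, /* re-resolve a stale header */
        };

        static void example_setup(struct net_device *dev)
        {
                dev->header_ops = &example_header_ops;
        }
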
@@ -258,7 +275,6 @@ enum netdev_state_t
         __LINK_STATE_PRESENT,
         __LINK_STATE_SCHED,
         __LINK_STATE_NOCARRIER,
-        __LINK_STATE_RX_SCHED,
         __LINK_STATE_LINKWATCH_PENDING,
         __LINK_STATE_DORMANT,
         __LINK_STATE_QDISC_RUNNING,
@@ -278,6 +294,120 @@ struct netdev_boot_setup {
 extern int __init netdev_boot_setup(char *str);
 
 /*
+ * Structure for NAPI scheduling similar to tasklet but with weighting
+ */
+struct napi_struct {
+        /* The poll_list must only be managed by the entity which
+         * changes the state of the NAPI_STATE_SCHED bit.  This means
+         * whoever atomically sets that bit can add this napi_struct
+         * to the per-cpu poll_list, and whoever clears that bit
+         * can remove from the list right before clearing the bit.
+         */
+        struct list_head        poll_list;
+
+        unsigned long           state;
+        int                     weight;
+        int                     (*poll)(struct napi_struct *, int);
+#ifdef CONFIG_NETPOLL
+        spinlock_t              poll_lock;
+        int                     poll_owner;
+        struct net_device       *dev;
+        struct list_head        dev_list;
+#endif
+};
+
+enum
+{
+        NAPI_STATE_SCHED,       /* Poll is scheduled */
+};
+
+extern void FASTCALL(__napi_schedule(struct napi_struct *n));
+
+/**
+ *      napi_schedule_prep - check if napi can be scheduled
+ *      @n: napi context
+ *
+ * Test if NAPI routine is already running, and if not mark
+ * it as running.  This is used as a condition variable to
+ * ensure only one NAPI poll instance runs.
+ */
+static inline int napi_schedule_prep(struct napi_struct *n)
+{
+        return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
+}
+
+/**
+ *      napi_schedule - schedule NAPI poll
+ *      @n: napi context
+ *
+ * Schedule NAPI poll routine to be called if it is not already
+ * running.
+ */
+static inline void napi_schedule(struct napi_struct *n)
+{
+        if (napi_schedule_prep(n))
+                __napi_schedule(n);
+}
+
+/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
+static inline int napi_reschedule(struct napi_struct *napi)
+{
+        if (napi_schedule_prep(napi)) {
+                __napi_schedule(napi);
+                return 1;
+        }
+        return 0;
+}
+
+/**
+ *      napi_complete - NAPI processing complete
+ *      @n: napi context
+ *
+ * Mark NAPI processing as complete.
+ */
+static inline void __napi_complete(struct napi_struct *n)
+{
+        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+        list_del(&n->poll_list);
+        smp_mb__before_clear_bit();
+        clear_bit(NAPI_STATE_SCHED, &n->state);
+}
+
+static inline void napi_complete(struct napi_struct *n)
+{
+        local_irq_disable();
+        __napi_complete(n);
+        local_irq_enable();
+}
+
+/**
+ *      napi_disable - prevent NAPI from scheduling
+ *      @n: napi context
+ *
+ * Stop NAPI from being scheduled on this context.
+ * Waits until any outstanding processing completes.
+ */
+static inline void napi_disable(struct napi_struct *n)
+{
+        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
+                msleep_interruptible(1);
+}
+
+/**
+ *      napi_enable - enable NAPI scheduling
+ *      @n: napi context
+ *
+ * Resume NAPI from being scheduled on this context.
+ * Must be paired with napi_disable.
+ */
+static inline void napi_enable(struct napi_struct *n)
+{
+        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+        smp_mb__before_clear_bit();
+        clear_bit(NAPI_STATE_SCHED, &n->state);
+}
+
+/*
  * The DEVICE structure.
  * Actually, this whole structure is a big mistake.  It mixes I/O
  * data with strictly "high-level" data, and it has to know about
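
For illustration, a minimal sketch of the driver pattern this API expects:
the interrupt handler schedules polling, and the poll routine consumes up
to `budget` packets, completing only when the queue drains. Everything
prefixed example_ is hypothetical:

        struct example_priv {
                struct napi_struct napi;
                struct net_device *dev;
        };

        /* RX interrupt: mask further RX interrupts, then schedule polling. */
        static irqreturn_t example_interrupt(int irq, void *data)
        {
                struct example_priv *priv = data;

                if (netif_rx_schedule_prep(priv->dev, &priv->napi)) {
                        example_mask_rx_irq(priv);              /* hypothetical */
                        __netif_rx_schedule(priv->dev, &priv->napi);
                }
                return IRQ_HANDLED;
        }

        /* Poll routine: takes the napi_struct and a budget, and returns the
         * number of packets processed instead of updating dev->quota. */
        static int example_poll(struct napi_struct *napi, int budget)
        {
                struct example_priv *priv =
                        container_of(napi, struct example_priv, napi);
                int work_done = 0;

                while (work_done < budget && example_rx_pending(priv))
                        work_done += example_process_one_rx(priv);

                if (work_done < budget) {
                        /* Queue drained: leave polled mode, re-enable IRQs. */
                        netif_rx_complete(priv->dev, napi);
                        example_unmask_rx_irq(priv);            /* hypothetical */
                }
                return work_done;
        }
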
@@ -319,6 +449,9 @@ struct net_device
         unsigned long           state;
 
         struct list_head        dev_list;
+#ifdef CONFIG_NETPOLL
+        struct list_head        napi_list;
+#endif
 
         /* The device initialization function. Called only once. */
         int                     (*init)(struct net_device *dev);
@@ -339,8 +472,11 @@ struct net_device
 #define NETIF_F_HW_VLAN_FILTER  512     /* Receive filtering on VLAN */
 #define NETIF_F_VLAN_CHALLENGED 1024    /* Device cannot handle VLAN packets */
 #define NETIF_F_GSO             2048    /* Enable software GSO. */
-#define NETIF_F_LLTX            4096    /* LockLess TX */
+#define NETIF_F_LLTX            4096    /* LockLess TX - deprecated. Please */
+                                        /* do not use LLTX in new drivers */
+#define NETIF_F_NETNS_LOCAL     8192    /* Does not change network namespaces */
 #define NETIF_F_MULTI_QUEUE     16384   /* Has multiple TX/RX queues */
+#define NETIF_F_LRO             32768   /* large receive offload */
 
 /* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT       16
@@ -379,6 +515,9 @@ struct net_device
 #endif
         const struct ethtool_ops *ethtool_ops;
 
+        /* Hardware header description */
+        const struct header_ops *header_ops;
+
         /*
          * This marks the end of the "visible" part of the structure. All
          * fields hereafter are internal to the system, and may change at
@@ -430,12 +569,6 @@ struct net_device
         /*
          * Cache line mostly used on receive path (including eth_type_trans())
          */
-        struct list_head        poll_list ____cacheline_aligned_in_smp;
-                                        /* Link to poll list */
-
-        int                     (*poll) (struct net_device *dev, int *quota);
-        int                     quota;
-        int                     weight;
         unsigned long           last_rx;        /* Time of last Rx */
         /* Interface address info used in eth_type_trans() */
         unsigned char           dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
@@ -508,13 +641,6 @@ struct net_device
         int                     (*open)(struct net_device *dev);
         int                     (*stop)(struct net_device *dev);
 #define HAVE_NETDEV_POLL
-        int                     (*hard_header) (struct sk_buff *skb,
-                                                struct net_device *dev,
-                                                unsigned short type,
-                                                void *daddr,
-                                                void *saddr,
-                                                unsigned len);
-        int                     (*rebuild_header)(struct sk_buff *skb);
 #define HAVE_CHANGE_RX_FLAGS
         void                    (*change_rx_flags)(struct net_device *dev,
                                                    int flags);
@@ -531,12 +657,6 @@ struct net_device
 #define HAVE_SET_CONFIG
         int                     (*set_config)(struct net_device *dev,
                                               struct ifmap *map);
-#define HAVE_HEADER_CACHE
-        int                     (*hard_header_cache)(struct neighbour *neigh,
-                                                     struct hh_cache *hh);
-        void                    (*header_cache_update)(struct hh_cache *hh,
-                                                       struct net_device *dev,
-                                                       unsigned char *haddr);
 #define HAVE_CHANGE_MTU
         int                     (*change_mtu)(struct net_device *dev, int new_mtu);
 
@@ -550,8 +670,6 @@ struct net_device
         void                    (*vlan_rx_kill_vid)(struct net_device *dev,
                                                     unsigned short vid);
 
-        int                     (*hard_header_parse)(struct sk_buff *skb,
-                                                     unsigned char *haddr);
         int                     (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
 #ifdef CONFIG_NETPOLL
         struct netpoll_info     *npinfo;
@@ -560,6 +678,9 @@ struct net_device
         void                    (*poll_controller)(struct net_device *dev);
 #endif
 
+        /* Network namespace this network device is inside */
+        struct net              *nd_net;
+
         /* bridge stuff */
         struct net_bridge_port  *br_port;
         /* macvlan */
@@ -575,24 +696,46 @@ struct net_device
 
         /* The TX queue control structures */
         unsigned int                    egress_subqueue_count;
-        struct net_device_subqueue      egress_subqueue[0];
+        struct net_device_subqueue      egress_subqueue[1];
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
 #define NETDEV_ALIGN            32
 #define NETDEV_ALIGN_CONST      (NETDEV_ALIGN - 1)
 
+/**
+ *      netdev_priv - access network device private data
+ *      @dev: network device
+ *
+ * Get network device private data
+ */
 static inline void *netdev_priv(const struct net_device *dev)
 {
         return dev->priv;
 }
 
-#define SET_MODULE_OWNER(dev) do { } while (0)
 /* Set the sysfs physical device reference for the network logical device
  * if set prior to registration will cause a symlink during initialization.
  */
 #define SET_NETDEV_DEV(net, pdev)       ((net)->dev.parent = (pdev))
 
+static inline void netif_napi_add(struct net_device *dev,
+                                  struct napi_struct *napi,
+                                  int (*poll)(struct napi_struct *, int),
+                                  int weight)
+{
+        INIT_LIST_HEAD(&napi->poll_list);
+        napi->poll = poll;
+        napi->weight = weight;
+#ifdef CONFIG_NETPOLL
+        napi->dev = dev;
+        list_add(&napi->dev_list, &dev->napi_list);
+        spin_lock_init(&napi->poll_lock);
+        napi->poll_owner = -1;
+#endif
+        set_bit(NAPI_STATE_SCHED, &napi->state);
+}
+
 struct packet_type {
         __be16                  type;   /* This is really htons(ether_type). */
         struct net_device       *dev;   /* NULL is wildcarded here */
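
Continuing the hypothetical sketch above: registration moves to
netif_napi_add(). Note the context is created with NAPI_STATE_SCHED set,
i.e. effectively disabled, so a driver enables it before use (the weight
of 16 is an arbitrary example value):

        /* At probe time: */
        netif_napi_add(dev, &priv->napi, example_poll, 16 /* weight */);

        /* In dev->open(): */
        napi_enable(&priv->napi);

        /* In dev->stop(), before freeing RX resources: */
        napi_disable(&priv->napi);
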
@@ -610,45 +753,46 @@ struct packet_type {
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
 
-extern struct net_device        loopback_dev;   /* The loopback */
-extern struct list_head         dev_base_head;  /* All devices */
 extern rwlock_t                 dev_base_lock;  /* Device list lock */
 
-#define for_each_netdev(d)              \
-                list_for_each_entry(d, &dev_base_head, dev_list)
-#define for_each_netdev_safe(d, n)      \
-                list_for_each_entry_safe(d, n, &dev_base_head, dev_list)
-#define for_each_netdev_continue(d)             \
-                list_for_each_entry_continue(d, &dev_base_head, dev_list)
+
+#define for_each_netdev(net, d)         \
+                list_for_each_entry(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_safe(net, d, n) \
+                list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_continue(net, d)                \
+                list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
 #define net_device_entry(lh)    list_entry(lh, struct net_device, dev_list)
 
 static inline struct net_device *next_net_device(struct net_device *dev)
 {
         struct list_head *lh;
+        struct net *net;
 
+        net = dev->nd_net;
         lh = dev->dev_list.next;
-        return lh == &dev_base_head ? NULL : net_device_entry(lh);
+        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
-static inline struct net_device *first_net_device(void)
+static inline struct net_device *first_net_device(struct net *net)
 {
-        return list_empty(&dev_base_head) ? NULL :
-                net_device_entry(dev_base_head.next);
+        return list_empty(&net->dev_base_head) ? NULL :
+                net_device_entry(net->dev_base_head.next);
 }
 
 extern int              netdev_boot_setup_check(struct net_device *dev);
 extern unsigned long    netdev_boot_base(const char *prefix, int unit);
-extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr);
-extern struct net_device *dev_getfirstbyhwtype(unsigned short type);
-extern struct net_device *__dev_getfirstbyhwtype(unsigned short type);
+extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
+extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
 extern void             dev_add_pack(struct packet_type *pt);
 extern void             dev_remove_pack(struct packet_type *pt);
 extern void             __dev_remove_pack(struct packet_type *pt);
 
-extern struct net_device        *dev_get_by_flags(unsigned short flags,
+extern struct net_device        *dev_get_by_flags(struct net *net, unsigned short flags,
                                                   unsigned short mask);
-extern struct net_device        *dev_get_by_name(const char *name);
-extern struct net_device        *__dev_get_by_name(const char *name);
+extern struct net_device        *dev_get_by_name(struct net *net, const char *name);
+extern struct net_device        *__dev_get_by_name(struct net *net, const char *name);
 extern int              dev_alloc_name(struct net_device *dev, const char *name);
 extern int              dev_open(struct net_device *dev);
 extern int              dev_close(struct net_device *dev);
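
For illustration, the caller-side effect of the namespace plumbing: every
lookup now names a namespace explicitly, and code that only cares about
the initial namespace passes the global init_net from
<net/net_namespace.h> (the interface name here is just an example):

        struct net_device *dev;

        dev = dev_get_by_name(&init_net, "eth0");
        if (dev) {
                /* ... use the device ... */
                dev_put(dev);   /* dev_get_by_name took a reference */
        }
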
@@ -659,14 +803,35 @@ extern void free_netdev(struct net_device *dev);
 extern void             synchronize_net(void);
 extern int              register_netdevice_notifier(struct notifier_block *nb);
 extern int              unregister_netdevice_notifier(struct notifier_block *nb);
-extern int              call_netdevice_notifiers(unsigned long val, void *v);
-extern struct net_device        *dev_get_by_index(int ifindex);
-extern struct net_device        *__dev_get_by_index(int ifindex);
+extern int              call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+extern struct net_device        *dev_get_by_index(struct net *net, int ifindex);
+extern struct net_device        *__dev_get_by_index(struct net *net, int ifindex);
 extern int              dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
 extern int              netpoll_trap(void);
 #endif
 
+static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+                                  unsigned short type,
+                                  const void *daddr, const void *saddr,
+                                  unsigned len)
+{
+        if (!dev->header_ops)
+                return 0;
+
+        return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
+}
+
+static inline int dev_parse_header(const struct sk_buff *skb,
+                                   unsigned char *haddr)
+{
+        const struct net_device *dev = skb->dev;
+
+        if (!dev->header_ops->parse)
+                return 0;
+        return dev->header_ops->parse(skb, haddr);
+}
+
 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
 extern int              register_gifconf(unsigned int family, gifconf_func_t * gifconf);
 static inline int unregister_gifconf(unsigned int family)
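
For illustration, a hypothetical caller: protocol code that used to invoke
dev->hard_header() directly now goes through the wrapper, which quietly
succeeds for devices with no link-layer header at all:

        /* Prepend a link-layer header for an IPv4 packet (schematic). */
        static int example_build_header(struct sk_buff *skb,
                                        struct net_device *dev,
                                        const void *dest_hw)
        {
                return dev_hard_header(skb, dev, ETH_P_IP, dest_hw,
                                       dev->dev_addr, skb->len);
        }
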
@@ -678,7 +843,6 @@ static inline int unregister_gifconf(unsigned int family)
  * Incoming packets are placed on per-cpu queues so that
  * no locking is needed.
  */
-
 struct softnet_data
 {
         struct net_device       *output_queue;
@@ -686,7 +850,7 @@ struct softnet_data
         struct list_head        poll_list;
         struct sk_buff          *completion_queue;
 
-        struct net_device       backlog_dev;    /* Sorry. 8) */
+        struct napi_struct      backlog;
 #ifdef CONFIG_NET_DMA
         struct dma_chan         *net_dma;
 #endif
@@ -704,11 +868,24 @@ static inline void netif_schedule(struct net_device *dev)
                 __netif_schedule(dev);
 }
 
+/**
+ *      netif_start_queue - allow transmit
+ *      @dev: network device
+ *
+ *      Allow upper layers to call the device hard_start_xmit routine.
+ */
 static inline void netif_start_queue(struct net_device *dev)
 {
         clear_bit(__LINK_STATE_XOFF, &dev->state);
 }
 
+/**
+ *      netif_wake_queue - restart transmit
+ *      @dev: network device
+ *
+ *      Allow upper layers to call the device hard_start_xmit routine.
+ *      Used for flow control when transmit resources are available.
+ */
 static inline void netif_wake_queue(struct net_device *dev)
 {
 #ifdef CONFIG_NETPOLL_TRAP
@@ -721,16 +898,35 @@ static inline void netif_wake_queue(struct net_device *dev)
                 __netif_schedule(dev);
 }
 
+/**
+ *      netif_stop_queue - stop the transmit queue
+ *      @dev: network device
+ *
+ *      Stop upper layers calling the device hard_start_xmit routine.
+ *      Used for flow control when transmit resources are unavailable.
+ */
 static inline void netif_stop_queue(struct net_device *dev)
 {
         set_bit(__LINK_STATE_XOFF, &dev->state);
 }
 
+/**
+ *      netif_queue_stopped - test if transmit queue is flow-blocked
+ *      @dev: network device
+ *
+ *      Test if transmit queue on device is currently unable to send.
+ */
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
         return test_bit(__LINK_STATE_XOFF, &dev->state);
 }
 
+/**
+ *      netif_running - test if up
+ *      @dev: network device
+ *
+ *      Test if the device has been brought up.
+ */
 static inline int netif_running(const struct net_device *dev)
 {
         return test_bit(__LINK_STATE_START, &dev->state);
@@ -742,6 +938,14 @@ static inline int netif_running(const struct net_device *dev)
  * done at the overall netdevice level.
  * Also test the device if we're multiqueue.
  */
+
+/**
+ *      netif_start_subqueue - allow sending packets on subqueue
+ *      @dev: network device
+ *      @queue_index: sub queue index
+ *
+ * Start individual transmit queue of a device with multiple transmit queues.
+ */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -749,6 +953,13 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 #endif
 }
 
+/**
+ *      netif_stop_subqueue - stop sending packets on subqueue
+ *      @dev: network device
+ *      @queue_index: sub queue index
+ *
+ * Stop individual transmit queue of a device with multiple transmit queues.
+ */
 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -760,6 +971,13 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 #endif
 }
 
+/**
+ *      netif_subqueue_stopped - test status of subqueue
+ *      @dev: network device
+ *      @queue_index: sub queue index
+ *
+ * Check individual transmit queue of a device with multiple transmit queues.
+ */
 static inline int netif_subqueue_stopped(const struct net_device *dev,
                                          u16 queue_index)
 {
@@ -771,6 +989,14 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
 #endif
 }
 
+
+/**
+ *      netif_wake_subqueue - allow sending packets on subqueue
+ *      @dev: network device
+ *      @queue_index: sub queue index
+ *
+ * Resume individual transmit queue of a device with multiple transmit queues.
+ */
 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -784,6 +1010,13 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 #endif
 }
 
+/**
+ *      netif_is_multiqueue - test if device has multiple transmit queues
+ *      @dev: network device
+ *
+ * Check if device has multiple transmit queues.
+ * Always fails if NETDEVICES_MULTIQUEUE is not configured.
+ */
 static inline int netif_is_multiqueue(const struct net_device *dev)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -796,20 +1029,7 @@ static inline int netif_is_multiqueue(const struct net_device *dev)
 /* Use this variant when it is known for sure that it
  * is executing from interrupt context.
  */
-static inline void dev_kfree_skb_irq(struct sk_buff *skb)
-{
-        if (atomic_dec_and_test(&skb->users)) {
-                struct softnet_data *sd;
-                unsigned long flags;
-
-                local_irq_save(flags);
-                sd = &__get_cpu_var(softnet_data);
-                skb->next = sd->completion_queue;
-                sd->completion_queue = skb;
-                raise_softirq_irqoff(NET_TX_SOFTIRQ);
-                local_irq_restore(flags);
-        }
-}
+extern void dev_kfree_skb_irq(struct sk_buff *skb);
 
 /* Use this variant in places where it could be invoked
  * either from interrupt or non-interrupt context.
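
Usage is unchanged by the un-inlining; for illustration, a hypothetical
TX-completion path running in hard-IRQ context, where plain
dev_kfree_skb() would be unsafe:

        static void example_tx_complete(struct example_priv *priv)
        {
                struct sk_buff *skb;

                /* example_pop_completed_tx() is hypothetical */
                while ((skb = example_pop_completed_tx(priv)) != NULL)
                        dev_kfree_skb_irq(skb);
        }
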
@@ -822,29 +1042,41 @@ extern int netif_rx_ni(struct sk_buff *skb);
 #define HAVE_NETIF_RECEIVE_SKB 1
 extern int              netif_receive_skb(struct sk_buff *skb);
 extern int              dev_valid_name(const char *name);
-extern int              dev_ioctl(unsigned int cmd, void __user *);
-extern int              dev_ethtool(struct ifreq *);
+extern int              dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+extern int              dev_ethtool(struct net *net, struct ifreq *);
 extern unsigned         dev_get_flags(const struct net_device *);
 extern int              dev_change_flags(struct net_device *, unsigned);
 extern int              dev_change_name(struct net_device *, char *);
+extern int              dev_change_net_namespace(struct net_device *,
+                                                 struct net *, const char *);
 extern int              dev_set_mtu(struct net_device *, int);
 extern int              dev_set_mac_address(struct net_device *,
                                             struct sockaddr *);
 extern int              dev_hard_start_xmit(struct sk_buff *skb,
                                             struct net_device *dev);
 
-extern void             dev_init(void);
-
 extern int              netdev_budget;
 
 /* Called by rtnetlink.c:rtnl_unlock() */
 extern void netdev_run_todo(void);
 
+/**
+ *      dev_put - release reference to device
+ *      @dev: network device
+ *
+ * Release reference to device to allow it to be freed.
+ */
 static inline void dev_put(struct net_device *dev)
 {
         atomic_dec(&dev->refcnt);
 }
 
+/**
+ *      dev_hold - get reference to device
+ *      @dev: network device
+ *
+ * Hold reference to device to keep it from being freed.
+ */
 static inline void dev_hold(struct net_device *dev)
 {
         atomic_inc(&dev->refcnt);
@@ -861,6 +1093,12 @@ static inline void dev_hold(struct net_device *dev)
 
 extern void linkwatch_fire_event(struct net_device *dev);
 
+/**
+ *      netif_carrier_ok - test if carrier present
+ *      @dev: network device
+ *
+ * Check if carrier is present on device
+ */
 static inline int netif_carrier_ok(const struct net_device *dev)
 {
         return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
@@ -872,30 +1110,66 @@ extern void netif_carrier_on(struct net_device *dev);
 
 extern void netif_carrier_off(struct net_device *dev);
 
+/**
+ *      netif_dormant_on - mark device as dormant.
+ *      @dev: network device
+ *
+ * Mark device as dormant (as per RFC2863).
+ *
+ * The dormant state indicates that the relevant interface is not
+ * actually in a condition to pass packets (i.e., it is not 'up') but is
+ * in a "pending" state, waiting for some external event.  For "on-
+ * demand" interfaces, this new state identifies the situation where the
+ * interface is waiting for events to place it in the up state.
+ *
+ */
 static inline void netif_dormant_on(struct net_device *dev)
 {
         if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
                 linkwatch_fire_event(dev);
 }
 
+/**
+ *      netif_dormant_off - set device as not dormant.
+ *      @dev: network device
+ *
+ * Device is not in dormant state.
+ */
 static inline void netif_dormant_off(struct net_device *dev)
 {
         if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
                 linkwatch_fire_event(dev);
 }
 
+/**
+ *      netif_dormant - test if device is dormant
+ *      @dev: network device
+ *
+ * Check if the device is in the dormant state.
+ */
 static inline int netif_dormant(const struct net_device *dev)
 {
         return test_bit(__LINK_STATE_DORMANT, &dev->state);
 }
 
 
+/**
+ *      netif_oper_up - test if device is operational
+ *      @dev: network device
+ *
+ * Check if carrier is operational
+ */
 static inline int netif_oper_up(const struct net_device *dev) {
         return (dev->operstate == IF_OPER_UP ||
                 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
 }
 
-/* Hot-plugging. */
+/**
+ *      netif_device_present - is device available or removed
+ *      @dev: network device
+ *
+ * Check if device has not been removed from system.
+ */
 static inline int netif_device_present(struct net_device *dev)
 {
         return test_bit(__LINK_STATE_PRESENT, &dev->state);
@@ -955,46 +1229,38 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
         return (1 << debug_value) - 1;
 }
 
-/* Test if receive needs to be scheduled */
-static inline int __netif_rx_schedule_prep(struct net_device *dev)
-{
-        return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
-}
-
 /* Test if receive needs to be scheduled but only if up */
-static inline int netif_rx_schedule_prep(struct net_device *dev)
+static inline int netif_rx_schedule_prep(struct net_device *dev,
+                                         struct napi_struct *napi)
 {
-        return netif_running(dev) && __netif_rx_schedule_prep(dev);
+        return netif_running(dev) && napi_schedule_prep(napi);
 }
 
 /* Add interface to tail of rx poll list. This assumes that _prep has
  * already been called and returned 1.
  */
-
-extern void __netif_rx_schedule(struct net_device *dev);
+static inline void __netif_rx_schedule(struct net_device *dev,
+                                       struct napi_struct *napi)
+{
+        dev_hold(dev);
+        __napi_schedule(napi);
+}
 
 /* Try to reschedule poll. Called by irq handler. */
 
-static inline void netif_rx_schedule(struct net_device *dev)
+static inline void netif_rx_schedule(struct net_device *dev,
+                                     struct napi_struct *napi)
 {
-        if (netif_rx_schedule_prep(dev))
-                __netif_rx_schedule(dev);
+        if (netif_rx_schedule_prep(dev, napi))
+                __netif_rx_schedule(dev, napi);
 }
 
-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
- * Do not inline this?
- */
-static inline int netif_rx_reschedule(struct net_device *dev, int undo)
+/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
+static inline int netif_rx_reschedule(struct net_device *dev,
+                                      struct napi_struct *napi)
 {
-        if (netif_rx_schedule_prep(dev)) {
-                unsigned long flags;
-
-                dev->quota += undo;
-
-                local_irq_save(flags);
-                list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-                __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-                local_irq_restore(flags);
+        if (napi_schedule_prep(napi)) {
+                __netif_rx_schedule(dev, napi);
                 return 1;
         }
         return 0;
@@ -1003,12 +1269,11 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 /* same as netif_rx_complete, except that local_irq_save(flags)
  * has already been issued
  */
-static inline void __netif_rx_complete(struct net_device *dev)
+static inline void __netif_rx_complete(struct net_device *dev,
+                                       struct napi_struct *napi)
 {
-        BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
-        list_del(&dev->poll_list);
-        smp_mb__before_clear_bit();
-        clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
+        __napi_complete(napi);
+        dev_put(dev);
 }
 
 /* Remove interface from poll list: it must be in the poll list
@@ -1016,32 +1281,31 @@ static inline void __netif_rx_complete(struct net_device *dev)
  * it completes the work. The device cannot be out of poll list at this
  * moment, it is BUG().
  */
-static inline void netif_rx_complete(struct net_device *dev)
+static inline void netif_rx_complete(struct net_device *dev,
+                                     struct napi_struct *napi)
 {
         unsigned long flags;
 
         local_irq_save(flags);
-        __netif_rx_complete(dev);
+        __netif_rx_complete(dev, napi);
         local_irq_restore(flags);
 }
 
-static inline void netif_poll_disable(struct net_device *dev)
-{
-        while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state))
-                /* No hurry. */
-                schedule_timeout_interruptible(1);
-}
-
-static inline void netif_poll_enable(struct net_device *dev)
+/**
+ *      netif_tx_lock - grab network device transmit lock
+ *      @dev: network device
+ *
+ * Get network device transmit lock
+ */
+static inline void __netif_tx_lock(struct net_device *dev, int cpu)
 {
-        smp_mb__before_clear_bit();
-        clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
+        spin_lock(&dev->_xmit_lock);
+        dev->xmit_lock_owner = cpu;
 }
 
 static inline void netif_tx_lock(struct net_device *dev)
 {
-        spin_lock(&dev->_xmit_lock);
-        dev->xmit_lock_owner = smp_processor_id();
+        __netif_tx_lock(dev, smp_processor_id());
 }
 
 static inline void netif_tx_lock_bh(struct net_device *dev)
@@ -1070,6 +1334,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
         spin_unlock_bh(&dev->_xmit_lock);
 }
 
+#define HARD_TX_LOCK(dev, cpu) {                        \
+        if ((dev->features & NETIF_F_LLTX) == 0) {      \
+                __netif_tx_lock(dev, cpu);              \
+        }                                               \
+}
+
+#define HARD_TX_UNLOCK(dev) {                           \
+        if ((dev->features & NETIF_F_LLTX) == 0) {      \
+                netif_tx_unlock(dev);                   \
+        }                                               \
+}
+
 static inline void netif_tx_disable(struct net_device *dev)
 {
         netif_tx_lock_bh(dev);
@@ -1098,16 +1374,14 @@ extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all
 extern int              dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
 extern int              dev_mc_sync(struct net_device *to, struct net_device *from);
 extern void             dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern void             dev_mc_discard(struct net_device *dev);
 extern int              __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
 extern int              __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
-extern void             __dev_addr_discard(struct dev_addr_list **list);
 extern void             dev_set_promiscuity(struct net_device *dev, int inc);
 extern void             dev_set_allmulti(struct net_device *dev, int inc);
 extern void             netdev_state_change(struct net_device *dev);
 extern void             netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
-extern void             dev_load(const char *name);
+extern void             dev_load(struct net *net, const char *name);
 extern void             dev_mcast_init(void);
 extern int              netdev_max_backlog;
 extern int              weight_p;
@@ -1133,6 +1407,8 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
 
 extern void linkwatch_run_queue(void);
 
+extern int netdev_compute_features(unsigned long all, unsigned long one);
+
 static inline int net_gso_ok(int features, int gso_type)
 {
         int feature = gso_type << NETIF_F_GSO_SHIFT;