Diffstat (limited to 'include')
-rw-r--r--	include/linux/netdevice.h	361
-rw-r--r--	include/linux/netpoll.h	 55
2 files changed, 330 insertions(+), 86 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e679b2751665..b93575db8cce 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -31,6 +31,7 @@
 
 #ifdef __KERNEL__
 #include <linux/timer.h>
+#include <linux/delay.h>
 #include <asm/atomic.h>
 #include <asm/cache.h>
 #include <asm/byteorder.h>
@@ -38,6 +39,7 @@
 #include <linux/device.h>
 #include <linux/percpu.h>
 #include <linux/dmaengine.h>
+#include <linux/workqueue.h>
 
 struct vlan_group;
 struct ethtool_ops;
@@ -258,7 +260,6 @@ enum netdev_state_t
 	__LINK_STATE_PRESENT,
 	__LINK_STATE_SCHED,
 	__LINK_STATE_NOCARRIER,
-	__LINK_STATE_RX_SCHED,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
 	__LINK_STATE_QDISC_RUNNING,
@@ -278,6 +279,110 @@ struct netdev_boot_setup {
 extern int __init netdev_boot_setup(char *str);
 
 /*
+ * Structure for NAPI scheduling, similar to tasklet but with weighting
+ */
+struct napi_struct {
+	/* The poll_list must only be managed by the entity which
+	 * changes the state of the NAPI_STATE_SCHED bit.  This means
+	 * whoever atomically sets that bit can add this napi_struct
+	 * to the per-cpu poll_list, and whoever clears that bit
+	 * can remove from the list right before clearing the bit.
+	 */
+	struct list_head	poll_list;
+
+	unsigned long		state;
+	int			weight;
+	int			(*poll)(struct napi_struct *, int);
+#ifdef CONFIG_NETPOLL
+	spinlock_t		poll_lock;
+	int			poll_owner;
+	struct net_device	*dev;
+	struct list_head	dev_list;
+#endif
+};
+
+enum
+{
+	NAPI_STATE_SCHED,	/* Poll is scheduled */
+};
+
+extern void FASTCALL(__napi_schedule(struct napi_struct *n));
+
+/**
+ *	napi_schedule_prep - check if napi can be scheduled
+ *	@n: napi context
+ *
+ * Test if NAPI routine is already running, and if not mark
+ * it as running.  This is used as a condition variable to
+ * ensure that only one NAPI poll instance runs.
+ */
+static inline int napi_schedule_prep(struct napi_struct *n)
+{
+	return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
+}
+
+/**
+ *	napi_schedule - schedule NAPI poll
+ *	@n: napi context
+ *
+ * Schedule NAPI poll routine to be called if it is not already
+ * running.
+ */
+static inline void napi_schedule(struct napi_struct *n)
+{
+	if (napi_schedule_prep(n))
+		__napi_schedule(n);
+}
+
+/**
+ *	__napi_complete - NAPI processing complete
+ *	@n: napi context
+ *
+ * Mark NAPI processing as complete.  The caller must have local
+ * interrupts disabled; napi_complete() is the irq-safe variant.
+ */
+static inline void __napi_complete(struct napi_struct *n)
+{
+	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+	list_del(&n->poll_list);
+	smp_mb__before_clear_bit();
+	clear_bit(NAPI_STATE_SCHED, &n->state);
+}
+
+static inline void napi_complete(struct napi_struct *n)
+{
+	local_irq_disable();
+	__napi_complete(n);
+	local_irq_enable();
+}
+
+/**
+ *	napi_disable - prevent NAPI from scheduling
+ *	@n: napi context
+ *
+ * Stop NAPI from being scheduled on this context.
+ * Waits until any outstanding processing completes.
+ */
+static inline void napi_disable(struct napi_struct *n)
+{
+	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
+		msleep_interruptible(1);
+}
+
+/**
+ *	napi_enable - enable NAPI scheduling
+ *	@n: napi context
+ *
+ * Allow NAPI to be scheduled on this context again.
+ * Must be paired with napi_disable().
+ */
+static inline void napi_enable(struct napi_struct *n)
+{
+	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+	smp_mb__before_clear_bit();
+	clear_bit(NAPI_STATE_SCHED, &n->state);
+}
+
+/*
  * The DEVICE structure.
  * Actually, this whole structure is a big mistake.  It mixes I/O
  * data with strictly "high-level" data, and it has to know about
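
Example (not part of this patch; the foo_* driver, its private struct, and the IRQ mask/unmask helpers are hypothetical): with the API above, a driver schedules its napi_struct from the interrupt handler and completes it from its poll routine once it has done less than a full budget of work.

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *fp = dev_id;

	if (napi_schedule_prep(&fp->napi)) {
		foo_mask_rx_irqs(fp);		/* hypothetical hw helper */
		__napi_schedule(&fp->napi);
	}
	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work = foo_clean_rx_ring(fp, budget);	/* returns <= budget */

	if (work < budget) {		/* ring drained: stop polling */
		napi_complete(napi);
		foo_unmask_rx_irqs(fp);	/* hypothetical hw helper */
	}
	return work;
}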
@@ -319,6 +424,9 @@ struct net_device
 	unsigned long		state;
 
 	struct list_head	dev_list;
+#ifdef CONFIG_NETPOLL
+	struct list_head	napi_list;
+#endif
 
 	/* The device initialization function. Called only once. */
 	int			(*init)(struct net_device *dev);
@@ -430,12 +538,6 @@ struct net_device
 /*
  * Cache line mostly used on receive path (including eth_type_trans())
  */
-	struct list_head	poll_list ____cacheline_aligned_in_smp;
-					/* Link to poll list	*/
-
-	int			(*poll)(struct net_device *dev, int *quota);
-	int			quota;
-	int			weight;
 	unsigned long		last_rx;	/* Time of last Rx */
 	/* Interface address info used in eth_type_trans() */
 	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
@@ -582,6 +684,12 @@ struct net_device
 #define NETDEV_ALIGN		32
 #define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
 
+/**
+ *	netdev_priv - access network device private data
+ *	@dev: network device
+ *
+ * Get network device private data.
+ */
 static inline void *netdev_priv(const struct net_device *dev)
 {
 	return dev->priv;
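
Example (not part of this patch; foo names are hypothetical): the private area is allocated together with the net_device and reached through netdev_priv():

struct foo_priv {
	struct net_device *netdev;	/* hypothetical back-pointer */
	struct napi_struct napi;
	/* remaining driver state ... */
};

static struct net_device *foo_alloc(void)
{
	/* private area of the requested size is laid out right after
	 * the (aligned) struct net_device itself */
	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));

	if (dev) {
		struct foo_priv *fp = netdev_priv(dev);
		fp->netdev = dev;
	}
	return dev;
}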
@@ -593,6 +701,23 @@ static inline void *netdev_priv(const struct net_device *dev)
  */
 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
 
+static inline void netif_napi_add(struct net_device *dev,
+				  struct napi_struct *napi,
+				  int (*poll)(struct napi_struct *, int),
+				  int weight)
+{
+	INIT_LIST_HEAD(&napi->poll_list);
+	napi->poll = poll;
+	napi->weight = weight;
+#ifdef CONFIG_NETPOLL
+	napi->dev = dev;
+	list_add(&napi->dev_list, &dev->napi_list);
+	spin_lock_init(&napi->poll_lock);
+	napi->poll_owner = -1;
+#endif
+	set_bit(NAPI_STATE_SCHED, &napi->state);
+}
+
 struct packet_type {
 	__be16			type;	/* This is really htons(ether_type). */
 	struct net_device	*dev;	/* NULL is wildcarded here	      */
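
Example (not part of this patch; foo names are hypothetical). Note that netif_napi_add() deliberately leaves NAPI_STATE_SCHED set, so the context cannot be scheduled until napi_enable() runs, typically from the device open routine:

static int foo_probe_netdev(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	/* weight 64 is the conventional value for Ethernet drivers */
	netif_napi_add(dev, &fp->napi, foo_poll, 64);
	return register_netdev(dev);
}

static int foo_open(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	napi_enable(&fp->napi);		/* clears NAPI_STATE_SCHED */
	netif_start_queue(dev);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&fp->napi);	/* waits out a poll in flight */
	return 0;
}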
@@ -678,7 +803,6 @@ static inline int unregister_gifconf(unsigned int family)
  *	Incoming packets are placed on per-cpu queues so that
  *	no locking is needed.
  */
-
 struct softnet_data
 {
 	struct net_device	*output_queue;
@@ -686,7 +810,7 @@ struct softnet_data
 	struct list_head	poll_list;
 	struct sk_buff		*completion_queue;
 
-	struct net_device	backlog_dev;	/* Sorry. 8) */
+	struct napi_struct	backlog;
 #ifdef CONFIG_NET_DMA
 	struct dma_chan		*net_dma;
 #endif
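
Conceptually (a sketch of the idea only, not the actual net/core/dev.c code in this patchset), the non-NAPI netif_rx() path now queues to the per-cpu input queue and schedules the backlog like any other napi_struct:

static void example_enqueue_backlog(struct sk_buff *skb)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	/* queue-length checks and IRQ disabling elided */
	__skb_queue_tail(&sd->input_pkt_queue, skb);
	napi_schedule(&sd->backlog);	/* polled by net_rx_action() */
}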
@@ -704,11 +828,24 @@ static inline void netif_schedule(struct net_device *dev)
 		__netif_schedule(dev);
 }
 
+/**
+ *	netif_start_queue - allow transmit
+ *	@dev: network device
+ *
+ * Allow upper layers to call the device hard_start_xmit routine.
+ */
 static inline void netif_start_queue(struct net_device *dev)
 {
 	clear_bit(__LINK_STATE_XOFF, &dev->state);
 }
 
+/**
+ *	netif_wake_queue - restart transmit
+ *	@dev: network device
+ *
+ * Allow upper layers to call the device hard_start_xmit routine.
+ * Used for flow control when transmit resources are available.
+ */
 static inline void netif_wake_queue(struct net_device *dev)
 {
 #ifdef CONFIG_NETPOLL_TRAP
@@ -721,16 +858,35 @@ static inline void netif_wake_queue(struct net_device *dev)
 	__netif_schedule(dev);
 }
 
+/**
+ *	netif_stop_queue - stop transmitting packets
+ *	@dev: network device
+ *
+ * Stop upper layers calling the device hard_start_xmit routine.
+ * Used for flow control when transmit resources are unavailable.
+ */
 static inline void netif_stop_queue(struct net_device *dev)
 {
 	set_bit(__LINK_STATE_XOFF, &dev->state);
 }
 
+/**
+ *	netif_queue_stopped - test if transmit queue is flow-blocked
+ *	@dev: network device
+ *
+ * Test if transmit queue on device is currently unable to send.
+ */
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
 	return test_bit(__LINK_STATE_XOFF, &dev->state);
 }
 
+/**
+ *	netif_running - test if up
+ *	@dev: network device
+ *
+ * Test if the device has been brought up.
+ */
 static inline int netif_running(const struct net_device *dev)
 {
 	return test_bit(__LINK_STATE_START, &dev->state);
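
Example (not part of this patch; the foo_* ring helpers and the fp->netdev back-pointer are hypothetical): the classic flow-control pattern built from these helpers stops the queue when the TX ring fills and wakes it from the completion path:

static int foo_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	foo_post_tx_descriptor(fp, skb);	/* hypothetical */
	if (foo_tx_ring_full(fp))		/* hypothetical */
		netif_stop_queue(dev);		/* throttle the stack */
	return NETDEV_TX_OK;
}

static void foo_tx_reclaim(struct foo_priv *fp)
{
	/* ... free completed descriptors ... */
	if (netif_queue_stopped(fp->netdev) && !foo_tx_ring_full(fp))
		netif_wake_queue(fp->netdev);	/* resume transmission */
}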
@@ -742,6 +898,14 @@ static inline int netif_running(const struct net_device *dev)
  * done at the overall netdevice level.
  * Also test the device if we're multiqueue.
  */
+
+/**
+ *	netif_start_subqueue - allow sending packets on subqueue
+ *	@dev: network device
+ *	@queue_index: sub queue index
+ *
+ * Start individual transmit queue of a device with multiple transmit queues.
+ */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -749,6 +913,13 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 #endif
 }
 
+/**
+ *	netif_stop_subqueue - stop sending packets on subqueue
+ *	@dev: network device
+ *	@queue_index: sub queue index
+ *
+ * Stop individual transmit queue of a device with multiple transmit queues.
+ */
 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -760,6 +931,13 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 #endif
 }
 
+/**
+ *	netif_subqueue_stopped - test status of subqueue
+ *	@dev: network device
+ *	@queue_index: sub queue index
+ *
+ * Check individual transmit queue of a device with multiple transmit queues.
+ */
 static inline int netif_subqueue_stopped(const struct net_device *dev,
 					 u16 queue_index)
 {
@@ -771,6 +949,14 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
 #endif
 }
 
+
+/**
+ *	netif_wake_subqueue - allow sending packets on subqueue
+ *	@dev: network device
+ *	@queue_index: sub queue index
+ *
+ * Resume individual transmit queue of a device with multiple transmit queues.
+ */
 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -784,6 +970,13 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 #endif
 }
 
+/**
+ *	netif_is_multiqueue - test if device has multiple transmit queues
+ *	@dev: network device
+ *
+ * Check if device has multiple transmit queues.
+ * Always false if NETDEVICES_MULTIQUEUE is not configured.
+ */
 static inline int netif_is_multiqueue(const struct net_device *dev)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
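
Example (not part of this patch; foo names are hypothetical): per-ring flow control on a device allocated with alloc_netdev_mq(), mirroring the single-queue pattern above:

static void foo_ring_full(struct net_device *dev, u16 ring)
{
	netif_stop_subqueue(dev, ring);		/* stop only this ring */
}

static void foo_ring_reclaimed(struct net_device *dev, u16 ring)
{
	if (netif_subqueue_stopped(dev, ring))
		netif_wake_subqueue(dev, ring);
}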
@@ -796,20 +989,7 @@ static inline int netif_is_multiqueue(const struct net_device *dev)
 /* Use this variant when it is known for sure that it
  * is executing from interrupt context.
  */
-static inline void dev_kfree_skb_irq(struct sk_buff *skb)
-{
-	if (atomic_dec_and_test(&skb->users)) {
-		struct softnet_data *sd;
-		unsigned long flags;
-
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		skb->next = sd->completion_queue;
-		sd->completion_queue = skb;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
-}
+extern void dev_kfree_skb_irq(struct sk_buff *skb);
 
 /* Use this variant in places where it could be invoked
  * either from interrupt or non-interrupt context.
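
Usage is unchanged by the uninlining; a sketch (hypothetical foo helper) of the hard-IRQ TX-completion path it exists for:

static void foo_tx_irq(struct foo_priv *fp)
{
	struct sk_buff *skb;

	while ((skb = foo_next_completed_skb(fp)) != NULL)
		dev_kfree_skb_irq(skb);	/* freed later in NET_TX_SOFTIRQ */
}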
@@ -833,18 +1013,28 @@ extern int dev_set_mac_address(struct net_device *,
 extern int		dev_hard_start_xmit(struct sk_buff *skb,
 					    struct net_device *dev);
 
-extern void		dev_init(void);
-
 extern int		netdev_budget;
 
 /* Called by rtnetlink.c:rtnl_unlock() */
 extern void netdev_run_todo(void);
 
+/**
+ *	dev_put - release reference to device
+ *	@dev: network device
+ *
+ * Release reference to device to allow it to be freed.
+ */
 static inline void dev_put(struct net_device *dev)
 {
 	atomic_dec(&dev->refcnt);
 }
 
+/**
+ *	dev_hold - get reference to device
+ *	@dev: network device
+ *
+ * Hold reference to device to keep it from being freed.
+ */
 static inline void dev_hold(struct net_device *dev)
 {
 	atomic_inc(&dev->refcnt);
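
Example (not part of this patch; foo names are hypothetical): any code that stashes a net_device pointer must pin it with dev_hold() and drop it with dev_put(), which is exactly what the reworked __netif_rx_schedule()/__netif_rx_complete() pair later in this patch does across a poll:

struct foo_binding {
	struct net_device *dev;
};

static void foo_bind(struct foo_binding *b, struct net_device *dev)
{
	dev_hold(dev);		/* pin while we keep the pointer */
	b->dev = dev;
}

static void foo_unbind(struct foo_binding *b)
{
	dev_put(b->dev);	/* device may now go away */
	b->dev = NULL;
}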
@@ -861,6 +1051,12 @@ static inline void dev_hold(struct net_device *dev)
 
 extern void linkwatch_fire_event(struct net_device *dev);
 
+/**
+ *	netif_carrier_ok - test if carrier present
+ *	@dev: network device
+ *
+ * Check if carrier is present on device.
+ */
 static inline int netif_carrier_ok(const struct net_device *dev)
 {
 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
@@ -872,30 +1068,66 @@ extern void netif_carrier_on(struct net_device *dev);
 
 extern void netif_carrier_off(struct net_device *dev);
 
+/**
+ *	netif_dormant_on - mark device as dormant.
+ *	@dev: network device
+ *
+ * Mark device as dormant (as per RFC 2863).
+ *
+ * The dormant state indicates that the relevant interface is not
+ * actually in a condition to pass packets (i.e., it is not 'up') but is
+ * in a "pending" state, waiting for some external event.  For "on-
+ * demand" interfaces, this new state identifies the situation where the
+ * interface is waiting for events to place it in the up state.
+ *
+ */
 static inline void netif_dormant_on(struct net_device *dev)
 {
 	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
 		linkwatch_fire_event(dev);
 }
 
+/**
+ *	netif_dormant_off - set device as not dormant.
+ *	@dev: network device
+ *
+ * Mark device as no longer dormant.
+ */
 static inline void netif_dormant_off(struct net_device *dev)
 {
 	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
 		linkwatch_fire_event(dev);
 }
 
+/**
+ *	netif_dormant - test if device is dormant
+ *	@dev: network device
+ *
+ * Check if the device is dormant.
+ */
 static inline int netif_dormant(const struct net_device *dev)
 {
 	return test_bit(__LINK_STATE_DORMANT, &dev->state);
 }
 
 
+/**
+ *	netif_oper_up - test if device is operational
+ *	@dev: network device
+ *
+ * Check if carrier is operational.
+ */
 static inline int netif_oper_up(const struct net_device *dev) {
 	return (dev->operstate == IF_OPER_UP ||
 		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
 }
 
-/* Hot-plugging. */
+/**
+ *	netif_device_present - is device available or removed
+ *	@dev: network device
+ *
+ * Check if device has not been removed from system.
+ */
 static inline int netif_device_present(struct net_device *dev)
 {
 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
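
Example (not part of this patch; the link_up argument and its source are hypothetical): a link-change handler drives these state bits, and each transition fires a linkwatch event:

static void foo_link_change(struct net_device *dev, int link_up)
{
	if (link_up)
		netif_carrier_on(dev);	/* fires a linkwatch event */
	else
		netif_carrier_off(dev);
}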
@@ -955,46 +1187,38 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 	return (1 << debug_value) - 1;
 }
 
-/* Test if receive needs to be scheduled */
-static inline int __netif_rx_schedule_prep(struct net_device *dev)
-{
-	return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
-}
-
 /* Test if receive needs to be scheduled but only if up */
-static inline int netif_rx_schedule_prep(struct net_device *dev)
+static inline int netif_rx_schedule_prep(struct net_device *dev,
+					 struct napi_struct *napi)
 {
-	return netif_running(dev) && __netif_rx_schedule_prep(dev);
+	return netif_running(dev) && napi_schedule_prep(napi);
 }
 
 /* Add interface to tail of rx poll list. This assumes that _prep has
  * already been called and returned 1.
  */
-
-extern void __netif_rx_schedule(struct net_device *dev);
+static inline void __netif_rx_schedule(struct net_device *dev,
+				       struct napi_struct *napi)
+{
+	dev_hold(dev);
+	__napi_schedule(napi);
+}
 
 /* Try to reschedule poll. Called by irq handler. */
 
-static inline void netif_rx_schedule(struct net_device *dev)
+static inline void netif_rx_schedule(struct net_device *dev,
+				     struct napi_struct *napi)
 {
-	if (netif_rx_schedule_prep(dev))
-		__netif_rx_schedule(dev);
+	if (netif_rx_schedule_prep(dev, napi))
+		__netif_rx_schedule(dev, napi);
 }
 
-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
- * Do not inline this?
- */
-static inline int netif_rx_reschedule(struct net_device *dev, int undo)
+/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
+static inline int netif_rx_reschedule(struct net_device *dev,
+				      struct napi_struct *napi)
 {
-	if (netif_rx_schedule_prep(dev)) {
-		unsigned long flags;
-
-		dev->quota += undo;
-
-		local_irq_save(flags);
-		list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
-		local_irq_restore(flags);
+	if (napi_schedule_prep(napi)) {
+		__netif_rx_schedule(dev, napi);
 		return 1;
 	}
 	return 0;
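
Example (not part of this patch; foo names are hypothetical): converting an existing driver's interrupt handler to the compatibility wrappers is mechanical, and the old quota/undo bookkeeping disappears because the budget is now passed to ->poll directly:

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *fp = netdev_priv(dev);

	if (netif_rx_schedule_prep(dev, &fp->napi)) {
		foo_mask_rx_irqs(fp);			/* hypothetical */
		__netif_rx_schedule(dev, &fp->napi);	/* takes dev_hold() */
	}
	return IRQ_HANDLED;
}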
@@ -1003,12 +1227,11 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 /* same as netif_rx_complete, except that local_irq_save(flags)
  * has already been issued
  */
-static inline void __netif_rx_complete(struct net_device *dev)
+static inline void __netif_rx_complete(struct net_device *dev,
+				       struct napi_struct *napi)
 {
-	BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
-	list_del(&dev->poll_list);
-	smp_mb__before_clear_bit();
-	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
+	__napi_complete(napi);
+	dev_put(dev);
 }
 
 /* Remove interface from poll list: it must be in the poll list
@@ -1016,28 +1239,22 @@ static inline void __netif_rx_complete(struct net_device *dev)
  * it completes the work. The device cannot be out of poll list at this
  * moment, it is BUG().
  */
-static inline void netif_rx_complete(struct net_device *dev)
+static inline void netif_rx_complete(struct net_device *dev,
+				     struct napi_struct *napi)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__netif_rx_complete(dev);
+	__netif_rx_complete(dev, napi);
 	local_irq_restore(flags);
 }
 
-static inline void netif_poll_disable(struct net_device *dev)
-{
-	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state))
-		/* No hurry. */
-		schedule_timeout_interruptible(1);
-}
-
-static inline void netif_poll_enable(struct net_device *dev)
-{
-	smp_mb__before_clear_bit();
-	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
-}
-
+/**
+ *	netif_tx_lock - grab network device transmit lock
+ *	@dev: network device
+ *
+ * Get network device transmit lock.
+ */
 static inline void netif_tx_lock(struct net_device *dev)
 {
 	spin_lock(&dev->_xmit_lock);
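
Example (not part of this patch; the foo helper is hypothetical): netif_tx_lock() serializes against the device's hard_start_xmit, e.g. from a watchdog that pokes the transmit ring; netif_tx_unlock() is its existing counterpart in this header:

static void foo_watchdog(struct net_device *dev)
{
	netif_tx_lock(dev);
	foo_restart_tx_ring(netdev_priv(dev));	/* hypothetical */
	netif_tx_unlock(dev);
}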
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 29930b71a9aa..08dcc39ec18d 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -25,8 +25,6 @@ struct netpoll {
 
 struct netpoll_info {
 	atomic_t refcnt;
-	spinlock_t poll_lock;
-	int poll_owner;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
@@ -64,32 +62,61 @@ static inline int netpoll_rx(struct sk_buff *skb)
 	return ret;
 }
 
-static inline void *netpoll_poll_lock(struct net_device *dev)
+static inline int netpoll_receive_skb(struct sk_buff *skb)
 {
+	if (!list_empty(&skb->dev->napi_list))
+		return netpoll_rx(skb);
+	return 0;
+}
+
+static inline void *netpoll_poll_lock(struct napi_struct *napi)
+{
+	struct net_device *dev = napi->dev;
+
 	rcu_read_lock(); /* deal with race on ->npinfo */
-	if (dev->npinfo) {
-		spin_lock(&dev->npinfo->poll_lock);
-		dev->npinfo->poll_owner = smp_processor_id();
-		return dev->npinfo;
+	if (dev && dev->npinfo) {
+		spin_lock(&napi->poll_lock);
+		napi->poll_owner = smp_processor_id();
+		return napi;
 	}
 	return NULL;
 }
 
 static inline void netpoll_poll_unlock(void *have)
 {
-	struct netpoll_info *npi = have;
+	struct napi_struct *napi = have;
 
-	if (npi) {
-		npi->poll_owner = -1;
-		spin_unlock(&npi->poll_lock);
+	if (napi) {
+		napi->poll_owner = -1;
+		spin_unlock(&napi->poll_lock);
 	}
 	rcu_read_unlock();
 }
 
+static inline void netpoll_netdev_init(struct net_device *dev)
+{
+	INIT_LIST_HEAD(&dev->napi_list);
+}
+
 #else
-#define netpoll_rx(a) 0
-#define netpoll_poll_lock(a) NULL
-#define netpoll_poll_unlock(a)
+static inline int netpoll_rx(struct sk_buff *skb)
+{
+	return 0;
+}
+static inline int netpoll_receive_skb(struct sk_buff *skb)
+{
+	return 0;
+}
+static inline void *netpoll_poll_lock(struct napi_struct *napi)
+{
+	return NULL;
+}
+static inline void netpoll_poll_unlock(void *have)
+{
+}
+static inline void netpoll_netdev_init(struct net_device *dev)
+{
+}
 #endif
 
 #endif
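
A sketch of the caller's pattern (the real code lives in net_rx_action() and netpoll itself, and may differ in detail): each ->poll invocation is bracketed by the per-napi lock so netpoll's own polling and softirq polling never run the same context concurrently:

static int example_run_poll(struct napi_struct *n, int budget)
{
	void *have = netpoll_poll_lock(n);
	int work = n->poll(n, budget);

	netpoll_poll_unlock(have);
	return work;
}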