-rw-r--r--   include/linux/netdevice.h | 10
-rw-r--r--   net/core/netpoll.c        |  8
2 files changed, 18 insertions, 0 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 8590d685d935..79cc3dab4be7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -261,6 +261,8 @@ enum netdev_state_t
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
 	__LINK_STATE_QDISC_RUNNING,
+	/* Set by the netpoll NAPI code */
+	__LINK_STATE_POLL_LIST_FROZEN,
 };
 
 
@@ -1014,6 +1016,14 @@ static inline void netif_rx_complete(struct net_device *dev)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_NETPOLL
+	/* Prevent race with netpoll - yes, this is a kludge.
+	 * But at least it doesn't penalize the non-netpoll
+	 * code path. */
+	if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
+		return;
+#endif
+
 	local_irq_save(flags);
 	__netif_rx_complete(dev);
 	local_irq_restore(flags);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index de1b26aa5720..d1264e9a50a8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -124,6 +124,13 @@ static void poll_napi(struct netpoll *np)
 	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
 	    npinfo->poll_owner != smp_processor_id() &&
 	    spin_trylock(&npinfo->poll_lock)) {
+		/* When calling dev->poll from poll_napi, we may end up in
+		 * netif_rx_complete. However, only the CPU to which the
+		 * device was queued is allowed to remove it from poll_list.
+		 * Setting POLL_LIST_FROZEN tells netif_rx_complete
+		 * to leave the NAPI state alone.
+		 */
+		set_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		npinfo->rx_flags |= NETPOLL_RX_DROP;
 		atomic_inc(&trapped);
 
@@ -131,6 +138,7 @@ static void poll_napi(struct netpoll *np)
 
 		atomic_dec(&trapped);
 		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+		clear_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		spin_unlock(&npinfo->poll_lock);
 	}
 }
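
For readers following the interaction above, here is a minimal standalone sketch (not kernel code) of the pattern this patch introduces: poll_napi() sets a "frozen" bit around the re-entrant call into the driver's poll handler, so the completion path backs off instead of removing the device from the poll list on the wrong CPU. All identifiers below (dev_state, FROZEN, RX_SCHED, fake_poll, poll_napi_sketch) are illustrative stand-ins, not the kernel's own symbols.

/*
 * Standalone illustration of the "freeze the poll-list state" idea.
 * Build with: cc -Wall sketch.c -o sketch
 */
#include <stdio.h>

#define RX_SCHED (1UL << 0)   /* stands in for __LINK_STATE_RX_SCHED */
#define FROZEN   (1UL << 1)   /* stands in for __LINK_STATE_POLL_LIST_FROZEN */

static unsigned long dev_state = RX_SCHED;

/* Analogue of netif_rx_complete(): normally dequeues the device,
 * but leaves the NAPI state alone while the frozen bit is set. */
static void rx_complete(void)
{
	if (dev_state & FROZEN) {
		printf("rx_complete: frozen, leaving poll state alone\n");
		return;
	}
	dev_state &= ~RX_SCHED;
	printf("rx_complete: device removed from poll list\n");
}

/* Analogue of a driver's dev->poll() finishing its work and
 * calling the completion helper. */
static void fake_poll(void)
{
	printf("poll: work done, completing\n");
	rx_complete();
}

/* Analogue of poll_napi(): brackets the re-entrant poll with
 * set/clear of the frozen bit, as the patch does. */
static void poll_napi_sketch(void)
{
	if (dev_state & RX_SCHED) {
		dev_state |= FROZEN;
		fake_poll();
		dev_state &= ~FROZEN;
	}
}

int main(void)
{
	poll_napi_sketch();   /* completion is suppressed while frozen */
	rx_complete();        /* the normal path still dequeues */
	return 0;
}

Run on its own, the sketch prints the suppressed completion first and the normal completion second, which mirrors why the real patch only touches the netpoll path and leaves the ordinary netif_rx_complete() fast path unpenalized.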