Diffstat (limited to 'net/core')
-rw-r--r--	net/core/netpoll.c	8
1 file changed, 8 insertions, 0 deletions
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index de1b26aa5720..d1264e9a50a8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -124,6 +124,13 @@ static void poll_napi(struct netpoll *np)
 	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
 	    npinfo->poll_owner != smp_processor_id() &&
 	    spin_trylock(&npinfo->poll_lock)) {
+		/* When calling dev->poll from poll_napi, we may end up in
+		 * netif_rx_complete. However, only the CPU to which the
+		 * device was queued is allowed to remove it from poll_list.
+		 * Setting POLL_LIST_FROZEN tells netif_rx_complete
+		 * to leave the NAPI state alone.
+		 */
+		set_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		npinfo->rx_flags |= NETPOLL_RX_DROP;
 		atomic_inc(&trapped);
 
@@ -131,6 +138,7 @@ static void poll_napi(struct netpoll *np)
 
 		atomic_dec(&trapped);
 		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+		clear_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		spin_unlock(&npinfo->poll_lock);
 	}
 }
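
Note: the __LINK_STATE_POLL_LIST_FROZEN bit set above is meant to be honored on the netif_rx_complete() side, which lives outside net/core and is therefore not included in this diffstat-limited view. Below is a minimal sketch of what that check could look like, assuming a 2.6-era netif_rx_complete() that takes a struct net_device * and wraps __netif_rx_complete(); the exact companion change is not shown here.

static inline void netif_rx_complete(struct net_device *dev)
{
	unsigned long flags;

	/* Sketch (assumed companion change): if netpoll has frozen the
	 * poll list from another CPU, leave the NAPI state alone rather
	 * than removing dev from that CPU's poll_list.
	 */
	if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
		return;

	local_irq_save(flags);
	__netif_rx_complete(dev);
	local_irq_restore(flags);
}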