author		Eric Dumazet <edumazet@google.com>	2015-01-15 20:04:22 -0500
committer	David S. Miller <davem@davemloft.net>	2015-01-16 01:02:42 -0500
commit		ac64da0b83d82abe62f78b3d0e21cca31aea24fa (patch)
tree		1cacda83c251cfdc161757317dca62fd6fb16a50 /net
parent		57d737c5e21cd26f63272aa4b0e34680788e12ed (diff)
net: rps: fix cpu unplug
softnet_data.input_pkt_queue is protected by a spinlock that we must
hold when transferring packets from the victim queue to an active one,
because other cpus could still be trying to enqueue packets into the
victim queue.
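
[Editor's context, not part of the commit message: this is why the second
hunk below switches the input_pkt_queue drain from __skb_dequeue() to
skb_dequeue(). A minimal sketch of the locking contract, close to the
actual helper in net/core/skbuff.c:

	/* Locked variant: safe while other cpus may still be enqueueing,
	 * because it takes the queue's own spinlock with irqs saved. */
	struct sk_buff *skb_dequeue(struct sk_buff_head *list)
	{
		unsigned long flags;
		struct sk_buff *result;

		spin_lock_irqsave(&list->lock, flags);
		/* __skb_dequeue() is the lock-free variant: the caller
		 * must already hold list->lock. */
		result = __skb_dequeue(list);
		spin_unlock_irqrestore(&list->lock, flags);
		return result;
	}
]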
A second problem is that when we transfert the NAPI poll_list from
victim to current cpu, we absolutely need to special case the percpu
backlog, because we do not want to add complex locking to protect
process_queue : Only owner cpu is allowed to manipulate it, unless cpu
is offline.
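
[Editor's context: the victim's queued packets are not spliced across;
they are re-injected on the current cpu. A sketch of that drain sequence
as it looks in dev_cpu_callback() after this patch (skb and oldsd are
locals of the surrounding function; the second hunk below shows the tail
of exactly this code):

	/* Drain the offline cpu's backlog queues, re-injecting each
	 * packet on the current cpu. process_queue needs no lock: the
	 * victim cpu is offline, so we are its only manipulator now. */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}
	/* input_pkt_queue needs the locked variant: other cpus may
	 * still be enqueueing into it. */
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}
]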
Based on an initial patch from Prasad Sodagudi & Subash Abhinov
Kasiviswanathan.
This version is better because it does not slow down packet processing;
it only makes migration safer.
Reported-by: Prasad Sodagudi <psodagud@codeaurora.org>
Reported-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/core/dev.c	20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 683d493aa1bf..171420e75b03 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7072,10 +7072,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 		oldsd->output_queue = NULL;
 		oldsd->output_queue_tailp = &oldsd->output_queue;
 	}
-	/* Append NAPI poll list from offline CPU. */
-	if (!list_empty(&oldsd->poll_list)) {
-		list_splice_init(&oldsd->poll_list, &sd->poll_list);
-		raise_softirq_irqoff(NET_RX_SOFTIRQ);
+	/* Append NAPI poll list from offline CPU, with one exception :
+	 * process_backlog() must be called by cpu owning percpu backlog.
+	 * We properly handle process_queue & input_pkt_queue later.
+	 */
+	while (!list_empty(&oldsd->poll_list)) {
+		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
+							    struct napi_struct,
+							    poll_list);
+
+		list_del_init(&napi->poll_list);
+		if (napi->poll == process_backlog)
+			napi->state = 0;
+		else
+			____napi_schedule(sd, napi);
 	}
 
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -7086,7 +7096,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 		netif_rx_internal(skb);
 		input_queue_head_incr(oldsd);
 	}
-	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
 		netif_rx_internal(skb);
 		input_queue_head_incr(oldsd);
 	}
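
[Editor's note: the first hunk drops the explicit
raise_softirq_irqoff(NET_RX_SOFTIRQ) from the old splice branch. That is
safe because every non-backlog napi now goes through ____napi_schedule(),
which both re-queues the napi on the current cpu and raises the softirq
itself. Its definition in net/core/dev.c at the time was essentially:

	/* Called with irq disabled */
	static inline void ____napi_schedule(struct softnet_data *sd,
					     struct napi_struct *napi)
	{
		list_add_tail(&napi->poll_list, &sd->poll_list);
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}
]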