author     Tom Herbert <therbert@google.com>      2014-06-30 12:50:40 -0400
committer  David S. Miller <davem@davemloft.net>  2014-07-07 22:24:34 -0400
commit     11ef7a8996d5d433c9cd75d80651297eccbf6d42 (patch)
tree       d0e9606987ba8a5d35429c0e61c9610ac7448eed /net
parent     68b7107b62983f2cff0948292429d5f5999df096 (diff)
net: Performance fix for process_backlog
In process_backlog the input_pkt_queue is only checked once for new
packets, and quota is artificially reduced to reflect precisely the
number of packets on the input_pkt_queue so that the loop exits
appropriately.

This patch changes the behavior to be more straightforward and less
convoluted. Packets are processed until either the quota is met or
there are no more packets to process.

This patch seems to provide a small but noticeable performance
improvement. The improvement is a result of staying in the
process_backlog loop longer, which can reduce the number of IPIs.

Performance data using super_netperf TCP_RR with 200 flows:

Before fix:
  88.06% CPU utilization
  125/190/309 90/95/99% latencies
  1.46808e+06 tps
  1145382 intrs./sec.

With fix:
  87.73% CPU utilization
  122/183/296 90/95/99% latencies
  1.4921e+06 tps
  1021674.30 intrs./sec.

Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
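For orientation, the reworked loop described above can be rendered as the
following simplified sketch. This is not the literal kernel source:
rps_lock()/rps_unlock(), local IRQ toggling, input_queue_head_incr() and the
flow-limit path are omitted, and only the control flow introduced by this
patch is kept (identifiers match the diff below).

	/* Simplified sketch of process_backlog() after this patch. */
	static int process_backlog_sketch(struct napi_struct *napi, int quota)
	{
		/* sd is derived from the backlog napi, as in net/core/dev.c */
		struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
		int work = 0;

		while (1) {
			struct sk_buff *skb;

			/* Drain packets already spliced onto process_queue. */
			while ((skb = __skb_dequeue(&sd->process_queue))) {
				__netif_receive_skb(skb);
				if (++work >= quota)
					return work;	/* quota met, stop polling */
			}

			/* No new packets arrived: complete NAPI and exit. */
			if (skb_queue_empty(&sd->input_pkt_queue)) {
				list_del(&napi->poll_list);
				napi->state = 0;
				break;
			}

			/* New packets arrived: splice them over and keep going. */
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		return work;
	}

The key property is that the loop only terminates when the quota is exhausted
or both queues are empty, so a single poll can absorb packets that arrive
while it is running, which is why fewer IPIs are needed.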
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c  21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 30eedf677913..77c19c7bb490 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4227,9 +4227,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
 #endif
 	napi->weight = weight_p;
 	local_irq_disable();
-	while (work < quota) {
+	while (1) {
 		struct sk_buff *skb;
-		unsigned int qlen;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
 			local_irq_enable();
@@ -4243,24 +4242,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		}
 
 		rps_lock(sd);
-		qlen = skb_queue_len(&sd->input_pkt_queue);
-		if (qlen)
-			skb_queue_splice_tail_init(&sd->input_pkt_queue,
-						   &sd->process_queue);
-
-		if (qlen < quota - work) {
+		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
 			 * only current cpu owns and manipulates this napi,
-			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
-			 * we can use a plain write instead of clear_bit(),
+			 * and NAPI_STATE_SCHED is the only possible flag set
+			 * on backlog.
+			 * We can use a plain write instead of clear_bit(),
 			 * and we dont need an smp_mb() memory barrier.
 			 */
 			list_del(&napi->poll_list);
 			napi->state = 0;
+			rps_unlock(sd);
 
-			quota = work + qlen;
+			break;
 		}
+
+		skb_queue_splice_tail_init(&sd->input_pkt_queue,
+					   &sd->process_queue);
 		rps_unlock(sd);
 	}
 	local_irq_enable();