author	Changli Gao <xiaosuo@gmail.com>	2010-05-02 01:42:16 -0400
committer	David S. Miller <davem@davemloft.net>	2010-05-03 01:26:57 -0400
commit	dee42870a423ad485129f43cddfe7275479f11d8 (patch)
tree	e30433d3b55ee248eb672765fe6705be160d882c
parent	7ef527377b88ff05fb122a47619ea506c631c914 (diff)
net: fix softnet_stat
Per-CPU variable softnet_data.total was shared between IRQ and SoftIRQ
context without any protection, and enqueue_to_backlog should update the
netdev_rx_stat of the target CPU, not the local one. This patch renames
softnet_data.total to softnet_data.processed: the number of packets
processed in the upper levels (the IP stack). The softnet_stat data is
moved into softnet_data itself.

Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/linux/netdevice.h	17
-rw-r--r--	net/core/dev.c	26
-rw-r--r--	net/sched/sch_generic.c	2
3 files changed, 20 insertions(+), 25 deletions(-)
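For context on the user-visible side of this change: softnet_seq_show() still emits ten hex columns per online CPU in /proc/net/softnet_stat, with fields 4-8 fixed at zero (one of them the old fastroute slot), so existing readers keep working across the rename. Below is a minimal userspace sketch, not part of the patch, that parses the post-patch layout; the field names follow the renamed softnet_data members, and the program name is hypothetical.

/*
 * softnet_stat.c: parse /proc/net/softnet_stat as laid out by
 * softnet_seq_show() after this patch. One line per online CPU,
 * ten hex fields; fields 4-8 are always zero and only kept so the
 * column positions stay compatible with old tools.
 */
#include <stdio.h>

int main(void)
{
	unsigned processed, dropped, time_squeeze, cpu_collision, received_rps;
	unsigned pad[5];	/* the five always-zero compatibility fields */
	FILE *f = fopen("/proc/net/softnet_stat", "r");
	int cpu = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fscanf(f, "%x %x %x %x %x %x %x %x %x %x",
		      &processed, &dropped, &time_squeeze,
		      &pad[0], &pad[1], &pad[2], &pad[3], &pad[4],
		      &cpu_collision, &received_rps) == 10) {
		printf("cpu%d: processed=%u dropped=%u time_squeeze=%u "
		       "cpu_collision=%u received_rps=%u\n",
		       cpu++, processed, dropped, time_squeeze,
		       cpu_collision, received_rps);
	}
	fclose(f);
	return 0;
}

Note that the lines in the proc file are indexed by iteration order over online CPUs, not by CPU number, so the cpu%d label above is only an ordinal.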
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 40d4c20d034b..c39938f8a8d8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -218,16 +218,6 @@ struct neighbour;
 struct neigh_parms;
 struct sk_buff;
 
-struct netif_rx_stats {
-	unsigned total;
-	unsigned dropped;
-	unsigned time_squeeze;
-	unsigned cpu_collision;
-	unsigned received_rps;
-};
-
-DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
-
 struct netdev_hw_addr {
 	struct list_head	list;
 	unsigned char		addr[MAX_ADDR_LEN];
@@ -1390,6 +1380,12 @@ struct softnet_data {
 	struct sk_buff		*completion_queue;
 	struct sk_buff_head	process_queue;
 
+	/* stats */
+	unsigned		processed;
+	unsigned		time_squeeze;
+	unsigned		cpu_collision;
+	unsigned		received_rps;
+
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
 
@@ -1399,6 +1395,7 @@ struct softnet_data {
 	unsigned int		cpu;
 	unsigned int		input_queue_head;
 #endif
+	unsigned		dropped;
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
 };
diff --git a/net/core/dev.c b/net/core/dev.c
index 100dcbd29739..36d53be4fca6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2205,8 +2205,6 @@ int netdev_max_backlog __read_mostly = 1000;
 int netdev_budget __read_mostly = 300;
 int weight_p __read_mostly = 64;            /* old backlog weight */
 
-DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
-
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
@@ -2366,7 +2364,7 @@ static void rps_trigger_softirq(void *data)
 	struct softnet_data *sd = data;
 
 	__napi_schedule(&sd->backlog);
-	__get_cpu_var(netdev_rx_stat).received_rps++;
+	sd->received_rps++;
 }
 
 #endif /* CONFIG_RPS */
@@ -2405,7 +2403,6 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	sd = &per_cpu(softnet_data, cpu);
 
 	local_irq_save(flags);
-	__get_cpu_var(netdev_rx_stat).total++;
 
 	rps_lock(sd);
 	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
@@ -2429,9 +2426,9 @@ enqueue:
 		goto enqueue;
 	}
 
+	sd->dropped++;
 	rps_unlock(sd);
 
-	__get_cpu_var(netdev_rx_stat).dropped++;
 	local_irq_restore(flags);
 
 	kfree_skb(skb);
@@ -2806,7 +2803,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 			skb->dev = master;
 	}
 
-	__get_cpu_var(netdev_rx_stat).total++;
+	__get_cpu_var(softnet_data).processed++;
 
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
@@ -3490,7 +3487,7 @@ out:
 	return;
 
 softnet_break:
-	__get_cpu_var(netdev_rx_stat).time_squeeze++;
+	sd->time_squeeze++;
 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 	goto out;
 }
@@ -3691,17 +3688,17 @@ static int dev_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
-static struct netif_rx_stats *softnet_get_online(loff_t *pos)
+static struct softnet_data *softnet_get_online(loff_t *pos)
 {
-	struct netif_rx_stats *rc = NULL;
+	struct softnet_data *sd = NULL;
 
 	while (*pos < nr_cpu_ids)
 		if (cpu_online(*pos)) {
-			rc = &per_cpu(netdev_rx_stat, *pos);
+			sd = &per_cpu(softnet_data, *pos);
 			break;
 		} else
 			++*pos;
-	return rc;
+	return sd;
 }
 
 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3721,12 +3718,12 @@ static void softnet_seq_stop(struct seq_file *seq, void *v)
 
 static int softnet_seq_show(struct seq_file *seq, void *v)
 {
-	struct netif_rx_stats *s = v;
+	struct softnet_data *sd = v;
 
 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
-		   s->total, s->dropped, s->time_squeeze, 0,
+		   sd->processed, sd->dropped, sd->time_squeeze, 0,
 		   0, 0, 0, 0, /* was fastroute */
-		   s->cpu_collision, s->received_rps);
+		   sd->cpu_collision, sd->received_rps);
 	return 0;
 }
 
@@ -5869,6 +5866,7 @@ static int __init net_dev_init(void)
 	for_each_possible_cpu(i) {
 		struct softnet_data *sd = &per_cpu(softnet_data, i);
 
+		memset(sd, 0, sizeof(*sd));
 		skb_queue_head_init(&sd->input_pkt_queue);
 		skb_queue_head_init(&sd->process_queue);
 		sd->completion_queue = NULL;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index aeddabfb8e4e..a969b111bd76 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -94,7 +94,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		 * Another cpu is holding lock, requeue & delay xmits for
 		 * some time.
 		 */
-		__get_cpu_var(netdev_rx_stat).cpu_collision++;
+		__get_cpu_var(softnet_data).cpu_collision++;
 		ret = dev_requeue_skb(skb, q);
 	}
 