author    Neil Horman <nhorman@tuxdriver.com>  2012-05-01 04:18:02 -0400
committer David S. Miller <davem@davemloft.net>  2012-05-02 21:02:48 -0400
commit    4fdcfa12843bca38d0c9deff70c8720e4e8f515f (patch)
tree      3ef2a92b1d6d322f9b72185e58b9d35a15564692 /net
parent    a4723848d05dd31d298c551fb77ad28481309999 (diff)
drop_monitor: prevent init path from scheduling on the wrong cpu
I just noticed after some recent updates that the init path for the drop
monitor protocol has a minor error. Drop monitor maintains a per-cpu
structure that gets initialized from a single cpu. Normally this is fine,
as the protocol isn't in use yet, but I recently made a change that causes
a failed skb allocation to reschedule itself. Given the current code, the
implication is that this workqueue reschedule will take place on the wrong
cpu. If drop monitor is used early during the boot process, it's possible
that two cpus will access a single per-cpu structure in parallel, possibly
leading to data corruption.

This patch fixes the situation by storing the cpu number that a given
instance of this per-cpu data should be accessed from. When a reschedule
is needed, the work is scheduled on the cpu stored in the struct, rather
than on the currently executing cpu.

Tested successfully by myself.

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: David Miller <davem@davemloft.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
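The pattern the fix adopts is worth spelling out. Below is a minimal,
self-contained sketch of it (hypothetical names such as my_pcpu_data and
my_work_fn; this is not the drop_monitor code itself): each per-cpu
instance records its owning cpu once at init time, and any later
reschedule is pinned to that cpu with schedule_work_on() instead of being
issued against smp_processor_id(), which may name a different cpu when
the caller happens to run on another processor.

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* One instance per cpu; ->cpu records which cpu owns the instance. */
struct my_pcpu_data {
	struct work_struct work;
	int cpu;
};

static DEFINE_PER_CPU(struct my_pcpu_data, my_data);

static void my_work_fn(struct work_struct *work)
{
	struct my_pcpu_data *data =
		container_of(work, struct my_pcpu_data, work);

	/* The work must only ever run on the cpu recorded at init time. */
	WARN_ON_ONCE(data->cpu != smp_processor_id());
}

static void my_reschedule(struct my_pcpu_data *data)
{
	/*
	 * Pin the work to the owning cpu. Queueing on smp_processor_id()
	 * here instead would use whichever cpu happens to be running this
	 * code, which is exactly the bug the patch fixes.
	 */
	schedule_work_on(data->cpu, &data->work);
}

static int __init my_init(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct my_pcpu_data *data = &per_cpu(my_data, cpu);

		/* Record the owner before anything can queue the work. */
		data->cpu = cpu;
		INIT_WORK(&data->work, my_work_fn);
	}
	return 0;
}
module_init(my_init);

MODULE_LICENSE("GPL");

Note how this ordering mirrors the patch below: in init_net_drop_monitor()
the reset_per_cpu_data() call moves to the end of the per-cpu loop, since
it may now call schedule_work_on(data->cpu, ...) and therefore needs
data->cpu and the work struct to be set up first.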
Diffstat (limited to 'net')
 net/core/drop_monitor.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7592943513e3..a7cad741df01 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -49,6 +49,7 @@ struct per_cpu_dm_data {
 	struct sk_buff __rcu *skb;
 	atomic_t dm_hit_count;
 	struct timer_list send_timer;
+	int cpu;
 };
 
 struct dm_hw_stat_delta {
@@ -73,7 +74,6 @@ static int dm_hit_limit = 64;
 static int dm_delay = 1;
 static unsigned long dm_hw_check_delta = 2*HZ;
 static LIST_HEAD(hw_stats_list);
-static int initialized = 0;
 
 static void reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
@@ -96,8 +96,8 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
 			  sizeof(struct net_dm_alert_msg));
 		msg = nla_data(nla);
 		memset(msg, 0, al);
-	} else if (initialized)
-		schedule_work_on(smp_processor_id(), &data->dm_alert_work);
+	} else
+		schedule_work_on(data->cpu, &data->dm_alert_work);
 
 	/*
 	 * Don't need to lock this, since we are guaranteed to only
@@ -121,6 +121,8 @@ static void send_dm_alert(struct work_struct *unused)
 	struct sk_buff *skb;
 	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
 
+	WARN_ON_ONCE(data->cpu != smp_processor_id());
+
 	/*
 	 * Grab the skb we're about to send
 	 */
@@ -404,14 +406,14 @@ static int __init init_net_drop_monitor(void)
 
 	for_each_present_cpu(cpu) {
 		data = &per_cpu(dm_cpu_data, cpu);
-		reset_per_cpu_data(data);
+		data->cpu = cpu;
 		INIT_WORK(&data->dm_alert_work, send_dm_alert);
 		init_timer(&data->send_timer);
 		data->send_timer.data = cpu;
 		data->send_timer.function = sched_send_work;
+		reset_per_cpu_data(data);
 	}
 
-	initialized = 1;
 
 	goto out;
 