author     Mauro Carvalho Chehab <mchehab@redhat.com>  2012-05-15 07:39:25 -0400
committer  Mauro Carvalho Chehab <mchehab@redhat.com>  2012-05-15 07:39:25 -0400
commit     69ecdbac144147a80747914d9b6ea3472e2d93e7 (patch)
tree       e1bb68c3568f370cd6a7d2ae29a051bee244ce0f /net/core/drop_monitor.c
parent     b7dc4cd17506284a47eeb9160207e43c7d5486fe (diff)
parent     b6255ee3d82798eb1eee9fb1cca713317b5afae8 (diff)
Merge remote-tracking branch 'linus/master' into staging/for_v3.5
* linus/master: (805 commits)
  tty: Fix LED error return
  openvswitch: checking wrong variable in queue_userspace_packet()
  bonding: Fix LACPDU rx_dropped commit.
  Linux 3.4-rc7
  ARM: EXYNOS: fix ctrlbit for exynos5_clk_pdma1
  ARM: EXYNOS: use s5p-timer for UniversalC210 board
  ARM / mach-shmobile: Invalidate caches when booting secondary cores
  ARM / mach-shmobile: sh73a0 SMP TWD boot regression fix
  ARM / mach-shmobile: r8a7779 SMP TWD boot regression fix
  ARM: mach-shmobile: convert ag5evm to use the generic MMC GPIO hotplug helper
  ARM: mach-shmobile: convert mackerel to use the generic MMC GPIO hotplug helper
  MAINTAINERS: Add myself as the cpufreq maintainer
  dm mpath: check if scsi_dh module already loaded before trying to load
  dm thin: correct module description
  dm thin: fix unprotected use of prepared_discards list
  dm thin: reinstate missing mempool_free in cell_release_singleton
  gpio/exynos: Fix compiler warnings when non-exynos machines are selected
  gpio: pch9: Use proper flow type handlers
  powerpc/irq: Fix another case of lazy IRQ state getting out of sync
  ks8851: Update link status during link change interrupt
  ...

Conflicts:
	drivers/media/common/tuners/xc5000.c
	drivers/media/common/tuners/xc5000.h
	drivers/usb/gadget/uvc_queue.c
Diffstat (limited to 'net/core/drop_monitor.c')
-rw-r--r--  net/core/drop_monitor.c  89
1 file changed, 65 insertions(+), 24 deletions(-)
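
Note on the drop_monitor.c portion of this merge: it converts the per-CPU alert skb into an RCU-protected pointer (published with rcu_assign_pointer()/synchronize_rcu(), read under rcu_read_lock()) and replaces the trace_state spinlock with a mutex. The following is a minimal, illustrative-only sketch of that pointer-swap pattern, not the kernel code itself; the names pcpu_buf, swap_buf and record_hit are hypothetical.

#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/types.h>

struct pcpu_buf {
	struct sk_buff __rcu *skb;	/* buffer readers peek at under RCU */
	atomic_t hit_count;		/* remaining room in the buffer */
};

static DEFINE_PER_CPU(struct pcpu_buf, pcpu_buf);

/*
 * Writer (process context): publish a new buffer, wait for readers of
 * the old one to finish, then re-arm the hit counter.  Skipping the
 * swap when old == new avoids calling synchronize_rcu() repeatedly on
 * allocation failure, as the patch's comment explains.
 */
static void swap_buf(struct pcpu_buf *b, struct sk_buff *new_skb, int limit)
{
	struct sk_buff *old = rcu_dereference_protected(b->skb, 1);

	if (new_skb != old) {
		rcu_assign_pointer(b->skb, new_skb);
		synchronize_rcu();
		atomic_set(&b->hit_count, limit);
	}
}

/* Reader: look at this CPU's buffer without taking a lock. */
static bool record_hit(void)
{
	struct pcpu_buf *b = &get_cpu_var(pcpu_buf);
	struct sk_buff *skb;
	bool ok = false;

	rcu_read_lock();
	skb = rcu_dereference(b->skb);
	if (skb && atomic_add_unless(&b->hit_count, -1, 0))
		ok = true;	/* safe to touch skb's payload here */
	rcu_read_unlock();
	put_cpu_var(pcpu_buf);
	return ok;
}
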
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7f36b38e060f..a7cad741df01 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused);
  * netlink alerts
  */
 static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
 	struct work_struct dm_alert_work;
-	struct sk_buff *skb;
+	struct sk_buff __rcu *skb;
 	atomic_t dm_hit_count;
 	struct timer_list send_timer;
+	int cpu;
 };
 
 struct dm_hw_stat_delta {
@@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
 	size_t al;
 	struct net_dm_alert_msg *msg;
 	struct nlattr *nla;
+	struct sk_buff *skb;
+	struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
 
 	al = sizeof(struct net_dm_alert_msg);
 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
 	al += sizeof(struct nlattr);
 
-	data->skb = genlmsg_new(al, GFP_KERNEL);
-	genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
-			0, NET_DM_CMD_ALERT);
-	nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
-	msg = nla_data(nla);
-	memset(msg, 0, al);
-	atomic_set(&data->dm_hit_count, dm_hit_limit);
+	skb = genlmsg_new(al, GFP_KERNEL);
+
+	if (skb) {
+		genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+				0, NET_DM_CMD_ALERT);
+		nla = nla_reserve(skb, NLA_UNSPEC,
+				  sizeof(struct net_dm_alert_msg));
+		msg = nla_data(nla);
+		memset(msg, 0, al);
+	} else
+		schedule_work_on(data->cpu, &data->dm_alert_work);
+
+	/*
+	 * Don't need to lock this, since we are guaranteed to only
+	 * run this on a single cpu at a time.
+	 * Note also that we only update data->skb if the old and new skb
+	 * pointers don't match.  This ensures that we don't continually call
+	 * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
+	 */
+	if (skb != oskb) {
+		rcu_assign_pointer(data->skb, skb);
+
+		synchronize_rcu();
+
+		atomic_set(&data->dm_hit_count, dm_hit_limit);
+	}
+
 }
 
 static void send_dm_alert(struct work_struct *unused)
 {
 	struct sk_buff *skb;
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+	WARN_ON_ONCE(data->cpu != smp_processor_id());
 
 	/*
 	 * Grab the skb we're about to send
 	 */
-	skb = data->skb;
+	skb = rcu_dereference_protected(data->skb, 1);
 
 	/*
 	 * Replace it with a new one
@@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused)
 	/*
 	 * Ship it!
 	 */
-	genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+	if (skb)
+		genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
 
+	put_cpu_var(dm_cpu_data);
 }
 
 /*
@@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused)
  */
 static void sched_send_work(unsigned long unused)
 {
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+	schedule_work_on(smp_processor_id(), &data->dm_alert_work);
 
-	schedule_work(&data->dm_alert_work);
+	put_cpu_var(dm_cpu_data);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,8 +163,15 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	struct nlmsghdr *nlh;
 	struct nlattr *nla;
 	int i;
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct sk_buff *dskb;
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+
+	rcu_read_lock();
+	dskb = rcu_dereference(data->skb);
 
+	if (!dskb)
+		goto out;
 
 	if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
 		/*
@@ -144,12 +180,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 		goto out;
 	}
 
-	nlh = (struct nlmsghdr *)data->skb->data;
+	nlh = (struct nlmsghdr *)dskb->data;
 	nla = genlmsg_data(nlmsg_data(nlh));
 	msg = nla_data(nla);
 	for (i = 0; i < msg->entries; i++) {
 		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
 			msg->points[i].count++;
+			atomic_inc(&data->dm_hit_count);
 			goto out;
 		}
 	}
@@ -157,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	/*
 	 * We need to create a new entry
 	 */
-	__nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
 	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
 	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
 	msg->points[msg->entries].count = 1;
@@ -169,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	}
 
 out:
+	rcu_read_unlock();
+	put_cpu_var(dm_cpu_data);
 	return;
 }
 
@@ -213,7 +252,7 @@ static int set_all_monitor_traces(int state)
 	struct dm_hw_stat_delta *new_stat = NULL;
 	struct dm_hw_stat_delta *temp;
 
-	spin_lock(&trace_state_lock);
+	mutex_lock(&trace_state_mutex);
 
 	if (state == trace_state) {
 		rc = -EAGAIN;
@@ -252,7 +291,7 @@ static int set_all_monitor_traces(int state)
 		rc = -EINPROGRESS;
 
 out_unlock:
-	spin_unlock(&trace_state_lock);
+	mutex_unlock(&trace_state_mutex);
 
 	return rc;
 }
@@ -295,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 
 		new_stat->dev = dev;
 		new_stat->last_rx = jiffies;
-		spin_lock(&trace_state_lock);
+		mutex_lock(&trace_state_mutex);
 		list_add_rcu(&new_stat->list, &hw_stats_list);
-		spin_unlock(&trace_state_lock);
+		mutex_unlock(&trace_state_mutex);
 		break;
 	case NETDEV_UNREGISTER:
-		spin_lock(&trace_state_lock);
+		mutex_lock(&trace_state_mutex);
 		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
 			if (new_stat->dev == dev) {
 				new_stat->dev = NULL;
@@ -311,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 				}
 			}
 		}
-		spin_unlock(&trace_state_lock);
+		mutex_unlock(&trace_state_mutex);
 		break;
 	}
 out:
@@ -367,13 +406,15 @@ static int __init init_net_drop_monitor(void)
 
 	for_each_present_cpu(cpu) {
 		data = &per_cpu(dm_cpu_data, cpu);
-		reset_per_cpu_data(data);
+		data->cpu = cpu;
 		INIT_WORK(&data->dm_alert_work, send_dm_alert);
 		init_timer(&data->send_timer);
 		data->send_timer.data = cpu;
 		data->send_timer.function = sched_send_work;
+		reset_per_cpu_data(data);
 	}
 
+
 	goto out;
 
 out_unreg: