author	Chris Metcalf <cmetcalf@tilera.com>	2013-03-19 07:35:58 -0400
committer	David S. Miller <davem@davemloft.net>	2013-03-20 13:28:39 -0400
commit	8fdc929f5727d999d11ba3763b92f6eeacc096f9 (patch)
tree	8046d7dbd13f091d31245806a2802a2f4808f4cf
parent	7fa6f34081f168975af72be51715bdc6601931f7 (diff)
dynticks: avoid flow_cache_flush() interrupting every core
Previously, if you did an "ifconfig down" or similar on one core, and
the kernel had CONFIG_XFRM enabled, every core would be interrupted to
check its percpu flow list for items that could be garbage collected.
With this change, we generate a mask of cores that actually have any
percpu items, and only interrupt those cores.  When we are trying to
isolate a set of cpus from interrupts, this is important to do.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/core/flow.c	42
1 file changed, 39 insertions(+), 3 deletions(-)
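As background for the change below, here is a minimal sketch of the cpumask-targeted
cross-call pattern the patch adopts: build a mask of only the cpus that have per-cpu
work pending, then interrupt just those cpus with on_each_cpu_mask().  It is not part
of the patch; the per-cpu counter demo_nr_items and the demo_* helpers are hypothetical
stand-ins for the flow cache, while alloc_cpumask_var(), for_each_online_cpu(),
cpumask_set_cpu(), on_each_cpu_mask() and get/put_online_cpus() are the same kernel
APIs the diff uses.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-cpu work count; stands in for the per-cpu flow cache. */
static DEFINE_PER_CPU(unsigned int, demo_nr_items);

/* Handler run on each targeted cpu (by IPI, or inline on the local cpu). */
static void demo_flush_one(void *info)
{
	this_cpu_write(demo_nr_items, 0);
}

static void demo_flush_nonempty_cpus(void)
{
	cpumask_var_t mask;
	int cpu;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_clear(mask);

	get_online_cpus();	/* keep the online cpu set stable */

	/* Only cpus with pending items go into the mask. */
	for_each_online_cpu(cpu)
		if (per_cpu(demo_nr_items, cpu) != 0)
			cpumask_set_cpu(cpu, mask);

	/* Interrupt only those cpus; wait=true returns after all handlers run. */
	on_each_cpu_mask(mask, demo_flush_one, NULL, true);

	put_online_cpus();
	free_cpumask_var(mask);
}

The patch itself keeps the existing completion/tasklet bookkeeping and, in addition,
pulls the calling cpu out of the mask with cpumask_test_and_clear_cpu() so the local
cache is still flushed inline via flow_cache_flush_tasklet() rather than through the
cross-call path.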
diff --git a/net/core/flow.c b/net/core/flow.c
index c56ea6f7f6c7..7fae13537b6b 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -323,6 +323,24 @@ static void flow_cache_flush_tasklet(unsigned long data)
 	complete(&info->completion);
 }
 
+/*
+ * Return whether a cpu needs flushing. Conservatively, we assume
+ * the presence of any entries means the core may require flushing,
+ * since the flow_cache_ops.check() function may assume it's running
+ * on the same core as the per-cpu cache component.
+ */
+static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
+{
+	struct flow_cache_percpu *fcp;
+	int i;
+
+	fcp = &per_cpu(*fc->percpu, cpu);
+	for (i = 0; i < flow_cache_hash_size(fc); i++)
+		if (!hlist_empty(&fcp->hash_table[i]))
+			return 0;
+	return 1;
+}
+
 static void flow_cache_flush_per_cpu(void *data)
 {
 	struct flow_flush_info *info = data;
@@ -337,22 +355,40 @@ void flow_cache_flush(void)
 {
 	struct flow_flush_info info;
 	static DEFINE_MUTEX(flow_flush_sem);
+	cpumask_var_t mask;
+	int i, self;
+
+	/* Track which cpus need flushing to avoid disturbing all cores. */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return;
+	cpumask_clear(mask);
 
 	/* Don't want cpus going down or up during this. */
 	get_online_cpus();
 	mutex_lock(&flow_flush_sem);
 	info.cache = &flow_cache_global;
-	atomic_set(&info.cpuleft, num_online_cpus());
+	for_each_online_cpu(i)
+		if (!flow_cache_percpu_empty(info.cache, i))
+			cpumask_set_cpu(i, mask);
+	atomic_set(&info.cpuleft, cpumask_weight(mask));
+	if (atomic_read(&info.cpuleft) == 0)
+		goto done;
+
 	init_completion(&info.completion);
 
 	local_bh_disable();
-	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
-	flow_cache_flush_tasklet((unsigned long)&info);
+	self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
+	on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
+	if (self)
+		flow_cache_flush_tasklet((unsigned long)&info);
 	local_bh_enable();
 
 	wait_for_completion(&info.completion);
+
+done:
 	mutex_unlock(&flow_flush_sem);
 	put_online_cpus();
+	free_cpumask_var(mask);
 }
 
 static void flow_cache_flush_task(struct work_struct *work)