Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  84
1 files changed, 0 insertions, 84 deletions
diff --git a/mm/slub.c b/mm/slub.c
index dbb206503a8d..bd2efae02bcd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2530,90 +2530,6 @@ static struct notifier_block __cpuinitdata slab_notifier =
 
 #endif
 
-#ifdef CONFIG_NUMA
-
-/*****************************************************************
- * Generic reaper used to support the page allocator
- * (the cpu slabs are reaped by a per slab workqueue).
- *
- * Maybe move this to the page allocator?
- ****************************************************************/
-
-static DEFINE_PER_CPU(unsigned long, reap_node);
-
-static void init_reap_node(int cpu)
-{
-	int node;
-
-	node = next_node(cpu_to_node(cpu), node_online_map);
-	if (node == MAX_NUMNODES)
-		node = first_node(node_online_map);
-
-	__get_cpu_var(reap_node) = node;
-}
-
-static void next_reap_node(void)
-{
-	int node = __get_cpu_var(reap_node);
-
-	/*
-	 * Also drain per cpu pages on remote zones
-	 */
-	if (node != numa_node_id())
-		drain_node_pages(node);
-
-	node = next_node(node, node_online_map);
-	if (unlikely(node >= MAX_NUMNODES))
-		node = first_node(node_online_map);
-	__get_cpu_var(reap_node) = node;
-}
-#else
-#define init_reap_node(cpu) do { } while (0)
-#define next_reap_node(void) do { } while (0)
-#endif
-
-#define REAPTIMEOUT_CPUC	(2*HZ)
-
-#ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
-
-static void cache_reap(struct work_struct *unused)
-{
-	next_reap_node();
-	schedule_delayed_work(&__get_cpu_var(reap_work),
-				      REAPTIMEOUT_CPUC);
-}
-
-static void __devinit start_cpu_timer(int cpu)
-{
-	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
-
-	/*
-	 * When this gets called from do_initcalls via cpucache_init(),
-	 * init_workqueues() has already run, so keventd will be setup
-	 * at that time.
-	 */
-	if (keventd_up() && reap_work->work.func == NULL) {
-		init_reap_node(cpu);
-		INIT_DELAYED_WORK(reap_work, cache_reap);
-		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
-	}
-}
-
-static int __init cpucache_init(void)
-{
-	int cpu;
-
-	/*
-	 * Register the timers that drain pcp pages and update vm statistics
-	 */
-	for_each_online_cpu(cpu)
-		start_cpu_timer(cpu);
-	return 0;
-}
-__initcall(cpucache_init);
-#endif
-
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);