path: root/mm/slub.c
author     Christoph Lameter <clameter@sgi.com>    2007-05-09 05:35:14 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-09 15:30:56 -0400
commit     4037d452202e34214e8a939fa5621b2b3bbb45b7 (patch)
tree       31b59c0ca94fba4d53b6738b0bad3d1e9fde3063 /mm/slub.c
parent     77461ab33229d48614402decfb1b2eaa6d446861 (diff)
Move remote node draining out of slab allocators
Currently the slab allocators contain callbacks into the page allocator to perform the draining of pagesets on remote nodes. This requires SLUB to have a whole subsystem in order to be compatible with SLAB. Moving node draining out of the slab allocators avoids a section of code in SLUB.

Move the node draining so that it is done when the vm statistics are updated. At that point we are already touching all the cachelines with the pagesets of a processor.

Add an expire counter there. If we have to update per zone or global vm statistics then assume that the pageset will require subsequent draining.

The expire counter will be decremented on each vm stats update pass until it reaches zero. Then we will drain one batch from the pageset. The draining will cause vm counter updates which will then cause another expiration until the pcp is empty. So we will drain a batch every 3 seconds.

Note that remote node draining is a somewhat esoteric feature that is required on large NUMA systems because otherwise significant portions of system memory can become trapped in pcp queues. The number of pcps is determined by the number of processors and nodes in a system. A system with 4 processors and 2 nodes has 8 pcps, which is okay. But a system with 1024 processors and 512 nodes has 512k pcps with a high potential for large amounts of memory being caught in them.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
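As a concrete illustration of the scheme described in the changelog, here is a minimal stand-alone C model of the expire-counter bookkeeping. It is only a sketch under assumptions: the names pcp_model and vmstat_update_pass are invented for the example and are not the identifiers this patch series adds to mm/vmstat.c; in the kernel the hook runs inside the vm statistics update path and drains through the page allocator.

/*
 * Hypothetical stand-alone model of the expire-counter scheme described
 * above; not the actual mm/vmstat.c code, just the bookkeeping idea.
 */
#include <stdio.h>
#include <stdbool.h>

#define PCP_EXPIRE_PASSES 3  /* update passes of inactivity before a drain */

struct pcp_model {
	int count;   /* pages queued in a remote node's pageset */
	int batch;   /* pages removed per drain */
	int expire;  /* update passes left until the next drain */
};

/*
 * Model of one vm statistics update pass for a single remote pageset.
 * Returns true when a batch was drained (which, in the kernel, would
 * itself dirty the vm counters and re-arm the expire counter).
 */
static bool vmstat_update_pass(struct pcp_model *p, bool stats_were_dirty)
{
	if (stats_were_dirty)
		p->expire = PCP_EXPIRE_PASSES;  /* pageset was recently active */

	if (!p->expire || !p->count)
		return false;                   /* nothing queued or no drain pending */

	if (--p->expire)
		return false;                   /* still counting down */

	p->count -= (p->count < p->batch) ? p->count : p->batch;
	return true;                            /* drained one batch */
}

int main(void)
{
	struct pcp_model p = { .count = 10, .batch = 4, .expire = 0 };
	bool dirty = true;  /* first pass flushes pending counter updates */
	int pass;

	for (pass = 1; p.count; pass++) {
		bool drained = vmstat_update_pass(&p, dirty);
		dirty = drained;  /* draining touches vm counters, re-arming expire */
		printf("pass %d: count=%d expire=%d\n", pass, p.count, p.expire);
	}
	return 0;
}

With a statistics update interval of about one second this reproduces the "one batch roughly every 3 seconds" behaviour the changelog describes, and the loop terminates once the modelled pageset is empty.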
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 84
1 file changed, 0 insertions, 84 deletions
diff --git a/mm/slub.c b/mm/slub.c
index dbb206503a8d..bd2efae02bcd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2530,90 +2530,6 @@ static struct notifier_block __cpuinitdata slab_notifier =
 
 #endif
 
-#ifdef CONFIG_NUMA
-
-/*****************************************************************
- * Generic reaper used to support the page allocator
- * (the cpu slabs are reaped by a per slab workqueue).
- *
- * Maybe move this to the page allocator?
- ****************************************************************/
-
-static DEFINE_PER_CPU(unsigned long, reap_node);
-
-static void init_reap_node(int cpu)
-{
-	int node;
-
-	node = next_node(cpu_to_node(cpu), node_online_map);
-	if (node == MAX_NUMNODES)
-		node = first_node(node_online_map);
-
-	__get_cpu_var(reap_node) = node;
-}
-
-static void next_reap_node(void)
-{
-	int node = __get_cpu_var(reap_node);
-
-	/*
-	 * Also drain per cpu pages on remote zones
-	 */
-	if (node != numa_node_id())
-		drain_node_pages(node);
-
-	node = next_node(node, node_online_map);
-	if (unlikely(node >= MAX_NUMNODES))
-		node = first_node(node_online_map);
-	__get_cpu_var(reap_node) = node;
-}
-#else
-#define init_reap_node(cpu) do { } while (0)
-#define next_reap_node(void) do { } while (0)
-#endif
-
-#define REAPTIMEOUT_CPUC	(2*HZ)
-
-#ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
-
-static void cache_reap(struct work_struct *unused)
-{
-	next_reap_node();
-	schedule_delayed_work(&__get_cpu_var(reap_work),
-				REAPTIMEOUT_CPUC);
-}
-
-static void __devinit start_cpu_timer(int cpu)
-{
-	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
-
-	/*
-	 * When this gets called from do_initcalls via cpucache_init(),
-	 * init_workqueues() has already run, so keventd will be setup
-	 * at that time.
-	 */
-	if (keventd_up() && reap_work->work.func == NULL) {
-		init_reap_node(cpu);
-		INIT_DELAYED_WORK(reap_work, cache_reap);
-		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
-	}
-}
-
-static int __init cpucache_init(void)
-{
-	int cpu;
-
-	/*
-	 * Register the timers that drain pcp pages and update vm statistics
-	 */
-	for_each_online_cpu(cpu)
-		start_cpu_timer(cpu);
-	return 0;
-}
-__initcall(cpucache_init);
-#endif
-
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);