author     Robin Holt <holt@sgi.com>          2005-04-25 16:13:16 -0400
committer  Tony Luck <tony.luck@intel.com>    2005-04-25 16:13:16 -0400
commit     fde740e4dd4a05ca8957490d468fa9b2770f5bd6 (patch)
tree       04bc0221bc6c59379a17f3631fc4bd3c886e1d61 /arch/ia64
parent     ff3eb55ed97db3f12964beeffe3d34602d295367 (diff)
[IA64] Percpu quicklist for combined allocator for pgd/pmd/pte.
This patch introduces the use of quicklists for the pgd, pmd, and pte levels
by combining the alloc and free functions into a common set of routines.
This greatly simplifies reading of the affected header file.
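The combined routines themselves live in the pgalloc.h header change, which
falls outside this arch/ia64-limited diffstat. As a rough sketch of the shape
they take (details assumed, not shown in this diff): every level allocates
from, and frees to, a single per-CPU list of zeroed pages, linked through the
first word of each free page.

        /*
         * Sketch only -- the real definitions are in the pgalloc.h
         * change, not shown in this diffstat.  pgtable_quicklist and
         * pgtable_quicklist_size stand for the per-CPU variables
         * declared in init.c below; per-CPU accessors are elided.
         */
        static inline void *pgtable_quicklist_alloc(void)
        {
                unsigned long *ret = pgtable_quicklist;

                if (likely(ret != NULL)) {
                        /* pop the list head; restore the zeroed state */
                        pgtable_quicklist = (unsigned long *)(*ret);
                        ret[0] = 0;
                        --pgtable_quicklist_size;
                } else {
                        ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                }
                return ret;
        }

        static inline void pgtable_quicklist_free(void *pgtable_entry)
        {
                /* push onto the list, linked through the first word */
                *(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
                pgtable_quicklist = (unsigned long *)pgtable_entry;
                ++pgtable_quicklist_size;
        }

A shared free list is only safe if all three levels are full PAGE_SIZE
tables; the three BUG_ON checks added to mem_init() below assert exactly
that assumption.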
This patch is simple, but necessary for large NUMA configurations.
It ensures that only pages from the local node are added to a
CPU's quicklist. This prevents pages from being trapped on a remote
node's quicklist: start a process, touch a large number of pages to
fill pmd and pte entries, migrate to another node, and then unmap
or exit. Under those conditions the pages stay trapped, and if the
machine has more than 100 nodes of the same size, the calculated
pgtable high-water mark comes out larger than any single node's memory,
so page table cache flushing never occurs.
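For a sense of scale (illustrative reasoning, not numbers from the patch):
the code removed from mem_init() below sized the high-water mark from
machine-wide free memory, roughly nr_free_pages() / PTRS_PER_PGD plus a
per-task fudge factor, capped at a tenth of total memory. A single node of
an N-node machine holds only 1/N of that memory, so the mark grows N times
faster than any one node as the machine scales up; past some node count,
every trapped page a node can supply fits under the mark, and the quicklist
is never flushed. The new max_pgt_pages() avoids this by sizing against the
local node's free pages only.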
I ran lmbench's lat_proc fork and lat_proc exec on a zx1 with and without
this patch and did not notice any change.
On an sn2 machine there was a slight improvement, possibly due to pages
from other nodes having been trapped on the test node before the run
started. I did not investigate further.
This patch shrinks the quicklist based upon free memory on the node
instead of the high/low water marks. I have written it to enable
preemption periodically and to recalculate the amount to shrink every time
we have freed enough pages that the quicklist's target size may have changed.
I rescan the node's zones on each pass because other processes may be
draining node memory at the same time as we are returning pages to it.
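The resulting loop in check_pgt_cache() (see the init.c hunk below)
distills to the following skeleton; the identifiers are the patch's own,
the comments are added here:

        preempt_disable();              /* bind to one CPU's quicklist */
        while ((pages_to_free = min_pages_to_free()) > 0) {
                /* free one bounded batch (at most MAX_PGT_FREES_PER_PASS) */
                while (pages_to_free--)
                        free_page((unsigned long)pgtable_quicklist_alloc());
                /* open a preemption window, then recompute against fresh
                 * node free-memory figures before the next batch */
                preempt_enable();
                preempt_disable();
        }
        preempt_enable();

Bounding each batch keeps preemption-off latency small; recomputing after
the window matters because the task may have migrated to a different CPU
(and node) while preemption was enabled.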
Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64')
 arch/ia64/mm/contig.c    |  3
 arch/ia64/mm/discontig.c |  3
 arch/ia64/mm/init.c      | 74
 3 files changed, 51 insertions(+), 29 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 6daf15ac8940..91a055f5731f 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -61,7 +61,8 @@ show_mem (void)
 	printk("%d reserved pages\n", reserved);
 	printk("%d pages shared\n", shared);
 	printk("%d pages swap cached\n", cached);
-	printk("%ld pages in page table cache\n", pgtable_cache_size);
+	printk("%ld pages in page table cache\n",
+	       pgtable_quicklist_total_size());
 }
 
 /* physical address where the bootmem map is located */
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 3456a9b6971e..c00710929390 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -582,7 +582,8 @@ void show_mem(void)
 	printk("%d reserved pages\n", total_reserved);
 	printk("%d pages shared\n", total_shared);
 	printk("%d pages swap cached\n", total_cached);
-	printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
+	printk("Total of %ld pages in page table cache\n",
+	       pgtable_quicklist_total_size());
 	printk("%d free buffer pages\n", nr_free_buffer_pages());
 }
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 65cf839573ea..4892be53e227 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -39,6 +39,9 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
+DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
+DEFINE_PER_CPU(long, __pgtable_quicklist_size);
+
 extern void ia64_tlb_init (void);
 
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
@@ -50,27 +53,53 @@ struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
 
-static int pgt_cache_water[2] = { 25, 50 };
-
 struct page *zero_page_memmap_ptr;	/* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
 
+#define MIN_PGT_PAGES			25UL
+#define MAX_PGT_FREES_PER_PASS		16
+#define PGT_FRACTION_OF_NODE_MEM	16
+
+static inline long
+max_pgt_pages(void)
+{
+	u64 node_free_pages, max_pgt_pages;
+
+#ifndef CONFIG_NUMA
+	node_free_pages = nr_free_pages();
+#else
+	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
+#endif
+	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
+	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
+	return max_pgt_pages;
+}
+
+static inline long
+min_pages_to_free(void)
+{
+	long pages_to_free;
+
+	pages_to_free = pgtable_quicklist_size - max_pgt_pages();
+	pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
+	return pages_to_free;
+}
+
 void
-check_pgt_cache (void)
+check_pgt_cache(void)
 {
-	int low, high;
+	long pages_to_free;
 
-	low = pgt_cache_water[0];
-	high = pgt_cache_water[1];
+	if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
+		return;
 
 	preempt_disable();
-	if (pgtable_cache_size > (u64) high) {
-		do {
-			if (pgd_quicklist)
-				free_page((unsigned long)pgd_alloc_one_fast(NULL));
-			if (pmd_quicklist)
-				free_page((unsigned long)pmd_alloc_one_fast(NULL, 0));
-		} while (pgtable_cache_size > (u64) low);
+	while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
+		while (pages_to_free--) {
+			free_page((unsigned long)pgtable_quicklist_alloc());
+		}
+		preempt_enable();
+		preempt_disable();
 	}
 	preempt_enable();
 }
@@ -523,11 +552,14 @@ void
 mem_init (void)
 {
 	long reserved_pages, codesize, datasize, initsize;
-	unsigned long num_pgt_pages;
 	pg_data_t *pgdat;
 	int i;
 	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
 
+	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
+	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
+	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
+
 #ifdef CONFIG_PCI
 	/*
 	 * This needs to be called _after_ the command line has been parsed but _before_
@@ -564,18 +596,6 @@ mem_init (void)
 	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
 	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
 
-	/*
-	 * Allow for enough (cached) page table pages so that we can map the entire memory
-	 * at least once.  Each task also needs a couple of page tables pages, so add in a
-	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
-	 * Don't allow the cache to be more than 10% of total memory, though.
-	 */
-#	define NUM_TASKS	500	/* typical number of tasks */
-	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
-	if (num_pgt_pages > nr_free_pages() / 10)
-		num_pgt_pages = nr_free_pages() / 10;
-	if (num_pgt_pages > (u64) pgt_cache_water[1])
-		pgt_cache_water[1] = num_pgt_pages;
-
 	/*
 	 * For fsyscall entrpoints with no light-weight handler, use the ordinary