author     Christoph Lameter <clameter@sgi.com>   2007-05-11 01:42:53 -0400
committer  Tony Luck <tony.luck@intel.com>        2007-05-11 12:40:00 -0400
commit     2bd62a40f63bd628c43a2f3637b252d0967659b0 (patch)
tree       73b5dbda741421812f487ff0a9b753109fb105b4
parent     cdc7dbdfe6edac177acb32e4ca56b525d0743fe7 (diff)
[IA64] Quicklist support for IA64
IA64 is the origin of the quicklist implementation, so cut out the pieces
that now live in core code and convert the remaining callers to the generic
quicklist functions.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
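For context, the generic interface in <linux/quicklist.h> that replaces the
IA64-private helpers takes a quicklist index (IA64 only uses list 0), an
optional constructor/destructor, and, for quicklist_trim(), the minimum
number of pages to keep plus the maximum to free per pass. The sketch below
is illustrative only and is not part of this patch; the example_* names are
placeholders, and it assumes the 2.6.22-era quicklist API.

/*
 * Illustrative sketch of the generic quicklist API this patch converts to
 * (assumed 2.6.22-era <linux/quicklist.h>); not part of the diff itself.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/quicklist.h>

#define QUICK_PGT	0	/* IA64 uses only quicklist index 0 */

static inline pgd_t *example_pgd_alloc(struct mm_struct *mm)
{
	/* Reuse a page from the per-cpu list if available, else get a zeroed page. */
	return quicklist_alloc(QUICK_PGT, GFP_KERNEL, NULL);
}

static inline void example_pgd_free(pgd_t *pgd)
{
	/* Push the page back onto the current CPU's list; no destructor needed. */
	quicklist_free(QUICK_PGT, NULL, pgd);
}

static inline void example_check_pgt_cache(void)
{
	/*
	 * Keep at least 25 cached pages and free at most 16 per call -- the
	 * same MIN_PGT_PAGES / MAX_PGT_FREES_PER_PASS policy the removed
	 * arch/ia64/mm/init.c code implemented by hand.
	 */
	quicklist_trim(QUICK_PGT, NULL, 25, 16);
}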
-rw-r--r--  arch/ia64/Kconfig           |  4
-rw-r--r--  arch/ia64/mm/contig.c       |  2
-rw-r--r--  arch/ia64/mm/discontig.c    |  2
-rw-r--r--  arch/ia64/mm/init.c         | 51
-rw-r--r--  include/asm-ia64/pgalloc.h  | 82
5 files changed, 22 insertions(+), 119 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6e41471449c0..de1bff659969 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -31,6 +31,10 @@ config ZONE_DMA
 	def_bool y
 	depends on !IA64_SGI_SN2
 
+config QUICKLIST
+	bool
+	default y
+
 config MMU
 	bool
 	default y
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 44ce5ed9444c..7ac8592a35b6 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -88,7 +88,7 @@ void show_mem(void)
 	printk(KERN_INFO "%d pages shared\n", total_shared);
 	printk(KERN_INFO "%d pages swap cached\n", total_cached);
 	printk(KERN_INFO "Total of %ld pages in page table cache\n",
-	       pgtable_quicklist_total_size());
+	       quicklist_total_size());
 	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
 
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 94844442812a..38085ac18338 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -561,7 +561,7 @@ void show_mem(void)
 	printk(KERN_INFO "%d pages shared\n", total_shared);
 	printk(KERN_INFO "%d pages swap cached\n", total_cached);
 	printk(KERN_INFO "Total of %ld pages in page table cache\n",
-	       pgtable_quicklist_total_size());
+	       quicklist_total_size());
 	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index cffb1e8325e8..c14abefabafa 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -39,9 +39,6 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
-DEFINE_PER_CPU(long, __pgtable_quicklist_size);
-
 extern void ia64_tlb_init (void);
 
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
@@ -56,54 +53,6 @@ EXPORT_SYMBOL(vmem_map);
 struct page *zero_page_memmap_ptr;	/* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
 
-#define MIN_PGT_PAGES			25UL
-#define MAX_PGT_FREES_PER_PASS		16L
-#define PGT_FRACTION_OF_NODE_MEM	16
-
-static inline long
-max_pgt_pages(void)
-{
-	u64 node_free_pages, max_pgt_pages;
-
-#ifndef CONFIG_NUMA
-	node_free_pages = nr_free_pages();
-#else
-	node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
-#endif
-	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
-	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
-	return max_pgt_pages;
-}
-
-static inline long
-min_pages_to_free(void)
-{
-	long pages_to_free;
-
-	pages_to_free = pgtable_quicklist_size - max_pgt_pages();
-	pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
-	return pages_to_free;
-}
-
-void
-check_pgt_cache(void)
-{
-	long pages_to_free;
-
-	if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
-		return;
-
-	preempt_disable();
-	while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
-		while (pages_to_free--) {
-			free_page((unsigned long)pgtable_quicklist_alloc());
-		}
-		preempt_enable();
-		preempt_disable();
-	}
-	preempt_enable();
-}
-
 void
 lazy_mmu_prot_update (pte_t pte)
 {
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 560c287b1233..67552cad5173 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -18,71 +18,18 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/threads.h>
+#include <linux/quicklist.h>
 
 #include <asm/mmu_context.h>
 
-DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
-#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
-DECLARE_PER_CPU(long, __pgtable_quicklist_size);
-#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
-
-static inline long pgtable_quicklist_total_size(void)
-{
-	long ql_size = 0;
-	int cpuid;
-
-	for_each_online_cpu(cpuid) {
-		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
-	}
-	return ql_size;
-}
-
-static inline void *pgtable_quicklist_alloc(void)
-{
-	unsigned long *ret = NULL;
-
-	preempt_disable();
-
-	ret = pgtable_quicklist;
-	if (likely(ret != NULL)) {
-		pgtable_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		--pgtable_quicklist_size;
-		preempt_enable();
-	} else {
-		preempt_enable();
-		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-	}
-
-	return ret;
-}
-
-static inline void pgtable_quicklist_free(void *pgtable_entry)
-{
-#ifdef CONFIG_NUMA
-	int nid = page_to_nid(virt_to_page(pgtable_entry));
-
-	if (unlikely(nid != numa_node_id())) {
-		free_page((unsigned long)pgtable_entry);
-		return;
-	}
-#endif
-
-	preempt_disable();
-	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
-	pgtable_quicklist = (unsigned long *)pgtable_entry;
-	++pgtable_quicklist_size;
-	preempt_enable();
-}
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return pgtable_quicklist_alloc();
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pgd_free(pgd_t * pgd)
 {
-	pgtable_quicklist_free(pgd);
+	quicklist_free(0, NULL, pgd);
 }
 
 #ifdef CONFIG_PGTABLE_4
@@ -94,12 +41,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return pgtable_quicklist_alloc();
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pud_free(pud_t * pud)
 {
-	pgtable_quicklist_free(pud);
+	quicklist_free(0, NULL, pud);
 }
 #define __pud_free_tlb(tlb, pud)	pud_free(pud)
 #endif /* CONFIG_PGTABLE_4 */
@@ -112,12 +59,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return pgtable_quicklist_alloc();
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pmd_free(pmd_t * pmd)
 {
-	pgtable_quicklist_free(pmd);
+	quicklist_free(0, NULL, pmd);
 }
 
 #define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)
@@ -137,28 +84,31 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
 					  unsigned long addr)
 {
-	void *pg = pgtable_quicklist_alloc();
+	void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
 	return pg ? virt_to_page(pg) : NULL;
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long addr)
 {
-	return pgtable_quicklist_alloc();
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pte_free(struct page *pte)
 {
-	pgtable_quicklist_free(page_address(pte));
+	quicklist_free_page(0, NULL, pte);
 }
 
 static inline void pte_free_kernel(pte_t * pte)
 {
-	pgtable_quicklist_free(pte);
+	quicklist_free(0, NULL, pte);
 }
 
-#define __pte_free_tlb(tlb, pte)	pte_free(pte)
+static inline void check_pgt_cache(void)
+{
+	quicklist_trim(0, NULL, 25, 16);
+}
 
-extern void check_pgt_cache(void);
+#define __pte_free_tlb(tlb, pte)	pte_free(pte)
 
 #endif /* _ASM_IA64_PGALLOC_H */