author		Andy Whitcroft <apw@shadowen.org>	2007-10-16 04:24:14 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:51 -0400
commit		29c71111d0557385328211b130246a90f9223b46
tree		5588a49ee548d38e15bd7541cec29e069b9e457c
parent		8f6aac419bd590f535fb110875a51f7db2b62b5b
vmemmap: generify initialisation via helpers
Convert the common vmemmap population into initialisation helpers for use by
the architecture vmemmap populators.  All architectures implementing the
SPARSEMEM_VMEMMAP variant supply an architecture-specific vmemmap_populate()
initialiser, which may make use of the helpers.
This allows us to clean up and remove the initialisation Kconfig entries.
With this patch there is a single SPARSEMEM_VMEMMAP_ENABLE Kconfig option to
indicate use of that variant.
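For example (a sketch, not part of this patch), an architecture with no
special mapping requirements could implement its populator as a thin
wrapper around the generic base-page helper:

	int __meminit vmemmap_populate(struct page *start_page,
					unsigned long size, int node)
	{
		/* map every page of the section's mem_map with PAGE_SIZE ptes */
		return vmemmap_populate_basepages(start_page, size, node);
	}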
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/mm.h	|   9
-rw-r--r--	mm/Kconfig		|  13
-rw-r--r--	mm/sparse-vmemmap.c	| 159
3 files changed, 83 insertions, 98 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d216abbd0574..fbff8e481cc4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1219,10 +1219,15 @@ extern int randomize_va_space;
 const char * arch_vma_name(struct vm_area_struct *vma);
 
 struct page *sparse_early_mem_map_populate(unsigned long pnum, int nid);
-int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
-int vmemmap_populate_pmd(pud_t *, unsigned long, unsigned long, int);
+pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
+pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
 void *vmemmap_alloc_block(unsigned long size, int node);
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+int vmemmap_populate_basepages(struct page *start_page,
+				unsigned long pages, int node);
+int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/mm/Kconfig b/mm/Kconfig
index a7609cbcb00d..b06730668412 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -112,6 +112,19 @@ config SPARSEMEM_EXTREME
 	def_bool y
 	depends on SPARSEMEM && !SPARSEMEM_STATIC
 
+#
+# SPARSEMEM_VMEMMAP uses a virtually mapped mem_map to optimise pfn_to_page
+# and page_to_pfn.  The most efficient option where kernel virtual space is
+# not under pressure.
+#
+config SPARSEMEM_VMEMMAP_ENABLE
+	def_bool n
+
+config SPARSEMEM_VMEMMAP
+	bool
+	depends on SPARSEMEM
+	default y if (SPARSEMEM_VMEMMAP_ENABLE)
+
 # eventually, we can have this option just 'select SPARSEMEM'
 config MEMORY_HOTPLUG
 	bool "Allow for memory hot-add"
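As a usage sketch (the symbol name below is illustrative, not taken from
this diff), an architecture opts in by selecting the new ENABLE option from
its own Kconfig, which in turn switches SPARSEMEM_VMEMMAP on whenever
SPARSEMEM is configured:

	# hypothetical architecture Kconfig fragment
	config ARCH_SPARSEMEM_ENABLE
		def_bool y
		select SPARSEMEM_VMEMMAP_ENABLE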
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 7bb7a4b96d74..4f2d4854f840 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -14,21 +14,8 @@
  * case the overhead consists of a few additional pages that are
  * allocated to create a view of memory for vmemmap.
  *
- * Special Kconfig settings:
- *
- * CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP
- *
- *	The architecture has its own functions to populate the memory
- *	map and provides a vmemmap_populate function.
- *
- * CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD
- *
- *	The architecture provides functions to populate the pmd level
- *	of the vmemmap mappings.  Allowing mappings using large pages
- *	where available.
- *
- * If neither are set then PAGE_SIZE mappings are generated which
- * require one PTE/TLB per PAGE_SIZE chunk of the virtual memory map.
+ * The architecture is expected to provide a vmemmap_populate() function
+ * to instantiate the mapping.
  */
 #include <linux/mm.h>
 #include <linux/mmzone.h>
@@ -60,7 +47,6 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 				__pa(MAX_DMA_ADDRESS));
 }
 
-#ifndef CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP
 void __meminit vmemmap_verify(pte_t *pte, int node,
 				unsigned long start, unsigned long end)
 {
@@ -72,103 +58,84 @@ void __meminit vmemmap_verify(pte_t *pte, int node,
 			"page_structs\n", start, end - 1);
 }
 
-#ifndef CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD
-static int __meminit vmemmap_populate_pte(pmd_t *pmd, unsigned long addr,
-					unsigned long end, int node)
+pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 {
-	pte_t *pte;
-
-	for (pte = pte_offset_kernel(pmd, addr); addr < end;
-					pte++, addr += PAGE_SIZE)
-		if (pte_none(*pte)) {
-			pte_t entry;
-			void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-			if (!p)
-				return -ENOMEM;
-
-			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
-			set_pte(pte, entry);
-
-		} else
-			vmemmap_verify(pte, node, addr + PAGE_SIZE, end);
-
-	return 0;
+	pte_t *pte = pte_offset_kernel(pmd, addr);
+	if (pte_none(*pte)) {
+		pte_t entry;
+		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (!p)
+			return 0;
+		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
+		set_pte_at(&init_mm, addr, pte, entry);
+	}
+	return pte;
 }
 
-int __meminit vmemmap_populate_pmd(pud_t *pud, unsigned long addr,
-					unsigned long end, int node)
+pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 {
-	pmd_t *pmd;
-	int error = 0;
-	unsigned long next;
-
-	for (pmd = pmd_offset(pud, addr); addr < end && !error;
-					pmd++, addr = next) {
-		if (pmd_none(*pmd)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-			if (!p)
-				return -ENOMEM;
-
-			pmd_populate_kernel(&init_mm, pmd, p);
-		} else
-			vmemmap_verify((pte_t *)pmd, node,
-					pmd_addr_end(addr, end), end);
-		next = pmd_addr_end(addr, end);
-		error = vmemmap_populate_pte(pmd, addr, next, node);
+	pmd_t *pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd)) {
+		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (!p)
+			return 0;
+		pmd_populate_kernel(&init_mm, pmd, p);
 	}
-	return error;
+	return pmd;
 }
-#endif /* CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD */
 
-static int __meminit vmemmap_populate_pud(pgd_t *pgd, unsigned long addr,
-					unsigned long end, int node)
+pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
 {
-	pud_t *pud;
-	int error = 0;
-	unsigned long next;
-
-	for (pud = pud_offset(pgd, addr); addr < end && !error;
-					pud++, addr = next) {
-		if (pud_none(*pud)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-			if (!p)
-				return -ENOMEM;
+	pud_t *pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (!p)
+			return 0;
+		pud_populate(&init_mm, pud, p);
+	}
+	return pud;
+}
 
-			pud_populate(&init_mm, pud, p);
-		}
-		next = pud_addr_end(addr, end);
-		error = vmemmap_populate_pmd(pud, addr, next, node);
+pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	if (pgd_none(*pgd)) {
+		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (!p)
+			return 0;
+		pgd_populate(&init_mm, pgd, p);
 	}
-	return error;
+	return pgd;
 }
 
-int __meminit vmemmap_populate(struct page *start_page,
-						unsigned long nr, int node)
+int __meminit vmemmap_populate_basepages(struct page *start_page,
+					unsigned long size, int node)
 {
-	pgd_t *pgd;
 	unsigned long addr = (unsigned long)start_page;
-	unsigned long end = (unsigned long)(start_page + nr);
-	unsigned long next;
-	int error = 0;
-
-	printk(KERN_DEBUG "[%lx-%lx] Virtual memory section"
-		" (%ld pages) node %d\n", addr, end - 1, nr, node);
-
-	for (pgd = pgd_offset_k(addr); addr < end && !error;
-					pgd++, addr = next) {
-		if (pgd_none(*pgd)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-			if (!p)
-				return -ENOMEM;
+	unsigned long end = (unsigned long)(start_page + size);
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
 
-			pgd_populate(&init_mm, pgd, p);
-		}
-		next = pgd_addr_end(addr,end);
-		error = vmemmap_populate_pud(pgd, addr, next, node);
+	for (; addr < end; addr += PAGE_SIZE) {
+		pgd = vmemmap_pgd_populate(addr, node);
+		if (!pgd)
+			return -ENOMEM;
+		pud = vmemmap_pud_populate(pgd, addr, node);
+		if (!pud)
+			return -ENOMEM;
+		pmd = vmemmap_pmd_populate(pud, addr, node);
+		if (!pmd)
+			return -ENOMEM;
+		pte = vmemmap_pte_populate(pmd, addr, node);
+		if (!pte)
+			return -ENOMEM;
+		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
 	}
-	return error;
+
+	return 0;
 }
-#endif /* !CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP */
 
 struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
 {