author	Tejun Heo <tj@kernel.org>	2009-06-21 22:56:24 -0400
committer	Tejun Heo <tj@kernel.org>	2009-06-21 22:56:24 -0400
commit	0ff2587fd54bd6f66bc6914ada4eb77a7e819a5b (patch)
tree	942ec74ec5050317dc4d92d049c1bdda9db605c1
parent	97c9bf0618cd40b05b4859c1f8a90d8ad97fefb2 (diff)
x86: prepare setup_pcpu_lpage() for pageattr fix
Make the following changes in preparation of coming pageattr updates.

* Define and use array of struct pcpul_ent instead of array of
  pointers.  The only difference is ->cpu field which is set but
  unused yet.

* Rename variables according to the above change.

* Rename local variable vm to pcpul_vm and move it out of the
  function.

[ Impact: no functional difference ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jan Beulich <JBeulich@novell.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
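The heart of the change is swapping a bare pointer array for an array of small
records, so that later pageattr work can carry per-entry metadata (here, the
owning CPU) next to each chunk pointer. The following standalone userspace
sketch is not kernel code: NR_CPUS_DEMO and the malloc()-based allocation are
stand-ins for num_possible_cpus() and the bootmem allocator; it only
illustrates the before/after shape of the refactor.

	#include <stdio.h>
	#include <stdlib.h>

	#define NR_CPUS_DEMO 4	/* stand-in for num_possible_cpus() */

	/* After the patch: one record per CPU instead of a bare void *.
	 * The ->cpu field is set but not yet consumed, as in the commit. */
	struct pcpul_ent {
		unsigned int cpu;
		void *ptr;
	};

	int main(void)
	{
		/* before: void **pcpul_ptrs = calloc(NR_CPUS_DEMO, sizeof(void *)); */
		struct pcpul_ent *pcpul_map =
			calloc(NR_CPUS_DEMO, sizeof(pcpul_map[0]));
		unsigned int cpu;

		if (!pcpul_map)
			return 1;

		for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
			pcpul_map[cpu].cpu = cpu;	/* new metadata slot */
			pcpul_map[cpu].ptr = malloc(64);	/* stands in for pcpu_alloc_bootmem() */
			if (!pcpul_map[cpu].ptr)
				goto out;
		}

		for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
			printf("cpu%u -> %p\n", pcpul_map[cpu].cpu,
			       pcpul_map[cpu].ptr);

	out:
		for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
			free(pcpul_map[cpu].ptr);	/* free(NULL) is a no-op */
		free(pcpul_map);
		return 0;
	}

Keeping the allocation loop, error path, and per-CPU metadata in one array of
structs is what lets the later pageattr fix attach more state per entry
without another parallel array.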
-rw-r--r--	arch/x86/kernel/setup_percpu.c	| 58
1 file changed, 33 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 8794c0c94d2c..7d38941e2b8c 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -137,8 +137,14 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
  * better than only using 4k mappings while still being NUMA friendly.
  */
 #ifdef CONFIG_NEED_MULTIPLE_NODES
+struct pcpul_ent {
+	unsigned int cpu;
+	void *ptr;
+};
+
 static size_t pcpul_size __initdata;
-static void **pcpul_ptrs __initdata;
+static struct pcpul_ent *pcpul_map __initdata;
+static struct vm_struct pcpul_vm;
 
 static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
 {
@@ -147,13 +153,12 @@ static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
 	if (off >= pcpul_size)
 		return NULL;
 
-	return virt_to_page(pcpul_ptrs[cpu] + off);
+	return virt_to_page(pcpul_map[cpu].ptr + off);
 }
 
 static ssize_t __init setup_pcpu_lpage(size_t static_size)
 {
-	static struct vm_struct vm;
-	size_t ptrs_size, dyn_size;
+	size_t map_size, dyn_size;
 	unsigned int cpu;
 	ssize_t ret;
 
@@ -180,12 +185,14 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size)
 	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
 
 	/* allocate pointer array and alloc large pages */
-	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_ptrs[0]));
-	pcpul_ptrs = alloc_bootmem(ptrs_size);
+	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
+	pcpul_map = alloc_bootmem(map_size);
 
 	for_each_possible_cpu(cpu) {
-		pcpul_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
-		if (!pcpul_ptrs[cpu])
+		pcpul_map[cpu].cpu = cpu;
+		pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
+							PMD_SIZE);
+		if (!pcpul_map[cpu].ptr)
 			goto enomem;
 
 		/*
@@ -196,42 +203,43 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size)
 		 * not well-specified to have a PAT-incompatible area
 		 * (unmapped RAM, device memory, etc.) in that hole.
 		 */
-		free_bootmem(__pa(pcpul_ptrs[cpu] + pcpul_size),
+		free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
 			     PMD_SIZE - pcpul_size);
 
-		memcpy(pcpul_ptrs[cpu], __per_cpu_load, static_size);
+		memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
 	}
 
 	/* allocate address and map */
-	vm.flags = VM_ALLOC;
-	vm.size = num_possible_cpus() * PMD_SIZE;
-	vm_area_register_early(&vm, PMD_SIZE);
+	pcpul_vm.flags = VM_ALLOC;
+	pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
+	vm_area_register_early(&pcpul_vm, PMD_SIZE);
 
 	for_each_possible_cpu(cpu) {
-		pmd_t *pmd;
+		pmd_t *pmd, pmd_v;
 
-		pmd = populate_extra_pmd((unsigned long)vm.addr
-					 + cpu * PMD_SIZE);
-		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpul_ptrs[cpu])),
-				     PAGE_KERNEL_LARGE));
+		pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
+					 cpu * PMD_SIZE);
+		pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
+				PAGE_KERNEL_LARGE);
+		set_pmd(pmd, pmd_v);
 	}
 
 	/* we're ready, commit */
 	pr_info("PERCPU: Remapped at %p with large pages, static data "
-		"%zu bytes\n", vm.addr, static_size);
+		"%zu bytes\n", pcpul_vm.addr, static_size);
 
 	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
 				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
-				     PMD_SIZE, vm.addr, NULL);
-	goto out_free_ar;
+				     PMD_SIZE, pcpul_vm.addr, NULL);
+	goto out_free_map;
 
 enomem:
 	for_each_possible_cpu(cpu)
-		if (pcpul_ptrs[cpu])
-			free_bootmem(__pa(pcpul_ptrs[cpu]), pcpul_size);
+		if (pcpul_map[cpu].ptr)
+			free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
 	ret = -ENOMEM;
-out_free_ar:
-	free_bootmem(__pa(pcpul_ptrs), ptrs_size);
+out_free_map:
+	free_bootmem(__pa(pcpul_map), map_size);
 	return ret;
 }
 #else