Diffstat (limited to 'arch/x86/mm/init_64.c')
 arch/x86/mm/init_64.c | 418 ++++++++++++++++++++++++----------------------
 1 file changed, 232 insertions(+), 186 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 0f9c8c890658..cc50a13ce8d9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -43,12 +43,10 @@
 #include <asm/proto.h>
 #include <asm/smp.h>
 #include <asm/sections.h>
+#include <asm/kdebug.h>
+#include <asm/numa.h>
 
-#ifndef Dprintk
-#define Dprintk(x...)
-#endif
-
-const struct dma_mapping_ops* dma_ops;
+const struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static unsigned long dma_reserve __initdata;
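The private Dprintk() wrapper deleted above is replaced throughout this patch by pr_debug(). In kernels of this vintage (before dynamic debug), pr_debug() compiles to a no-op unless DEBUG is defined for the file, which is exactly the behaviour the private macro emulated. A minimal user-space sketch of that mechanism, assuming a gcc-style variadic macro; the fprintf() stand-in and the -DDEBUG build switch are illustrative, not kernel API:

#include <stdio.h>

/* compiled out unless built with -DDEBUG, like the old Dprintk() */
#ifdef DEBUG
#define pr_debug(fmt, ...) fprintf(stderr, "debug: " fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) do { } while (0)
#endif

int main(void)
{
	void *ptr = &ptr;

	pr_debug("spp_getpage %p\n", ptr);	/* silent without -DDEBUG */
	puts("done");
	return 0;
}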
@@ -65,22 +63,26 @@ void show_mem(void)
 {
 	long i, total = 0, reserved = 0;
 	long shared = 0, cached = 0;
-	pg_data_t *pgdat;
 	struct page *page;
+	pg_data_t *pgdat;
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+	printk(KERN_INFO "Free swap: %6ldkB\n",
+		nr_swap_pages << (PAGE_SHIFT-10));
 
 	for_each_online_pgdat(pgdat) {
 		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-			/* this loop can take a while with 256 GB and 4k pages
-			   so update the NMI watchdog */
-			if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) {
+			/*
+			 * This loop can take a while with 256 GB and
+			 * 4k pages so defer the NMI watchdog:
+			 */
+			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
 				touch_nmi_watchdog();
-			}
+
 			if (!pfn_valid(pgdat->node_start_pfn + i))
 				continue;
+
 			page = pfn_to_page(pgdat->node_start_pfn + i);
 			total++;
 			if (PageReserved(page))
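The reworded comment above describes a standard kernel idiom: a boot-time loop over every page frame can run long enough to trigger the NMI watchdog, so the loop pets the watchdog once per MAX_ORDER block instead of once per page. A user-space sketch of the shape of that pattern, with touch_nmi_watchdog() reduced to a hypothetical no-op stub and the constants chosen arbitrarily:

#include <stdio.h>

#define MAX_ORDER_NR_PAGES 1024		/* placeholder value */

static void touch_nmi_watchdog(void)	/* stand-in for the kernel helper */
{
}

int main(void)
{
	long i, nr_pages = 1L << 20;	/* pretend: 4 GB of 4k pages */

	for (i = 0; i < nr_pages; ++i) {
		/* cheap modulo check per page, watchdog touch per block */
		if (i % MAX_ORDER_NR_PAGES == 0)
			touch_nmi_watchdog();
		/* ...per-page accounting work would go here... */
	}
	printf("scanned %ld pages\n", nr_pages);
	return 0;
}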
@@ -89,51 +91,58 @@ void show_mem(void)
 				cached++;
 			else if (page_count(page))
 				shared += page_count(page) - 1;
 		}
 	}
 	printk(KERN_INFO "%lu pages of RAM\n", total);
-	printk(KERN_INFO "%lu reserved pages\n",reserved);
-	printk(KERN_INFO "%lu pages shared\n",shared);
-	printk(KERN_INFO "%lu pages swap cached\n",cached);
+	printk(KERN_INFO "%lu reserved pages\n", reserved);
+	printk(KERN_INFO "%lu pages shared\n", shared);
+	printk(KERN_INFO "%lu pages swap cached\n", cached);
 }
 
 int after_bootmem;
 
 static __init void *spp_getpage(void)
 {
 	void *ptr;
+
 	if (after_bootmem)
 		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
 	else
 		ptr = alloc_bootmem_pages(PAGE_SIZE);
-	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
-		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
 
-	Dprintk("spp_getpage %p\n", ptr);
+	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
+		panic("set_pte_phys: cannot allocate page data %s\n",
+			after_bootmem ? "after bootmem" : "");
+	}
+
+	pr_debug("spp_getpage %p\n", ptr);
+
 	return ptr;
 }
 
-static __init void set_pte_phys(unsigned long vaddr,
-			unsigned long phys, pgprot_t prot)
+static __init void
+set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte, new_pte;
 
-	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
+	pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);
 
 	pgd = pgd_offset_k(vaddr);
 	if (pgd_none(*pgd)) {
-		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
+		printk(KERN_ERR
+			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
 		return;
 	}
 	pud = pud_offset(pgd, vaddr);
 	if (pud_none(*pud)) {
 		pmd = (pmd_t *) spp_getpage();
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
 		if (pmd != pmd_offset(pud, 0)) {
-			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
+			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
+				pmd, pmd_offset(pud, 0));
 			return;
 		}
 	}
@@ -142,7 +151,7 @@ static __init void set_pte_phys(unsigned long vaddr,
 		pte = (pte_t *) spp_getpage();
 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
 		if (pte != pte_offset_kernel(pmd, 0)) {
-			printk("PAGETABLE BUG #02!\n");
+			printk(KERN_ERR "PAGETABLE BUG #02!\n");
 			return;
 		}
 	}
@@ -162,33 +171,35 @@ static __init void set_pte_phys(unsigned long vaddr,
 }
 
 /* NOTE: this is meant to be run only at boot */
 void __init
-__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 	unsigned long address = __fix_to_virt(idx);
 
 	if (idx >= __end_of_fixed_addresses) {
-		printk("Invalid __set_fixmap\n");
+		printk(KERN_ERR "Invalid __set_fixmap\n");
 		return;
 	}
 	set_pte_phys(address, phys, prot);
 }
 
-unsigned long __meminitdata table_start, table_end;
+static unsigned long __initdata table_start;
+static unsigned long __meminitdata table_end;
 
 static __meminit void *alloc_low_page(unsigned long *phys)
 {
 	unsigned long pfn = table_end++;
 	void *adr;
 
 	if (after_bootmem) {
 		adr = (void *)get_zeroed_page(GFP_ATOMIC);
 		*phys = __pa(adr);
+
 		return adr;
 	}
 
 	if (pfn >= end_pfn)
 		panic("alloc_low_page: ran out of memory");
 
 	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
 	memset(adr, 0, PAGE_SIZE);
@@ -197,44 +208,49 @@ static __meminit void *alloc_low_page(unsigned long *phys)
 }
 
 static __meminit void unmap_low_page(void *adr)
 {
-
 	if (after_bootmem)
 		return;
 
 	early_iounmap(adr, PAGE_SIZE);
 }
 
 /* Must run before zap_low_mappings */
 __meminit void *early_ioremap(unsigned long addr, unsigned long size)
 {
-	unsigned long vaddr;
 	pmd_t *pmd, *last_pmd;
+	unsigned long vaddr;
 	int i, pmds;
 
 	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
 	vaddr = __START_KERNEL_map;
 	pmd = level2_kernel_pgt;
 	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
+
 	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
 		for (i = 0; i < pmds; i++) {
 			if (pmd_present(pmd[i]))
-				goto next;
+				goto continue_outer_loop;
 		}
 		vaddr += addr & ~PMD_MASK;
 		addr &= PMD_MASK;
+
 		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
-			set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
-		__flush_tlb();
+			set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+		__flush_tlb_all();
+
 		return (void *)vaddr;
-	next:
+continue_outer_loop:
 		;
 	}
-	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
+	printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);
+
 	return NULL;
 }
 
-/* To avoid virtual aliases later */
+/*
+ * To avoid virtual aliases later:
+ */
 __meminit void early_iounmap(void *addr, unsigned long size)
 {
 	unsigned long vaddr;
@@ -244,9 +260,11 @@ __meminit void early_iounmap(void *addr, unsigned long size)
 	vaddr = (unsigned long)addr;
 	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
 	pmd = level2_kernel_pgt + pmd_index(vaddr);
+
 	for (i = 0; i < pmds; i++)
 		pmd_clear(pmd + i);
-	__flush_tlb();
+
+	__flush_tlb_all();
 }
 
 static void __meminit
@@ -259,16 +277,17 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 		pmd_t *pmd = pmd_page + pmd_index(address);
 
 		if (address >= end) {
-			if (!after_bootmem)
+			if (!after_bootmem) {
 				for (; i < PTRS_PER_PMD; i++, pmd++)
 					set_pmd(pmd, __pmd(0));
+			}
 			break;
 		}
 
 		if (pmd_val(*pmd))
 			continue;
 
-		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
+		entry = __PAGE_KERNEL_LARGE|_PAGE_GLOBAL|address;
 		entry &= __supported_pte_mask;
 		set_pmd(pmd, __pmd(entry));
 	}
@@ -277,19 +296,19 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 static void __meminit
 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 {
-	pmd_t *pmd = pmd_offset(pud,0);
+	pmd_t *pmd = pmd_offset(pud, 0);
 	spin_lock(&init_mm.page_table_lock);
 	phys_pmd_init(pmd, address, end);
 	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
 }
 
-static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
+static void __meminit
+phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 {
 	int i = pud_index(addr);
 
-
-	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
+	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
 		unsigned long pmd_phys;
 		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
@@ -297,10 +316,11 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
 		if (addr >= end)
 			break;
 
-		if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
-			set_pud(pud, __pud(0));
+		if (!after_bootmem &&
+				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
+			set_pud(pud, __pud(0));
 			continue;
 		}
 
 		if (pud_val(*pud)) {
 			phys_pmd_update(pud, addr, end);
@@ -308,14 +328,16 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
 		}
 
 		pmd = alloc_low_page(&pmd_phys);
+
 		spin_lock(&init_mm.page_table_lock);
 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
 		phys_pmd_init(pmd, addr, end);
 		spin_unlock(&init_mm.page_table_lock);
+
 		unmap_low_page(pmd);
 	}
-	__flush_tlb();
+	__flush_tlb_all();
 }
 
 static void __init find_early_table_space(unsigned long end)
 {
@@ -326,14 +348,23 @@ static void __init find_early_table_space(unsigned long end)
 	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
 		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
 
-	/* RED-PEN putting page tables only on node 0 could
-	   cause a hotspot and fill up ZONE_DMA. The page tables
-	   need roughly 0.5KB per GB. */
-	start = 0x8000;
-	table_start = find_e820_area(start, end, tables);
+	/*
+	 * RED-PEN putting page tables only on node 0 could
+	 * cause a hotspot and fill up ZONE_DMA. The page tables
+	 * need roughly 0.5KB per GB.
+	 */
+	start = 0x8000;
+	table_start = find_e820_area(start, end, tables);
 	if (table_start == -1UL)
 		panic("Cannot find space for the kernel page tables");
 
+	/*
+	 * When you have a lot of RAM like 256GB, early_table will not fit
+	 * into 0x8000 range, find_e820_area() will find area after kernel
+	 * bss but the table_start is not page aligned, so need to round it
+	 * up to avoid overlap with bss:
+	 */
+	table_start = round_up(table_start, PAGE_SIZE);
 	table_start >>= PAGE_SHIFT;
 	table_end = table_start;
 
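The new comment and the extra round_up() above close a real hole: with enough RAM the early tables do not fit at 0x8000, find_e820_area() falls back to the region just past the kernel bss, and that address need not be page aligned, so the first page-table page could overlap the tail of bss. The kernel's round_up() for a power-of-two boundary is (x + y - 1) & ~(y - 1); a standalone demonstration with a made-up unaligned address:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))	/* y: power of two */

int main(void)
{
	unsigned long table_start = 0x1c0e628UL;	/* hypothetical, mid-page */

	printf("raw:     %#lx\n", table_start);
	printf("aligned: %#lx\n", round_up(table_start, PAGE_SIZE));
	/* prints 0x1c0f000: the first whole page past bss, no overlap */
	return 0;
}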
@@ -342,20 +373,23 @@ static void __init find_early_table_space(unsigned long end)
 		(table_start << PAGE_SHIFT) + tables);
 }
 
-/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
-   This runs before bootmem is initialized and gets pages directly from the
-   physical memory. To access them they are temporarily mapped. */
+/*
+ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them they are temporarily mapped.
+ */
 void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 {
 	unsigned long next;
 
-	Dprintk("init_memory_mapping\n");
+	pr_debug("init_memory_mapping\n");
 
 	/*
 	 * Find space for the kernel direct mapping tables.
-	 * Later we should allocate these tables in the local node of the memory
-	 * mapped. Unfortunately this is done currently before the nodes are
-	 * discovered.
+	 *
+	 * Later we should allocate these tables in the local node of the
+	 * memory mapped. Unfortunately this is done currently before the
+	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
 		find_early_table_space(end);
@@ -364,8 +398,8 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 	end = (unsigned long)__va(end);
 
 	for (; start < end; start = next) {
-		unsigned long pud_phys;
 		pgd_t *pgd = pgd_offset_k(start);
+		unsigned long pud_phys;
 		pud_t *pud;
 
 		if (after_bootmem)
@@ -374,23 +408,26 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 		pud = alloc_low_page(&pud_phys);
 
 		next = start + PGDIR_SIZE;
 		if (next > end)
 			next = end;
 		phys_pud_init(pud, __pa(start), __pa(next));
 		if (!after_bootmem)
 			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
 		unmap_low_page(pud);
 	}
 
 	if (!after_bootmem)
 		mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
+
+	reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
 }
 
 #ifndef CONFIG_NUMA
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
+
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
@@ -402,39 +439,48 @@ void __init paging_init(void)
 }
 #endif
 
-/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
-   from the CPU leading to inconsistent cache lines. address and size
-   must be aligned to 2MB boundaries.
-   Does nothing when the mapping doesn't exist. */
-void __init clear_kernel_mapping(unsigned long address, unsigned long size)
+/*
+ * Unmap a kernel mapping if it exists. This is useful to avoid
+ * prefetches from the CPU leading to inconsistent cache lines.
+ * address and size must be aligned to 2MB boundaries.
+ * Does nothing when the mapping doesn't exist.
+ */
+void __init clear_kernel_mapping(unsigned long address, unsigned long size)
 {
 	unsigned long end = address + size;
 
 	BUG_ON(address & ~LARGE_PAGE_MASK);
 	BUG_ON(size & ~LARGE_PAGE_MASK);
 
 	for (; address < end; address += LARGE_PAGE_SIZE) {
 		pgd_t *pgd = pgd_offset_k(address);
 		pud_t *pud;
 		pmd_t *pmd;
+
 		if (pgd_none(*pgd))
 			continue;
+
 		pud = pud_offset(pgd, address);
 		if (pud_none(*pud))
 			continue;
+
 		pmd = pmd_offset(pud, address);
 		if (!pmd || pmd_none(*pmd))
 			continue;
-		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
-			/* Could handle this, but it should not happen currently. */
-			printk(KERN_ERR
-	 "clear_kernel_mapping: mapping has been split. will leak memory\n");
-			pmd_ERROR(*pmd);
+
+		if (!(pmd_val(*pmd) & _PAGE_PSE)) {
+			/*
+			 * Could handle this, but it should not happen
+			 * currently:
+			 */
+			printk(KERN_ERR "clear_kernel_mapping: "
+				"mapping has been split. will leak memory\n");
+			pmd_ERROR(*pmd);
 		}
 		set_pmd(pmd, __pmd(0));
 	}
 	__flush_tlb_all();
 }
 
 /*
  * Memory hotplug specific functions
@@ -461,16 +507,12 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	init_memory_mapping(start, (start + size -1));
+	init_memory_mapping(start, start + size-1);
 
 	ret = __add_pages(zone, start_pfn, nr_pages);
-	if (ret)
-		goto error;
+	WARN_ON(1);
 
 	return ret;
-error:
-	printk("%s: Problem encountered in __add_pages!\n", __func__);
-	return ret;
 }
 EXPORT_SYMBOL_GPL(arch_add_memory);
 
@@ -484,36 +526,8 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-/*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
- */
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
-{
-	int err = -EIO;
-	unsigned long pfn;
-	unsigned long total = 0, mem = 0;
-	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
-		if (pfn_valid(pfn)) {
-			online_page(pfn_to_page(pfn));
-			err = 0;
-			mem++;
-		}
-		total++;
-	}
-	if (!err) {
-		z->spanned_pages += total;
-		z->present_pages += mem;
-		z->zone_pgdat->node_spanned_pages += total;
-		z->zone_pgdat->node_present_pages += mem;
-	}
-	return err;
-}
-#endif
-
-static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
-			 kcore_vsyscall;
+static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
+	kcore_modules, kcore_vsyscall;
 
 void __init mem_init(void)
 {
@@ -521,8 +535,15 @@ void __init mem_init(void)
 
 	pci_iommu_alloc();
 
-	/* clear the zero-page */
-	memset(empty_zero_page, 0, PAGE_SIZE);
+	/* clear_bss() already clear the empty_zero_page */
+
+	/* temporary debugging - double check it's true: */
+	{
+		int i;
+
+		for (i = 0; i < 1024; i++)
+			WARN_ON_ONCE(empty_zero_page[i]);
+	}
 
 	reservedpages = 0;
 
@@ -534,7 +555,6 @@ void __init mem_init(void)
 #endif
 	reservedpages = end_pfn - totalram_pages -
 					absent_pages_in_range(0, end_pfn);
-
 	after_bootmem = 1;
 
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
@@ -542,15 +562,16 @@ void __init mem_init(void)
 	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
 
 	/* Register memory areas for /proc/kcore */
 	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
 	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
 		   VMALLOC_END-VMALLOC_START);
 	kclist_add(&kcore_kernel, &_stext, _end - _stext);
 	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
 	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
 				 VSYSCALL_END - VSYSCALL_START);
 
-	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
+	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
+		"%ldk reserved, %ldk data, %ldk init)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 		end_pfn << (PAGE_SHIFT-10),
 		codesize >> 10,
@@ -566,19 +587,27 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	if (begin >= end)
 		return;
 
+	/*
+	 * If debugging page accesses then do not free this memory but
+	 * mark them not present - any buggy init-section access will
+	 * create a kernel page fault:
+	 */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
+		begin, PAGE_ALIGN(end));
+	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
+#else
 	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
+
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
 		memset((void *)(addr & ~(PAGE_SIZE-1)),
 			POISON_FREE_INITMEM, PAGE_SIZE);
-		if (addr >= __START_KERNEL_map)
-			change_page_attr_addr(addr, 1, __pgprot(0));
 		free_page(addr);
 		totalram_pages++;
 	}
-	if (addr > __START_KERNEL_map)
-		global_flush_tlb();
+#endif
 }
 
 void free_initmem(void)
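The CONFIG_DEBUG_PAGEALLOC branch added above deliberately does not free the init sections: it unmaps them with set_memory_np(), so any late access to __init code or data takes a page fault instead of silently reading reused memory. A rough user-space analogue of the same debugging trick, with mprotect(PROT_NONE) standing in for set_memory_np():

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	p[0] = 1;			/* fine: the page is mapped */
	mprotect(p, len, PROT_NONE);	/* "mark not present" */
	/* p[0] = 2; would now die with SIGSEGV instead of silently
	   corrupting memory that might have been reused */
	printf("page at %p is now inaccessible\n", (void *)p);
	return 0;
}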
@@ -589,6 +618,8 @@ void free_initmem(void)
 }
 
 #ifdef CONFIG_DEBUG_RODATA
+const int rodata_test_data = 0xC3;
+EXPORT_SYMBOL_GPL(rodata_test_data);
 
 void mark_rodata_ro(void)
 {
@@ -603,25 +634,27 @@ void mark_rodata_ro(void)
 #ifdef CONFIG_KPROBES
 	start = (unsigned long)__start_rodata;
 #endif
 
 	end = (unsigned long)__end_rodata;
 	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
 	end &= PAGE_MASK;
 	if (end <= start)
 		return;
 
-	change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
+	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
 		(end - start) >> 10);
 
-	/*
-	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
-	 * We do this after the printk so that if something went wrong in the
-	 * change, the printk gets out at least to give a better debug hint
-	 * of who is the culprit.
-	 */
-	global_flush_tlb();
+	rodata_test();
+
+#ifdef CONFIG_CPA_DEBUG
+	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
+	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
+
+	printk(KERN_INFO "Testing CPA: again\n");
+	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
+#endif
 }
 #endif
 
@@ -632,17 +665,21 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
 void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 {
 #ifdef CONFIG_NUMA
 	int nid = phys_to_nid(phys);
 #endif
 	unsigned long pfn = phys >> PAGE_SHIFT;
+
 	if (pfn >= end_pfn) {
-		/* This can happen with kdump kernels when accessing firmware
-		   tables. */
+		/*
+		 * This can happen with kdump kernels when accessing
+		 * firmware tables:
+		 */
 		if (pfn < end_pfn_map)
 			return;
+
 		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
 			phys, len);
 		return;
@@ -650,9 +687,9 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 
 	/* Should check here against the e820 map to avoid double free */
 #ifdef CONFIG_NUMA
-  	reserve_bootmem_node(NODE_DATA(nid), phys, len);
-#else 			
-	reserve_bootmem(phys, len);    
+	reserve_bootmem_node(NODE_DATA(nid), phys, len);
+#else
+	reserve_bootmem(phys, len);
 #endif
 	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
 		dma_reserve += len / PAGE_SIZE;
@@ -660,46 +697,49 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 	}
 }
 
 int kern_addr_valid(unsigned long addr)
 {
 	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
 	if (above != 0 && above != -1UL)
 		return 0;
 
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd))
 		return 0;
 
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud))
 		return 0;
 
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd))
 		return 0;
+
 	if (pmd_large(*pmd))
 		return pfn_valid(pmd_pfn(*pmd));
 
 	pte = pte_offset_kernel(pmd, addr);
 	if (pte_none(*pte))
 		return 0;
+
 	return pfn_valid(pte_pfn(*pte));
 }
 
-/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
-   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
-   not need special handling anymore. */
-
+/*
+ * A pseudo VMA to allow ptrace access for the vsyscall page. This only
+ * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
+ * not need special handling anymore:
+ */
 static struct vm_area_struct gate_vma = {
-	.vm_start = VSYSCALL_START,
-	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
-	.vm_page_prot = PAGE_READONLY_EXEC,
-	.vm_flags = VM_READ | VM_EXEC
+	.vm_start	= VSYSCALL_START,
+	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+	.vm_page_prot	= PAGE_READONLY_EXEC,
+	.vm_flags	= VM_READ | VM_EXEC
 };
 
 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
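The kern_addr_valid() hunk above keeps its first test intact: shifting the address right by __VIRTUAL_MASK_SHIFT as a signed quantity must yield all zero or all one bits, i.e. the address must be canonical before the page tables are even walked. A standalone illustration; it assumes gcc's arithmetic right shift for negative longs, and the sample addresses are arbitrary:

#include <stdio.h>

#define __VIRTUAL_MASK_SHIFT 48		/* x86-64, 4-level page tables */

/* the canonical-address screen from kern_addr_valid() */
static int addr_passes_check(unsigned long addr)
{
	long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;

	return above == 0 || above == -1L;
}

int main(void)
{
	printf("%d\n", addr_passes_check(0x00007fffffffe000UL)); /* 1: user */
	printf("%d\n", addr_passes_check(0xffff810000000000UL)); /* 1: kernel */
	printf("%d\n", addr_passes_check(0x1234567890abcdefUL)); /* 0: bogus */
	return 0;
}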
@@ -714,14 +754,17 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
 int in_gate_area(struct task_struct *task, unsigned long addr)
 {
 	struct vm_area_struct *vma = get_gate_vma(task);
+
 	if (!vma)
 		return 0;
+
 	return (addr >= vma->vm_start) && (addr < vma->vm_end);
 }
 
-/* Use this when you have no reliable task/vma, typically from interrupt
+/*
+ * Use this when you have no reliable task/vma, typically from interrupt
  * context. It is less reliable than using the task's vma and may give
- * false positives.
+ * false positives:
  */
 int in_gate_area_no_task(unsigned long addr)
 {
@@ -741,8 +784,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 /*
  * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
  */
-int __meminit vmemmap_populate(struct page *start_page,
-						unsigned long size, int node)
+int __meminit
+vmemmap_populate(struct page *start_page, unsigned long size, int node)
 {
 	unsigned long addr = (unsigned long)start_page;
 	unsigned long end = (unsigned long)(start_page + size);
@@ -757,6 +800,7 @@ int __meminit vmemmap_populate(struct page *start_page,
 	pgd = vmemmap_pgd_populate(addr, node);
 	if (!pgd)
 		return -ENOMEM;
+
 	pud = vmemmap_pud_populate(pgd, addr, node);
 	if (!pud)
 		return -ENOMEM;
@@ -764,20 +808,22 @@ int __meminit vmemmap_populate(struct page *start_page,
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
 			pte_t entry;
-			void *p = vmemmap_alloc_block(PMD_SIZE, node);
+			void *p;
+
+			p = vmemmap_alloc_block(PMD_SIZE, node);
 			if (!p)
 				return -ENOMEM;
 
-			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
-			mk_pte_huge(entry);
+			entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+							PAGE_KERNEL_LARGE);
 			set_pmd(pmd, __pmd(pte_val(entry)));
 
 			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
 				addr, addr + PMD_SIZE - 1, p, node);
-		} else
+		} else {
 			vmemmap_verify((pte_t *)pmd, node, addr, next);
+		}
 	}
-
 	return 0;
 }
 #endif