path: root/arch/x86/mm
author	Ingo Molnar <mingo@elte.hu>	2008-08-25 05:10:42 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-25 05:10:42 -0400
commit	ea1c9de45ecb162841c9b4e0fa303a245d59b1c8 (patch)
tree	72ef1624d8a4c581ad94a84f38f941682562030b /arch/x86/mm
parent	4e1d112cac08049e764fe3c4f6d3ec92529f9f68 (diff)
parent	a2bd7274b47124d2fc4dfdb8c0591f545ba749dd (diff)
Merge branch 'x86/urgent' into x86/cleanups
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/init_64.c	36
-rw-r--r--	arch/x86/mm/ioremap.c	8
-rw-r--r--	arch/x86/mm/mmio-mod.c	4
-rw-r--r--	arch/x86/mm/pageattr.c	6
-rw-r--r--	arch/x86/mm/pat.c	50
5 files changed, 65 insertions, 39 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5f47fde5fa9e..770536ebf7e9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -241,7 +241,7 @@ static unsigned long __initdata table_start;
 static unsigned long __meminitdata table_end;
 static unsigned long __meminitdata table_top;
 
-static __meminit void *alloc_low_page(unsigned long *phys)
+static __ref void *alloc_low_page(unsigned long *phys)
 {
 	unsigned long pfn = table_end++;
 	void *adr;
@@ -262,7 +262,7 @@ static __meminit void *alloc_low_page(unsigned long *phys)
 	return adr;
 }
 
-static __meminit void unmap_low_page(void *adr)
+static __ref void unmap_low_page(void *adr)
 {
 	if (after_bootmem)
 		return;
@@ -336,9 +336,12 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		}
 
 		if (pmd_val(*pmd)) {
-			if (!pmd_large(*pmd))
+			if (!pmd_large(*pmd)) {
+				spin_lock(&init_mm.page_table_lock);
 				last_map_addr = phys_pte_update(pmd, address,
 								end);
+				spin_unlock(&init_mm.page_table_lock);
+			}
 			/* Count entries we're using from level2_ident_pgt */
 			if (start == 0)
 				pages++;
@@ -347,8 +350,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 
 		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
 				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
 			continue;
 		}
@@ -357,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		last_map_addr = phys_pte_init(pte, address, end);
 		unmap_low_page(pte);
 
+		spin_lock(&init_mm.page_table_lock);
 		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
 	return last_map_addr;
@@ -370,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
 	pmd_t *pmd = pmd_offset(pud, 0);
 	unsigned long last_map_addr;
 
-	spin_lock(&init_mm.page_table_lock);
 	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
 	return last_map_addr;
 }
@@ -408,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pud,
 				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
 			continue;
 		}
 
 		pmd = alloc_low_page(&pmd_phys);
-
-		spin_lock(&init_mm.page_table_lock);
 		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
 		unmap_low_page(pmd);
+
+		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, __va(pmd_phys));
 		spin_unlock(&init_mm.page_table_lock);
-
 	}
 	__flush_tlb_all();
 	update_page_count(PG_LEVEL_1G, pages);
@@ -513,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
 			continue;
 		}
 
-		if (after_bootmem)
-			pud = pud_offset(pgd, start & PGDIR_MASK);
-		else
-			pud = alloc_low_page(&pud_phys);
-
+		pud = alloc_low_page(&pud_phys);
 		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
 						 page_size_mask);
 		unmap_low_page(pud);
-		pgd_populate(&init_mm, pgd_offset_k(start),
-			     __va(pud_phys));
+
+		spin_lock(&init_mm.page_table_lock);
+		pgd_populate(&init_mm, pgd, __va(pud_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 
 	return last_map_addr;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 6ba6f889c79d..d4b6e6a29ae3 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -553,13 +553,11 @@ static int __init check_early_ioremap_leak(void)
 {
 	if (!early_ioremap_nested)
 		return 0;
-
-	printk(KERN_WARNING
+	WARN(1, KERN_WARNING
 	       "Debug warning: early ioremap leak of %d areas detected.\n",
 	       early_ioremap_nested);
 	printk(KERN_WARNING
 	       "please boot with early_ioremap_debug and report the dmesg.\n");
-	WARN_ON(1);
 
 	return 1;
 }
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index e7397e108beb..635b50e85581 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -430,7 +430,9 @@ static void enter_uniprocessor(void)
430 "may miss events.\n"); 430 "may miss events.\n");
431} 431}
432 432
433static void leave_uniprocessor(void) 433/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
434 but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
435static void __ref leave_uniprocessor(void)
434{ 436{
435 int cpu; 437 int cpu;
436 int err; 438 int err;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1b87b76e2d04..be54f501776a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -849,7 +849,7 @@ int set_memory_uc(unsigned long addr, int numpages)
 	/*
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
-	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 			    _PAGE_CACHE_UC_MINUS, NULL))
 		return -EINVAL;
 
@@ -868,7 +868,7 @@ int set_memory_wc(unsigned long addr, int numpages)
 	if (!pat_enabled)
 		return set_memory_uc(addr, numpages);
 
-	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 				_PAGE_CACHE_WC, NULL))
 		return -EINVAL;
 
@@ -884,7 +884,7 @@ int _set_memory_wb(unsigned long addr, int numpages)
 
 int set_memory_wb(unsigned long addr, int numpages)
 {
-	free_memtype(addr, addr + numpages * PAGE_SIZE);
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 
 	return _set_memory_wb(addr, numpages);
 }
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2fe30916d4b6..2a50e0fa64a5 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
 	return -EBUSY;
 }
 
+static struct memtype *cached_entry;
+static u64 cached_start;
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
+	if (cached_entry && start >= cached_start)
+		entry = cached_entry;
+	else
+		entry = list_entry(&memtype_list, struct memtype, nd);
+
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
+	list_for_each_entry_continue(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
+			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
+				cached_entry = list_entry(where,
+							struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				where = &entry->nd;
+				cached_entry = list_entry(entry->nd.prev,
+							struct memtype, nd);
+
+				/*
+				 * Move to right position in the linked
+				 * list to add this new entry
+				 */
+				list_for_each_entry_continue(entry,
+							&memtype_list, nd) {
+					if (start <= entry->start) {
+						where = entry->nd.prev;
+						break;
+					}
+				}
 			}
 			break;
 		}
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
+	cached_start = start;
+
 	if (where)
 		list_add(&new->nd, where);
 	else
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
+			if (cached_entry == entry || cached_start == start)
+				cached_entry = NULL;
+
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;
@@ -361,14 +390,6 @@ int free_memtype(u64 start, u64 end)
 }
 
 
-/*
- * /dev/mem mmap interface. The memtype used for mapping varies:
- * - Use UC for mappings with O_SYNC flag
- * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
- *   inherit the memtype from existing mapping.
- * - Else use UC_MINUS memtype (for backward compatibility with existing
- *   X drivers.
- */
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {
@@ -406,14 +427,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
 	u64 offset = ((u64) pfn) << PAGE_SHIFT;
-	unsigned long flags = _PAGE_CACHE_UC_MINUS;
+	unsigned long flags = -1;
 	int retval;
 
 	if (!range_is_allowed(pfn, size))
 		return 0;
 
 	if (file->f_flags & O_SYNC) {
-		flags = _PAGE_CACHE_UC;
+		flags = _PAGE_CACHE_UC_MINUS;
 	}
 
 #ifdef CONFIG_X86_32
@@ -436,13 +457,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 #endif
 
 	/*
-	 * With O_SYNC, we can only take UC mapping. Fail if we cannot.
+	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
+	 *
 	 * Without O_SYNC, we want to get
 	 * - WB for WB-able memory and no other conflicting mappings
 	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
 	 * - Inherit from confliting mappings otherwise
 	 */
-	if (flags != _PAGE_CACHE_UC_MINUS) {
+	if (flags != -1) {
 		retval = reserve_memtype(offset, offset + size, flags, NULL);
 	} else {
 		retval = reserve_memtype(offset, offset + size, -1, &flags);