aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-13 21:54:50 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-13 21:54:50 -0400
commitdfe2c6dcc8ca2cdc662d7c0473e9811b72ef3370 (patch)
tree9ed639a08c16322cdf136d576f42df5b97cd1549 /arch/x86/mm
parenta45d572841a24db02a62cf05e1157c35fdd3705b (diff)
parent64e455079e1bd7787cc47be30b7f601ce682a5f6 (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge second patch-bomb from Andrew Morton: - a few hotfixes - drivers/dma updates - MAINTAINERS updates - Quite a lot of lib/ updates - checkpatch updates - binfmt updates - autofs4 - drivers/rtc/ - various small tweaks to less used filesystems - ipc/ updates - kernel/watchdog.c changes * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (135 commits) mm: softdirty: enable write notifications on VMAs after VM_SOFTDIRTY cleared kernel/param: consolidate __{start,stop}___param[] in <linux/moduleparam.h> ia64: remove duplicate declarations of __per_cpu_start[] and __per_cpu_end[] frv: remove unused declarations of __start___ex_table and __stop___ex_table kvm: ensure hard lockup detection is disabled by default kernel/watchdog.c: control hard lockup detection default staging: rtl8192u: use %*pEn to escape buffer staging: rtl8192e: use %*pEn to escape buffer staging: wlan-ng: use %*pEhp to print SN lib80211: remove unused print_ssid() wireless: hostap: proc: print properly escaped SSID wireless: ipw2x00: print SSID via %*pE wireless: libertas: print escaped string via %*pE lib/vsprintf: add %*pE[achnops] format specifier lib / string_helpers: introduce string_escape_mem() lib / string_helpers: refactoring the test suite lib / string_helpers: move documentation to c-file include/linux: remove strict_strto* definitions arch/x86/mm/numa.c: fix boot failure when all nodes are hotpluggable fs: check bh blocknr earlier when searching lru ...
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--arch/x86/mm/ioremap.c20
-rw-r--r--arch/x86/mm/numa.c89
2 files changed, 61 insertions, 48 deletions
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index baff1da354e0..af78e50ca6ce 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -86,6 +86,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
86 pgprot_t prot; 86 pgprot_t prot;
87 int retval; 87 int retval;
88 void __iomem *ret_addr; 88 void __iomem *ret_addr;
89 int ram_region;
89 90
90 /* Don't allow wraparound or zero size */ 91 /* Don't allow wraparound or zero size */
91 last_addr = phys_addr + size - 1; 92 last_addr = phys_addr + size - 1;
@@ -108,12 +109,23 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
108 /* 109 /*
109 * Don't allow anybody to remap normal RAM that we're using.. 110 * Don't allow anybody to remap normal RAM that we're using..
110 */ 111 */
111 pfn = phys_addr >> PAGE_SHIFT; 112 /* First check if whole region can be identified as RAM or not */
112 last_pfn = last_addr >> PAGE_SHIFT; 113 ram_region = region_is_ram(phys_addr, size);
113 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, 114 if (ram_region > 0) {
114 __ioremap_check_ram) == 1) 115 WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
116 (unsigned long int)phys_addr,
117 (unsigned long int)last_addr);
115 return NULL; 118 return NULL;
119 }
116 120
121 /* If it could not be identified (-1), check page by page */
122 if (ram_region < 0) {
123 pfn = phys_addr >> PAGE_SHIFT;
124 last_pfn = last_addr >> PAGE_SHIFT;
125 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
126 __ioremap_check_ram) == 1)
127 return NULL;
128 }
117 /* 129 /*
118 * Mappings have to be page-aligned 130 * Mappings have to be page-aligned
119 */ 131 */
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index d221374d5ce8..1a883705a12a 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -463,6 +463,42 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
463 return true; 463 return true;
464} 464}
465 465
466static void __init numa_clear_kernel_node_hotplug(void)
467{
468 int i, nid;
469 nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
470 unsigned long start, end;
471 struct memblock_region *r;
472
473 /*
474 * At this time, all memory regions reserved by memblock are
475 * used by the kernel. Setting the nid in memblock.reserved will
476 * mark out all the nodes the kernel resides in.
477 */
478 for (i = 0; i < numa_meminfo.nr_blks; i++) {
479 struct numa_memblk *mb = &numa_meminfo.blk[i];
480
481 memblock_set_node(mb->start, mb->end - mb->start,
482 &memblock.reserved, mb->nid);
483 }
484
485 /* Mark all kernel nodes. */
486 for_each_memblock(reserved, r)
487 node_set(r->nid, numa_kernel_nodes);
488
489 /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
490 for (i = 0; i < numa_meminfo.nr_blks; i++) {
491 nid = numa_meminfo.blk[i].nid;
492 if (!node_isset(nid, numa_kernel_nodes))
493 continue;
494
495 start = numa_meminfo.blk[i].start;
496 end = numa_meminfo.blk[i].end;
497
498 memblock_clear_hotplug(start, end - start);
499 }
500}
501
466static int __init numa_register_memblks(struct numa_meminfo *mi) 502static int __init numa_register_memblks(struct numa_meminfo *mi)
467{ 503{
468 unsigned long uninitialized_var(pfn_align); 504 unsigned long uninitialized_var(pfn_align);
@@ -481,6 +517,15 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
481 } 517 }
482 518
483 /* 519 /*
520 * At very early time, the kernel has to use some memory such as
521 * loading the kernel image. We cannot prevent this anyway. So any
522 * node the kernel resides in should be un-hotpluggable.
523 *
524 * And when we come here, alloc node data won't fail.
525 */
526 numa_clear_kernel_node_hotplug();
527
528 /*
484 * If sections array is gonna be used for pfn -> nid mapping, check 529 * If sections array is gonna be used for pfn -> nid mapping, check
485 * whether its granularity is fine enough. 530 * whether its granularity is fine enough.
486 */ 531 */
@@ -548,41 +593,6 @@ static void __init numa_init_array(void)
548 } 593 }
549} 594}
550 595
551static void __init numa_clear_kernel_node_hotplug(void)
552{
553 int i, nid;
554 nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
555 unsigned long start, end;
556 struct memblock_region *r;
557
558 /*
559 * At this time, all memory regions reserved by memblock are
560 * used by the kernel. Setting the nid in memblock.reserved will
561 * mark out all the nodes the kernel resides in.
562 */
563 for (i = 0; i < numa_meminfo.nr_blks; i++) {
564 struct numa_memblk *mb = &numa_meminfo.blk[i];
565 memblock_set_node(mb->start, mb->end - mb->start,
566 &memblock.reserved, mb->nid);
567 }
568
569 /* Mark all kernel nodes. */
570 for_each_memblock(reserved, r)
571 node_set(r->nid, numa_kernel_nodes);
572
573 /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
574 for (i = 0; i < numa_meminfo.nr_blks; i++) {
575 nid = numa_meminfo.blk[i].nid;
576 if (!node_isset(nid, numa_kernel_nodes))
577 continue;
578
579 start = numa_meminfo.blk[i].start;
580 end = numa_meminfo.blk[i].end;
581
582 memblock_clear_hotplug(start, end - start);
583 }
584}
585
586static int __init numa_init(int (*init_func)(void)) 596static int __init numa_init(int (*init_func)(void))
587{ 597{
588 int i; 598 int i;
@@ -637,15 +647,6 @@ static int __init numa_init(int (*init_func)(void))
637 } 647 }
638 numa_init_array(); 648 numa_init_array();
639 649
640 /*
641 * At very early time, the kernel has to use some memory such as
642 * loading the kernel image. We cannot prevent this anyway. So any
643 * node the kernel resides in should be un-hotpluggable.
644 *
645 * And when we come here, numa_init() won't fail.
646 */
647 numa_clear_kernel_node_hotplug();
648
649 return 0; 650 return 0;
650} 651}
651 652