author     Linus Torvalds <torvalds@g5.osdl.org>    2006-09-26 16:07:55 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-09-26 16:07:55 -0400
commit     b278240839e20fa9384ea430df463b367b90e04e (patch)
tree       f99f0c8cdd4cc7f177cd75440e6bd181cded7fb3 /arch/x86_64/mm
parent     dd77a4ee0f3981693d4229aa1d57cea9e526ff47 (diff)
parent     3f75f42d7733e73aca5c78326489efd4189e0111 (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (225 commits)
[PATCH] Don't set calgary iommu as default y
[PATCH] i386/x86-64: New Intel feature flags
[PATCH] x86: Add a cumulative thermal throttle event counter.
[PATCH] i386: Make the jiffies compares use the 64bit safe macros.
[PATCH] x86: Refactor thermal throttle processing
[PATCH] Add 64bit jiffies compares (for use with get_jiffies_64)
[PATCH] Fix unwinder warning in traps.c
[PATCH] x86: Allow disabling early pci scans with pci=noearly or disallowing conf1
[PATCH] x86: Move direct PCI scanning functions out of line
[PATCH] i386/x86-64: Make all early PCI scans dependent on CONFIG_PCI
[PATCH] Don't leak NT bit into next task
[PATCH] i386/x86-64: Work around gcc bug with noreturn functions in unwinder
[PATCH] Fix some broken white space in ia32_signal.c
[PATCH] Initialize argument registers for 32bit signal handlers.
[PATCH] Remove all traces of signal number conversion
[PATCH] Don't synchronize time reading on single core AMD systems
[PATCH] Remove outdated comment in x86-64 mmconfig code
[PATCH] Use string instructions for Core2 copy/clear
[PATCH] x86: - restore i8259A eoi status on resume
[PATCH] i386: Split multi-line printk in oops output.
...
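
Two of the entries above ("Add 64bit jiffies compares" and "Make the jiffies compares use the 64bit safe macros") concern wrap-safe comparisons on the 64-bit jiffies counter. As a rough, illustrative sketch only (the deadline variable and helpers below are hypothetical, not taken from these patches), the intended usage pattern with get_jiffies_64() and the 64-bit compare macros looks like this:

```c
#include <linux/jiffies.h>

/* Hypothetical example: a deadline kept as a u64 so it is safe across wrap. */
static u64 throttle_deadline;

static void arm_throttle_window(void)
{
        /* get_jiffies_64() reads the full 64-bit counter consistently. */
        throttle_deadline = get_jiffies_64() + msecs_to_jiffies(5000);
}

static int throttle_window_expired(void)
{
        /* time_after64() is the wrap-safe comparison used with get_jiffies_64(). */
        return time_after64(get_jiffies_64(), throttle_deadline);
}
```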
Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--  arch/x86_64/mm/fault.c       | 22
-rw-r--r--  arch/x86_64/mm/init.c        | 58
-rw-r--r--  arch/x86_64/mm/k8topology.c  |  3
-rw-r--r--  arch/x86_64/mm/numa.c        | 11
-rw-r--r--  arch/x86_64/mm/pageattr.c    | 24
-rw-r--r--  arch/x86_64/mm/srat.c        |  2
6 files changed, 57 insertions, 63 deletions
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 4198798e1469..1a17b0733ab5 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -40,8 +40,7 @@
 #define PF_RSVD (1<<3)
 #define PF_INSTR (1<<4)
 
-#ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
 
 /* Hook to register for page fault notifications */
 int register_page_fault_notifier(struct notifier_block *nb)
@@ -49,11 +48,13 @@ int register_page_fault_notifier(struct notifier_block *nb)
         vmalloc_sync_all();
         return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
 }
+EXPORT_SYMBOL_GPL(register_page_fault_notifier);
 
 int unregister_page_fault_notifier(struct notifier_block *nb)
 {
         return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
 }
+EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
 
 static inline int notify_page_fault(enum die_val val, const char *str,
                         struct pt_regs *regs, long err, int trap, int sig)
@@ -67,13 +68,6 @@ static inline int notify_page_fault(enum die_val val, const char *str,
         };
         return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
 }
-#else
-static inline int notify_page_fault(enum die_val val, const char *str,
-                        struct pt_regs *regs, long err, int trap, int sig)
-{
-        return NOTIFY_DONE;
-}
-#endif
 
 void bust_spinlocks(int yes)
 {
@@ -102,7 +96,7 @@ void bust_spinlocks(int yes)
 static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
                                 unsigned long error_code)
 {
-        unsigned char *instr;
+        unsigned char __user *instr;
         int scan_more = 1;
         int prefetch = 0;
         unsigned char *max_instr;
@@ -111,7 +105,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
         if (error_code & PF_INSTR)
                 return 0;
 
-        instr = (unsigned char *)convert_rip_to_linear(current, regs);
+        instr = (unsigned char __user *)convert_rip_to_linear(current, regs);
         max_instr = instr + 15;
 
         if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
@@ -122,7 +116,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
                 unsigned char instr_hi;
                 unsigned char instr_lo;
 
-                if (__get_user(opcode, instr))
+                if (__get_user(opcode, (char __user *)instr))
                         break;
 
                 instr_hi = opcode & 0xf0;
@@ -160,7 +154,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
                 case 0x00:
                         /* Prefetch instruction is 0x0F0D or 0x0F18 */
                         scan_more = 0;
-                        if (__get_user(opcode, instr))
+                        if (__get_user(opcode, (char __user *)instr))
                                 break;
                         prefetch = (instr_lo == 0xF) &&
                                 (opcode == 0x0D || opcode == 0x18);
@@ -176,7 +170,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 static int bad_address(void *p)
 {
         unsigned long dummy;
-        return __get_user(dummy, (unsigned long *)p);
+        return __get_user(dummy, (unsigned long __user *)p);
 }
 
 void dump_pagetable(unsigned long address)
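
The fault.c change above makes the page-fault notifier chain unconditional (no longer hidden behind CONFIG_KPROBES) and exports register_page_fault_notifier()/unregister_page_fault_notifier() with EXPORT_SYMBOL_GPL. For reference, a GPL module would hook the chain roughly as follows; this is a minimal sketch, and the handler and module names are illustrative, not part of this commit:

```c
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>         /* die_val, struct die_args, register_page_fault_notifier() */

static int example_pf_notify(struct notifier_block *nb, unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val == DIE_PAGE_FAULT)
                printk(KERN_DEBUG "page fault, error code %ld\n", args->err);
        return NOTIFY_DONE;     /* let normal fault handling continue */
}

static struct notifier_block example_pf_nb = {
        .notifier_call = example_pf_notify,
};

static int __init example_init(void)
{
        return register_page_fault_notifier(&example_pf_nb);
}

static void __exit example_exit(void)
{
        unregister_page_fault_notifier(&example_pf_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```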
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 52fd42c40c86..1e4669fa5734 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -229,7 +229,6 @@ __init void *early_ioremap(unsigned long addr, unsigned long size)
 
         /* actually usually some more */
         if (size >= LARGE_PAGE_SIZE) {
-                printk("SMBIOS area too long %lu\n", size);
                 return NULL;
         }
         set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
@@ -250,12 +249,13 @@ __init void early_iounmap(void *addr, unsigned long size)
 }
 
 static void __meminit
-phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 {
-        int i;
+        int i = pmd_index(address);
 
-        for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
+        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                 unsigned long entry;
+                pmd_t *pmd = pmd_page + pmd_index(address);
 
                 if (address >= end) {
                         if (!after_bootmem)
@@ -263,6 +263,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
                         set_pmd(pmd, __pmd(0));
                         break;
                 }
+
+                if (pmd_val(*pmd))
+                        continue;
+
                 entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
                 entry &= __supported_pte_mask;
                 set_pmd(pmd, __pmd(entry));
@@ -272,45 +276,41 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
 static void __meminit
 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 {
-        pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
-
-        if (pmd_none(*pmd)) {
-                spin_lock(&init_mm.page_table_lock);
-                phys_pmd_init(pmd, address, end);
-                spin_unlock(&init_mm.page_table_lock);
-                __flush_tlb_all();
-        }
+        pmd_t *pmd = pmd_offset(pud,0);
+        spin_lock(&init_mm.page_table_lock);
+        phys_pmd_init(pmd, address, end);
+        spin_unlock(&init_mm.page_table_lock);
+        __flush_tlb_all();
 }
 
-static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 {
-        long i = pud_index(address);
-
-        pud = pud + i;
+        int i = pud_index(addr);
 
-        if (after_bootmem && pud_val(*pud)) {
-                phys_pmd_update(pud, address, end);
-                return;
-        }
 
-        for (; i < PTRS_PER_PUD; pud++, i++) {
+        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
                 int map;
-                unsigned long paddr, pmd_phys;
+                unsigned long pmd_phys;
+                pud_t *pud = pud_page + pud_index(addr);
                 pmd_t *pmd;
 
-                paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
-                if (paddr >= end)
+                if (addr >= end)
                         break;
 
-                if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
+                if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
                         set_pud(pud, __pud(0));
                         continue;
                 }
 
+                if (pud_val(*pud)) {
+                        phys_pmd_update(pud, addr, end);
+                        continue;
+                }
+
                 pmd = alloc_low_page(&map, &pmd_phys);
                 spin_lock(&init_mm.page_table_lock);
                 set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-                phys_pmd_init(pmd, paddr, end);
+                phys_pmd_init(pmd, addr, end);
                 spin_unlock(&init_mm.page_table_lock);
                 unmap_low_page(map);
         }
@@ -597,12 +597,6 @@ void __init mem_init(void)
 
         pci_iommu_alloc();
 
-        /* How many end-of-memory variables you have, grandma! */
-        max_low_pfn = end_pfn;
-        max_pfn = end_pfn;
-        num_physpages = end_pfn;
-        high_memory = (void *) __va(end_pfn * PAGE_SIZE);
-
         /* clear the zero-page */
         memset(empty_zero_page, 0, PAGE_SIZE);
 
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index 7c45c2d2b8b2..5cf594f9230d 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -54,6 +54,9 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 
         nodes_clear(nodes_parsed);
 
+        if (!early_pci_allowed())
+                return -1;
+
         nb = find_northbridge();
         if (nb < 0)
                 return nb;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index b2fac14baac0..322bf45fc36a 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -225,7 +225,7 @@ void __init numa_init_array(void)
 int numa_fake __initdata = 0;
 
 /* Numa emulation */
-static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
+static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
 {
         int i;
         struct bootnode nodes[MAX_NUMNODES];
@@ -348,9 +348,10 @@ void __init paging_init(void)
         }
 }
 
-/* [numa=off] */
-__init int numa_setup(char *opt)
+static __init int numa_setup(char *opt)
 {
+        if (!opt)
+                return -EINVAL;
         if (!strncmp(opt,"off",3))
                 numa_off = 1;
 #ifdef CONFIG_NUMA_EMU
@@ -366,9 +367,11 @@ __init int numa_setup(char *opt)
         if (!strncmp(opt,"hotadd=", 7))
                 hotadd_percent = simple_strtoul(opt+7, NULL, 10);
 #endif
-        return 1;
+        return 0;
 }
 
+early_param("numa", numa_setup);
+
 /*
  * Setup early cpu_to_node.
  *
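
The numa.c change above converts numa_setup() from a late __setup-style handler (which returned 1) into an early_param() handler: it now runs during early parameter parsing, must tolerate a NULL argument string, and returns 0 on success or -EINVAL on a bad argument. A minimal sketch of that general pattern, using a hypothetical "example=" option rather than anything from this commit:

```c
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int example_mode __initdata;

/* Parses "example=off" or "example=fast" very early during boot. */
static int __init example_setup(char *opt)
{
        if (!opt)                       /* early_param handlers may see a NULL argument */
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                example_mode = 0;
        else if (!strncmp(opt, "fast", 4))
                example_mode = 1;
        return 0;                       /* 0 = option handled successfully */
}
early_param("example", example_setup);
```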
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 2685b1f3671c..3e231d762aaa 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -108,8 +108,8 @@ static void revert_page(unsigned long address, pgprot_t ref_prot)
         BUG_ON(pud_none(*pud));
         pmd = pmd_offset(pud, address);
         BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-        pgprot_val(ref_prot) |= _PAGE_PSE;
         large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
+        large_pte = pte_mkhuge(large_pte);
         set_pte((pte_t *)pmd, large_pte);
 }
 
@@ -119,32 +119,28 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 {
         pte_t *kpte;
         struct page *kpte_page;
-        unsigned kpte_flags;
         pgprot_t ref_prot2;
         kpte = lookup_address(address);
         if (!kpte) return 0;
         kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
-        kpte_flags = pte_val(*kpte);
         if (pgprot_val(prot) != pgprot_val(ref_prot)) {
-                if ((kpte_flags & _PAGE_PSE) == 0) {
+                if (!pte_huge(*kpte)) {
                         set_pte(kpte, pfn_pte(pfn, prot));
                 } else {
                         /*
                          * split_large_page will take the reference for this
                          * change_page_attr on the split page.
                          */
-
                         struct page *split;
-                        ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
-
+                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                         split = split_large_page(address, prot, ref_prot2);
                         if (!split)
                                 return -ENOMEM;
-                        set_pte(kpte,mk_pte(split, ref_prot2));
+                        set_pte(kpte, mk_pte(split, ref_prot2));
                         kpte_page = split;
                 }
                 page_private(kpte_page)++;
-        } else if ((kpte_flags & _PAGE_PSE) == 0) {
+        } else if (!pte_huge(*kpte)) {
                 set_pte(kpte, pfn_pte(pfn, ref_prot));
                 BUG_ON(page_private(kpte_page) == 0);
                 page_private(kpte_page)--;
@@ -190,10 +186,12 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
                  * lowmem */
                 if (__pa(address) < KERNEL_TEXT_SIZE) {
                         unsigned long addr2;
-                        pgprot_t prot2 = prot;
+                        pgprot_t prot2;
                         addr2 = __START_KERNEL_map + __pa(address);
-                        pgprot_val(prot2) &= ~_PAGE_NX;
-                        err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
+                        /* Make sure the kernel mappings stay executable */
+                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+                        err = __change_page_attr(addr2, pfn, prot2,
+                                                 PAGE_KERNEL_EXEC);
                 }
         }
         up_write(&init_mm.mmap_sem);
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 502fce65e96a..ca10701e7a90 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -21,6 +21,8 @@
 #include <asm/numa.h>
 #include <asm/e820.h>
 
+int acpi_numa __initdata;
+
 #if (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
         defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) \
         && !defined(CONFIG_MEMORY_HOTPLUG)