author     H. Peter Anvin <hpa@linux.intel.com>  2013-02-01 05:25:06 -0500
committer  H. Peter Anvin <hpa@linux.intel.com>  2013-02-01 05:28:36 -0500
commit     68d00bbebb5a48b7a9056a8c03476a71ecbc30a6 (patch)
tree       95af63041b79c43be37d734da6073fc82070f769 /arch/x86/mm
parent     ac2cbab21f318e19bc176a7f38a120cec835220f (diff)
parent     07f4207a305c834f528d08428df4531744e25678 (diff)
Merge remote-tracking branch 'origin/x86/mm' into x86/mm2
Explicitly merging these two branches due to nontrivial conflicts and
to allow further work.

Resolved Conflicts:
        arch/x86/kernel/head32.c
        arch/x86/kernel/head64.c
        arch/x86/mm/init_64.c
        arch/x86/realmode/init.c

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/init_64.c         18
-rw-r--r--  arch/x86/mm/numa.c            32
-rw-r--r--  arch/x86/mm/numa_32.c        161
-rw-r--r--  arch/x86/mm/numa_internal.h    6
-rw-r--r--  arch/x86/mm/pageattr.c        50
-rw-r--r--  arch/x86/mm/pat.c              4
-rw-r--r--  arch/x86/mm/pgtable.c          7
-rw-r--r--  arch/x86/mm/physaddr.c        60
8 files changed, 110 insertions, 228 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e2fcbc34c9df..edaa2daf4b37 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -804,12 +804,10 @@ void set_kernel_text_ro(void)
 void mark_rodata_ro(void)
 {
        unsigned long start = PFN_ALIGN(_text);
-       unsigned long rodata_start =
-               ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+       unsigned long rodata_start = PFN_ALIGN(__start_rodata);
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
-       unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
-       unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
-       unsigned long data_start = (unsigned long) &_sdata;
+       unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
+       unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
        unsigned long all_end = PFN_ALIGN(&_end);

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
@@ -835,12 +833,12 @@ void mark_rodata_ro(void)
 #endif

        free_init_pages("unused kernel memory",
-                       (unsigned long) page_address(virt_to_page(text_end)),
-                       (unsigned long)
-                               page_address(virt_to_page(rodata_start)));
+                       (unsigned long) __va(__pa_symbol(text_end)),
+                       (unsigned long) __va(__pa_symbol(rodata_start)));
+
        free_init_pages("unused kernel memory",
-                       (unsigned long) page_address(virt_to_page(rodata_end)),
-                       (unsigned long) page_address(virt_to_page(data_start)));
+                       (unsigned long) __va(__pa_symbol(rodata_end)),
+                       (unsigned long) __va(__pa_symbol(_sdata)));
 }

 #endif
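Note: the mark_rodata_ro() hunk above frees the padding around .rodata through __va(__pa_symbol(sym)) instead of page_address(virt_to_page(sym)). A stand-alone sketch of the address arithmetic this relies on, using illustrative stand-in values for PAGE_OFFSET, __START_KERNEL_map and phys_base (not the values a running kernel would use): __pa_symbol() resolves a kernel-image address through the __START_KERNEL_map mapping, and __va() then yields the direct-map alias of the same physical page, which is what free_init_pages() operates on.

#include <stdio.h>

/* Illustrative stand-in values, not what a running kernel uses. */
#define PAGE_OFFSET       0xffff880000000000UL   /* base of the direct map */
#define START_KERNEL_MAP  0xffffffff80000000UL   /* base of the kernel image mapping */
static unsigned long phys_base = 0x1000000UL;    /* load offset of the kernel image */

/* Roughly what __pa_symbol() does: kernel-image virtual -> physical. */
static unsigned long pa_symbol(unsigned long x)
{
        return x - START_KERNEL_MAP + phys_base;
}

/* Roughly what __va() does: physical -> direct-map virtual. */
static unsigned long va(unsigned long pa)
{
        return pa + PAGE_OFFSET;
}

int main(void)
{
        unsigned long rodata_start = START_KERNEL_MAP + 0x200000UL; /* pretend symbol address */
        unsigned long pa = pa_symbol(rodata_start);

        printf("symbol va %#lx -> phys %#lx -> direct-map va %#lx\n",
               rodata_start, pa, va(pa));   /* the alias handed to free_init_pages() */
        return 0;
}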
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 2d125be1bae9..8504f3698753 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -193,7 +193,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
 static void __init setup_node_data(int nid, u64 start, u64 end)
 {
        const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
-       bool remapped = false;
        u64 nd_pa;
        void *nd;
        int tnid;
@@ -205,37 +204,28 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
        if (end && (end - start) < NODE_MIN_SIZE)
                return;

-       /* initialize remap allocator before aligning to ZONE_ALIGN */
-       init_alloc_remap(nid, start, end);
-
        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
               nid, start, end - 1);

        /*
-        * Allocate node data.  Try remap allocator first, node-local
-        * memory and then any node.  Never allocate in DMA zone.
+        * Allocate node data.  Try node-local memory and then any node.
+        * Never allocate in DMA zone.
         */
-       nd = alloc_remap(nid, nd_size);
-       if (nd) {
-               nd_pa = __pa(nd);
-               remapped = true;
-       } else {
-               nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
-               if (!nd_pa) {
-                       pr_err("Cannot find %zu bytes in node %d\n",
-                              nd_size, nid);
-                       return;
-               }
-               nd = __va(nd_pa);
+       nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+       if (!nd_pa) {
+               pr_err("Cannot find %zu bytes in node %d\n",
+                      nd_size, nid);
+               return;
        }
+       nd = __va(nd_pa);

        /* report and initialize */
-       printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]%s\n",
-              nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
+       printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]\n",
+              nd_pa, nd_pa + nd_size - 1);
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
-       if (!remapped && tnid != nid)
+       if (tnid != nid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 534255a36b6b..73a6d7395bd3 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -73,167 +73,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,

 extern unsigned long highend_pfn, highstart_pfn;

-#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
-
-static void *node_remap_start_vaddr[MAX_NUMNODES];
-void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
-
-/*
- * Remap memory allocator
- */
-static unsigned long node_remap_start_pfn[MAX_NUMNODES];
-static void *node_remap_end_vaddr[MAX_NUMNODES];
-static void *node_remap_alloc_vaddr[MAX_NUMNODES];
-
-/**
- * alloc_remap - Allocate remapped memory
- * @nid: NUMA node to allocate memory from
- * @size: The size of allocation
- *
- * Allocate @size bytes from the remap area of NUMA node @nid.  The
- * size of the remap area is predetermined by init_alloc_remap() and
- * only the callers considered there should call this function.  For
- * more info, please read the comment on top of init_alloc_remap().
- *
- * The caller must be ready to handle allocation failure from this
- * function and fall back to regular memory allocator in such cases.
- *
- * CONTEXT:
- * Single CPU early boot context.
- *
- * RETURNS:
- * Pointer to the allocated memory on success, %NULL on failure.
- */
-void *alloc_remap(int nid, unsigned long size)
-{
-       void *allocation = node_remap_alloc_vaddr[nid];
-
-       size = ALIGN(size, L1_CACHE_BYTES);
-
-       if (!allocation || (allocation + size) > node_remap_end_vaddr[nid])
-               return NULL;
-
-       node_remap_alloc_vaddr[nid] += size;
-       memset(allocation, 0, size);
-
-       return allocation;
-}
-
-#ifdef CONFIG_HIBERNATION
-/**
- * resume_map_numa_kva - add KVA mapping to the temporary page tables created
- *                       during resume from hibernation
- * @pgd_base - temporary resume page directory
- */
-void resume_map_numa_kva(pgd_t *pgd_base)
-{
-       int node;
-
-       for_each_online_node(node) {
-               unsigned long start_va, start_pfn, nr_pages, pfn;
-
-               start_va = (unsigned long)node_remap_start_vaddr[node];
-               start_pfn = node_remap_start_pfn[node];
-               nr_pages = (node_remap_end_vaddr[node] -
-                           node_remap_start_vaddr[node]) >> PAGE_SHIFT;
-
-               printk(KERN_DEBUG "%s: node %d\n", __func__, node);
-
-               for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) {
-                       unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
-                       pgd_t *pgd = pgd_base + pgd_index(vaddr);
-                       pud_t *pud = pud_offset(pgd, vaddr);
-                       pmd_t *pmd = pmd_offset(pud, vaddr);
-
-                       set_pmd(pmd, pfn_pmd(start_pfn + pfn,
-                                               PAGE_KERNEL_LARGE_EXEC));
-
-                       printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
-                               __func__, vaddr, start_pfn + pfn);
-               }
-       }
-}
-#endif
-
-/**
- * init_alloc_remap - Initialize remap allocator for a NUMA node
- * @nid: NUMA node to initizlie remap allocator for
- *
- * NUMA nodes may end up without any lowmem.  As allocating pgdat and
- * memmap on a different node with lowmem is inefficient, a special
- * remap allocator is implemented which can be used by alloc_remap().
- *
- * For each node, the amount of memory which will be necessary for
- * pgdat and memmap is calculated and two memory areas of the size are
- * allocated - one in the node and the other in lowmem; then, the area
- * in the node is remapped to the lowmem area.
- *
- * As pgdat and memmap must be allocated in lowmem anyway, this
- * doesn't waste lowmem address space; however, the actual lowmem
- * which gets remapped over is wasted.  The amount shouldn't be
- * problematic on machines this feature will be used.
- *
- * Initialization failure isn't fatal.  alloc_remap() is used
- * opportunistically and the callers will fall back to other memory
- * allocation mechanisms on failure.
- */
-void __init init_alloc_remap(int nid, u64 start, u64 end)
-{
-       unsigned long start_pfn = start >> PAGE_SHIFT;
-       unsigned long end_pfn = end >> PAGE_SHIFT;
-       unsigned long size, pfn;
-       u64 node_pa, remap_pa;
-       void *remap_va;
-
-       /*
-        * The acpi/srat node info can show hot-add memroy zones where
-        * memory could be added but not currently present.
-        */
-       printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
-              nid, start_pfn, end_pfn);
-
-       /* calculate the necessary space aligned to large page size */
-       size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
-       size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
-       size = ALIGN(size, LARGE_PAGE_BYTES);
-
-       /* allocate node memory and the lowmem remap area */
-       node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
-       if (!node_pa) {
-               pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
-                          size, nid);
-               return;
-       }
-       memblock_reserve(node_pa, size);
-
-       remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
-                                         max_low_pfn << PAGE_SHIFT,
-                                         size, LARGE_PAGE_BYTES);
-       if (!remap_pa) {
-               pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
-                          size, nid);
-               memblock_free(node_pa, size);
-               return;
-       }
-       memblock_reserve(remap_pa, size);
-       remap_va = phys_to_virt(remap_pa);
-
-       /* perform actual remap */
-       for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
-               set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
-                           (node_pa >> PAGE_SHIFT) + pfn,
-                           PAGE_KERNEL_LARGE);
-
-       /* initialize remap allocator parameters */
-       node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
-       node_remap_start_vaddr[nid] = remap_va;
-       node_remap_end_vaddr[nid] = remap_va + size;
-       node_remap_alloc_vaddr[nid] = remap_va;
-
-       printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
-              nid, node_pa, node_pa + size, remap_va, remap_va + size);
-}
-
 void __init initmem_init(void)
 {
        x86_numa_init();
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
index 7178c3afe05e..ad86ec91e640 100644
--- a/arch/x86/mm/numa_internal.h
+++ b/arch/x86/mm/numa_internal.h
@@ -21,12 +21,6 @@ void __init numa_reset_distance(void);

 void __init x86_numa_init(void);

-#ifdef CONFIG_X86_64
-static inline void init_alloc_remap(int nid, u64 start, u64 end) { }
-#else
-void __init init_alloc_remap(int nid, u64 start, u64 end);
-#endif
-
 #ifdef CONFIG_NUMA_EMU
 void __init numa_emulation(struct numa_meminfo *numa_meminfo,
                           int numa_dist_cnt);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 44acfcd6c16f..a1b1c88f9caf 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -94,12 +94,12 @@ static inline void split_page_count(int level) { }

 static inline unsigned long highmap_start_pfn(void)
 {
-       return __pa(_text) >> PAGE_SHIFT;
+       return __pa_symbol(_text) >> PAGE_SHIFT;
 }

 static inline unsigned long highmap_end_pfn(void)
 {
-       return __pa(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
+       return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
 }

 #endif
@@ -276,8 +276,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
-       if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
-                  __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
+       if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+                  __pa_symbol(__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
@@ -364,6 +364,37 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
 EXPORT_SYMBOL_GPL(lookup_address);

 /*
+ * This is necessary because __pa() does not work on some
+ * kinds of memory, like vmalloc() or the alloc_remap()
+ * areas on 32-bit NUMA systems.  The percpu areas can
+ * end up in this kind of memory, for instance.
+ *
+ * This could be optimized, but it is only intended to be
+ * used at inititalization time, and keeping it
+ * unoptimized should increase the testing coverage for
+ * the more obscure platforms.
+ */
+phys_addr_t slow_virt_to_phys(void *__virt_addr)
+{
+       unsigned long virt_addr = (unsigned long)__virt_addr;
+       phys_addr_t phys_addr;
+       unsigned long offset;
+       enum pg_level level;
+       unsigned long psize;
+       unsigned long pmask;
+       pte_t *pte;
+
+       pte = lookup_address(virt_addr, &level);
+       BUG_ON(!pte);
+       psize = page_level_size(level);
+       pmask = page_level_mask(level);
+       offset = virt_addr & ~pmask;
+       phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+       return (phys_addr | offset);
+}
+EXPORT_SYMBOL_GPL(slow_virt_to_phys);
+
+/*
  * Set the new pmd in all the pgds we know about:
  */
 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
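Note: the new slow_virt_to_phys() above recovers the physical address purely from the page-table walk: the pfn from the pte supplies the upper bits, and the offset of the virtual address within the mapping's page size supplies the rest. A stand-alone sketch of that mask/offset arithmetic, with a made-up pte_pfn() result and a 2 MiB mapping assumed for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        /* Made-up inputs: a virtual address inside a 2 MiB mapping and the
         * pfn its pte would report; only the arithmetic mirrors the function above. */
        unsigned long virt_addr = 0x7f0000123456UL;
        unsigned long pfn       = 0x12200UL;               /* pretend pte_pfn() result */
        unsigned long psize     = 2UL << 20;               /* page_level_size(PG_LEVEL_2M) */
        unsigned long pmask     = ~(psize - 1);            /* page_level_mask(PG_LEVEL_2M) */

        unsigned long offset    = virt_addr & ~pmask;            /* offset inside the 2 MiB page */
        unsigned long phys_addr = (pfn << PAGE_SHIFT) | offset;  /* same form as the return above */

        printf("offset %#lx, phys %#lx\n", offset, phys_addr);
        return 0;
}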
@@ -396,7 +427,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot, req_prot;
        int i, do_split = 1;
-       unsigned int level;
+       enum pg_level level;

        if (cpa->force_split)
                return 1;
@@ -412,15 +443,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,

        switch (level) {
        case PG_LEVEL_2M:
-               psize = PMD_PAGE_SIZE;
-               pmask = PMD_PAGE_MASK;
-               break;
 #ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
-               psize = PUD_PAGE_SIZE;
-               pmask = PUD_PAGE_MASK;
-               break;
 #endif
+               psize = page_level_size(level);
+               pmask = page_level_mask(level);
+               break;
        default:
                do_split = -EINVAL;
                goto out_unlock;
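Note: the try_preserve_large_page() hunk above replaces the per-level PMD_PAGE_SIZE/PUD_PAGE_MASK assignments with the page_level_size()/page_level_mask() helpers also used by slow_virt_to_phys(). Assuming those helpers boil down to the usual shift arithmetic for x86 with 4 KiB base pages (a sketch of the assumed semantics, not the kernel's definition), both large-page cases collapse into one expression:

#include <stdio.h>

enum pg_level { PG_LEVEL_4K = 1, PG_LEVEL_2M, PG_LEVEL_1G };

/* Assumed helper semantics: 4K -> shift 12, 2M -> shift 21, 1G -> shift 30. */
static unsigned long level_shift(enum pg_level level)
{
        return 12 + (level - PG_LEVEL_4K) * 9;
}

static unsigned long level_size(enum pg_level level)
{
        return 1UL << level_shift(level);
}

static unsigned long level_mask(enum pg_level level)
{
        return ~(level_size(level) - 1);
}

int main(void)
{
        int l;

        for (l = PG_LEVEL_4K; l <= PG_LEVEL_1G; l++)
                printf("level %d: size %#lx, mask %#lx\n",
                       l, level_size(l), level_mask(l));
        return 0;
}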
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 0eb572eda406..2610bd93c896 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -560,10 +560,10 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 {
        unsigned long id_sz;

-       if (base >= __pa(high_memory))
+       if (base > __pa(high_memory-1))
                return 0;

-       id_sz = (__pa(high_memory) < base + size) ?
+       id_sz = (__pa(high_memory-1) <= base + size) ?
                __pa(high_memory) - base :
                size;

diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e27fbf887f3b..193350b51f90 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -334,7 +334,12 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
        if (changed && dirty) {
                *pmdp = entry;
                pmd_update_defer(vma->vm_mm, address, pmdp);
-               flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+               /*
+                * We had a write-protection fault here and changed the pmd
+                * to to more permissive. No need to flush the TLB for that,
+                * #PF is architecturally guaranteed to do that and in the
+                * worst-case we'll generate a spurious fault.
+                */
        }

        return changed;
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index d2e2735327b4..e666cbbb9261 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -1,3 +1,4 @@
+#include <linux/bootmem.h>
 #include <linux/mmdebug.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -8,33 +9,54 @@

 #ifdef CONFIG_X86_64

+#ifdef CONFIG_DEBUG_VIRTUAL
 unsigned long __phys_addr(unsigned long x)
 {
-       if (x >= __START_KERNEL_map) {
-               x -= __START_KERNEL_map;
-               VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
-               x += phys_base;
+       unsigned long y = x - __START_KERNEL_map;
+
+       /* use the carry flag to determine if x was < __START_KERNEL_map */
+       if (unlikely(x > y)) {
+               x = y + phys_base;
+
+               VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
        } else {
-               VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-               x -= PAGE_OFFSET;
-               VIRTUAL_BUG_ON(!phys_addr_valid(x));
+               x = y + (__START_KERNEL_map - PAGE_OFFSET);
+
+               /* carry flag will be set if starting x was >= PAGE_OFFSET */
+               VIRTUAL_BUG_ON((x > y) || !phys_addr_valid(x));
        }
+
        return x;
 }
 EXPORT_SYMBOL(__phys_addr);

+unsigned long __phys_addr_symbol(unsigned long x)
+{
+       unsigned long y = x - __START_KERNEL_map;
+
+       /* only check upper bounds since lower bounds will trigger carry */
+       VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
+
+       return y + phys_base;
+}
+EXPORT_SYMBOL(__phys_addr_symbol);
+#endif
+
 bool __virt_addr_valid(unsigned long x)
 {
-       if (x >= __START_KERNEL_map) {
-               x -= __START_KERNEL_map;
-               if (x >= KERNEL_IMAGE_SIZE)
+       unsigned long y = x - __START_KERNEL_map;
+
+       /* use the carry flag to determine if x was < __START_KERNEL_map */
+       if (unlikely(x > y)) {
+               x = y + phys_base;
+
+               if (y >= KERNEL_IMAGE_SIZE)
                        return false;
-               x += phys_base;
        } else {
-               if (x < PAGE_OFFSET)
-                       return false;
-               x -= PAGE_OFFSET;
-               if (!phys_addr_valid(x))
+               x = y + (__START_KERNEL_map - PAGE_OFFSET);
+
+               /* carry flag will be set if starting x was >= PAGE_OFFSET */
+               if ((x > y) || !phys_addr_valid(x))
                        return false;
        }

@@ -47,10 +69,16 @@ EXPORT_SYMBOL(__virt_addr_valid);
 #ifdef CONFIG_DEBUG_VIRTUAL
 unsigned long __phys_addr(unsigned long x)
 {
+       unsigned long phys_addr = x - PAGE_OFFSET;
        /* VMALLOC_* aren't constants  */
        VIRTUAL_BUG_ON(x < PAGE_OFFSET);
        VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
-       return x - PAGE_OFFSET;
+       /* max_low_pfn is set early, but not _that_ early */
+       if (max_low_pfn) {
+               VIRTUAL_BUG_ON((phys_addr >> PAGE_SHIFT) > max_low_pfn);
+               BUG_ON(slow_virt_to_phys((void *)x) != phys_addr);
+       }
+       return phys_addr;
 }
 EXPORT_SYMBOL(__phys_addr);
 #endif
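Note: the rewritten __phys_addr()/__virt_addr_valid() above replace the explicit x >= __START_KERNEL_map range check with unsigned wraparound: after y = x - __START_KERNEL_map, the comparison x > y is true exactly when the subtraction did not wrap, i.e. when x was in the high kernel mapping; that is the carry flag the comments refer to. A stand-alone check of the identity (the __START_KERNEL_map stand-in below is illustrative):

#include <assert.h>
#include <stdio.h>

#define START_KERNEL_MAP 0xffffffff80000000UL   /* illustrative stand-in */

int main(void)
{
        unsigned long samples[] = {
                0x0UL,
                START_KERNEL_MAP - 1,
                START_KERNEL_MAP,
                START_KERNEL_MAP + 0x123456UL,
                ~0UL,
        };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                unsigned long x = samples[i];
                unsigned long y = x - START_KERNEL_MAP;   /* may wrap around */

                /* (x > y) <=> the subtraction did not wrap <=> x >= START_KERNEL_MAP */
                assert((x > y) == (x >= START_KERNEL_MAP));
                printf("x = %#018lx  y = %#018lx  high mapping: %s\n",
                       x, y, (x > y) ? "yes" : "no");
        }
        return 0;
}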