diff options
author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-07 11:44:24 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-07 11:44:24 -0400 |
commit | e3ebadd95cb621e2c7436f3d3646447ac9d5c16d (patch) | |
tree | 510b41550cc3751cfb565e3e2ba195a68b784a03 | |
parent | 15700770ef7c5d12e2f1659d2ddbeb3f658d9f37 (diff) |
Revert "[PATCH] x86: __pa and __pa_symbol address space separation"
This was broken. It adds complexity, for no good reason. Rather than
separate __pa() and __pa_symbol(), we should deprecate __pa_symbol(),
and preferably __pa() too - and just use "virt_to_phys()" instead, which
is more readable and has nicer semantics.
However, right now, just undo the separation, and make __pa_symbol() be
the exact same as __pa(). That fixes the bugs this patch introduced,
and we can do the fairly obvious cleanups later.
Do the new __phys_addr() function (which is now the actual workhorse for
the unified __pa()/__pa_symbol()) as a real external function, that way
all the potential issues with compile/link-time optimizations of
constant symbol addresses go away, and we can also, if we choose to, add
more sanity-checking of the argument.
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Vivek Goyal <vgoyal@in.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | arch/i386/kernel/alternative.c | 4 | ||||
-rw-r--r-- | arch/i386/mm/init.c | 15 | ||||
-rw-r--r-- | arch/x86_64/kernel/machine_kexec.c | 16 | ||||
-rw-r--r-- | arch/x86_64/kernel/setup.c | 9 | ||||
-rw-r--r-- | arch/x86_64/kernel/smp.c | 2 | ||||
-rw-r--r-- | arch/x86_64/mm/init.c | 31 | ||||
-rw-r--r-- | arch/x86_64/mm/ioremap.c | 9 | ||||
-rw-r--r-- | arch/x86_64/mm/pageattr.c | 16 | ||||
-rw-r--r-- | include/asm-x86_64/page.h | 18 | ||||
-rw-r--r-- | include/asm-x86_64/pgtable.h | 4 |
10 files changed, 66 insertions(+), 58 deletions(-)
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c index e5cec6685cc5..d8cda14fff8b 100644 --- a/arch/i386/kernel/alternative.c +++ b/arch/i386/kernel/alternative.c | |||
@@ -390,8 +390,8 @@ void __init alternative_instructions(void) | |||
390 | _text, _etext); | 390 | _text, _etext); |
391 | } | 391 | } |
392 | free_init_pages("SMP alternatives", | 392 | free_init_pages("SMP alternatives", |
393 | __pa_symbol(&__smp_locks), | 393 | (unsigned long)__smp_locks, |
394 | __pa_symbol(&__smp_locks_end)); | 394 | (unsigned long)__smp_locks_end); |
395 | } else { | 395 | } else { |
396 | alternatives_smp_module_add(NULL, "core kernel", | 396 | alternatives_smp_module_add(NULL, "core kernel", |
397 | __smp_locks, __smp_locks_end, | 397 | __smp_locks, __smp_locks_end, |
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index dbe16f63a566..1a7197e89eb4 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c | |||
@@ -843,11 +843,10 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
843 | unsigned long addr; | 843 | unsigned long addr; |
844 | 844 | ||
845 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | 845 | for (addr = begin; addr < end; addr += PAGE_SIZE) { |
846 | struct page *page = pfn_to_page(addr >> PAGE_SHIFT); | 846 | ClearPageReserved(virt_to_page(addr)); |
847 | ClearPageReserved(page); | 847 | init_page_count(virt_to_page(addr)); |
848 | init_page_count(page); | 848 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
849 | memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE); | 849 | free_page(addr); |
850 | __free_page(page); | ||
851 | totalram_pages++; | 850 | totalram_pages++; |
852 | } | 851 | } |
853 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); | 852 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); |
@@ -856,14 +855,14 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
856 | void free_initmem(void) | 855 | void free_initmem(void) |
857 | { | 856 | { |
858 | free_init_pages("unused kernel memory", | 857 | free_init_pages("unused kernel memory", |
859 | __pa_symbol(&__init_begin), | 858 | (unsigned long)(&__init_begin), |
860 | __pa_symbol(&__init_end)); | 859 | (unsigned long)(&__init_end)); |
861 | } | 860 | } |
862 | 861 | ||
863 | #ifdef CONFIG_BLK_DEV_INITRD | 862 | #ifdef CONFIG_BLK_DEV_INITRD |
864 | void free_initrd_mem(unsigned long start, unsigned long end) | 863 | void free_initrd_mem(unsigned long start, unsigned long end) |
865 | { | 864 | { |
866 | free_init_pages("initrd memory", __pa(start), __pa(end)); | 865 | free_init_pages("initrd memory", start, end); |
867 | } | 866 | } |
868 | #endif | 867 | #endif |
869 | 868 | ||
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c index a8bb33c1a8f2..c3a554703672 100644 --- a/arch/x86_64/kernel/machine_kexec.c +++ b/arch/x86_64/kernel/machine_kexec.c | |||
@@ -189,21 +189,21 @@ NORET_TYPE void machine_kexec(struct kimage *image) | |||
189 | control_page = page_address(image->control_code_page) + PAGE_SIZE; | 189 | control_page = page_address(image->control_code_page) + PAGE_SIZE; |
190 | memcpy(control_page, relocate_kernel, PAGE_SIZE); | 190 | memcpy(control_page, relocate_kernel, PAGE_SIZE); |
191 | 191 | ||
192 | page_list[PA_CONTROL_PAGE] = __pa(control_page); | 192 | page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page); |
193 | page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; | 193 | page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; |
194 | page_list[PA_PGD] = __pa_symbol(&kexec_pgd); | 194 | page_list[PA_PGD] = virt_to_phys(&kexec_pgd); |
195 | page_list[VA_PGD] = (unsigned long)kexec_pgd; | 195 | page_list[VA_PGD] = (unsigned long)kexec_pgd; |
196 | page_list[PA_PUD_0] = __pa_symbol(&kexec_pud0); | 196 | page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0); |
197 | page_list[VA_PUD_0] = (unsigned long)kexec_pud0; | 197 | page_list[VA_PUD_0] = (unsigned long)kexec_pud0; |
198 | page_list[PA_PMD_0] = __pa_symbol(&kexec_pmd0); | 198 | page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0); |
199 | page_list[VA_PMD_0] = (unsigned long)kexec_pmd0; | 199 | page_list[VA_PMD_0] = (unsigned long)kexec_pmd0; |
200 | page_list[PA_PTE_0] = __pa_symbol(&kexec_pte0); | 200 | page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0); |
201 | page_list[VA_PTE_0] = (unsigned long)kexec_pte0; | 201 | page_list[VA_PTE_0] = (unsigned long)kexec_pte0; |
202 | page_list[PA_PUD_1] = __pa_symbol(&kexec_pud1); | 202 | page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1); |
203 | page_list[VA_PUD_1] = (unsigned long)kexec_pud1; | 203 | page_list[VA_PUD_1] = (unsigned long)kexec_pud1; |
204 | page_list[PA_PMD_1] = __pa_symbol(&kexec_pmd1); | 204 | page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1); |
205 | page_list[VA_PMD_1] = (unsigned long)kexec_pmd1; | 205 | page_list[VA_PMD_1] = (unsigned long)kexec_pmd1; |
206 | page_list[PA_PTE_1] = __pa_symbol(&kexec_pte1); | 206 | page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1); |
207 | page_list[VA_PTE_1] = (unsigned long)kexec_pte1; | 207 | page_list[VA_PTE_1] = (unsigned long)kexec_pte1; |
208 | 208 | ||
209 | page_list[PA_TABLE_PAGE] = | 209 | page_list[PA_TABLE_PAGE] = |
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index db30b5bcef61..db51577bda32 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c | |||
@@ -245,12 +245,11 @@ void __init setup_arch(char **cmdline_p) | |||
245 | init_mm.end_code = (unsigned long) &_etext; | 245 | init_mm.end_code = (unsigned long) &_etext; |
246 | init_mm.end_data = (unsigned long) &_edata; | 246 | init_mm.end_data = (unsigned long) &_edata; |
247 | init_mm.brk = (unsigned long) &_end; | 247 | init_mm.brk = (unsigned long) &_end; |
248 | init_mm.pgd = __va(__pa_symbol(&init_level4_pgt)); | ||
249 | 248 | ||
250 | code_resource.start = __pa_symbol(&_text); | 249 | code_resource.start = virt_to_phys(&_text); |
251 | code_resource.end = __pa_symbol(&_etext)-1; | 250 | code_resource.end = virt_to_phys(&_etext)-1; |
252 | data_resource.start = __pa_symbol(&_etext); | 251 | data_resource.start = virt_to_phys(&_etext); |
253 | data_resource.end = __pa_symbol(&_edata)-1; | 252 | data_resource.end = virt_to_phys(&_edata)-1; |
254 | 253 | ||
255 | early_identify_cpu(&boot_cpu_data); | 254 | early_identify_cpu(&boot_cpu_data); |
256 | 255 | ||
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c index 22abae4e9f39..bd1d123947ce 100644 --- a/arch/x86_64/kernel/smp.c +++ b/arch/x86_64/kernel/smp.c | |||
@@ -76,7 +76,7 @@ static inline void leave_mm(int cpu) | |||
76 | if (read_pda(mmu_state) == TLBSTATE_OK) | 76 | if (read_pda(mmu_state) == TLBSTATE_OK) |
77 | BUG(); | 77 | BUG(); |
78 | cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask); | 78 | cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask); |
79 | load_cr3(init_mm.pgd); | 79 | load_cr3(swapper_pg_dir); |
80 | } | 80 | } |
81 | 81 | ||
82 | /* | 82 | /* |
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index 282b0a8f00ad..c0822683b916 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c | |||
@@ -572,13 +572,13 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
572 | 572 | ||
573 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); | 573 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); |
574 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | 574 | for (addr = begin; addr < end; addr += PAGE_SIZE) { |
575 | struct page *page = pfn_to_page(addr >> PAGE_SHIFT); | 575 | ClearPageReserved(virt_to_page(addr)); |
576 | ClearPageReserved(page); | 576 | init_page_count(virt_to_page(addr)); |
577 | init_page_count(page); | 577 | memset((void *)(addr & ~(PAGE_SIZE-1)), |
578 | memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE); | 578 | POISON_FREE_INITMEM, PAGE_SIZE); |
579 | if (addr >= __START_KERNEL_map) | 579 | if (addr >= __START_KERNEL_map) |
580 | change_page_attr_addr(addr, 1, __pgprot(0)); | 580 | change_page_attr_addr(addr, 1, __pgprot(0)); |
581 | __free_page(page); | 581 | free_page(addr); |
582 | totalram_pages++; | 582 | totalram_pages++; |
583 | } | 583 | } |
584 | if (addr > __START_KERNEL_map) | 584 | if (addr > __START_KERNEL_map) |
@@ -588,26 +588,31 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
588 | void free_initmem(void) | 588 | void free_initmem(void) |
589 | { | 589 | { |
590 | free_init_pages("unused kernel memory", | 590 | free_init_pages("unused kernel memory", |
591 | __pa_symbol(&__init_begin), | 591 | (unsigned long)(&__init_begin), |
592 | __pa_symbol(&__init_end)); | 592 | (unsigned long)(&__init_end)); |
593 | } | 593 | } |
594 | 594 | ||
595 | #ifdef CONFIG_DEBUG_RODATA | 595 | #ifdef CONFIG_DEBUG_RODATA |
596 | 596 | ||
597 | void mark_rodata_ro(void) | 597 | void mark_rodata_ro(void) |
598 | { | 598 | { |
599 | unsigned long start = PFN_ALIGN(__va(__pa_symbol(&_stext))), size; | 599 | unsigned long start = (unsigned long)_stext, end; |
600 | 600 | ||
601 | #ifdef CONFIG_HOTPLUG_CPU | 601 | #ifdef CONFIG_HOTPLUG_CPU |
602 | /* It must still be possible to apply SMP alternatives. */ | 602 | /* It must still be possible to apply SMP alternatives. */ |
603 | if (num_possible_cpus() > 1) | 603 | if (num_possible_cpus() > 1) |
604 | start = PFN_ALIGN(__va(__pa_symbol(&_etext))); | 604 | start = (unsigned long)_etext; |
605 | #endif | 605 | #endif |
606 | size = (unsigned long)__va(__pa_symbol(&__end_rodata)) - start; | 606 | end = (unsigned long)__end_rodata; |
607 | change_page_attr_addr(start, size >> PAGE_SHIFT, PAGE_KERNEL_RO); | 607 | start = (start + PAGE_SIZE - 1) & PAGE_MASK; |
608 | end &= PAGE_MASK; | ||
609 | if (end <= start) | ||
610 | return; | ||
611 | |||
612 | change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO); | ||
608 | 613 | ||
609 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", | 614 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", |
610 | size >> 10); | 615 | (end - start) >> 10); |
611 | 616 | ||
612 | /* | 617 | /* |
613 | * change_page_attr_addr() requires a global_flush_tlb() call after it. | 618 | * change_page_attr_addr() requires a global_flush_tlb() call after it. |
@@ -622,7 +627,7 @@ void mark_rodata_ro(void) | |||
622 | #ifdef CONFIG_BLK_DEV_INITRD | 627 | #ifdef CONFIG_BLK_DEV_INITRD |
623 | void free_initrd_mem(unsigned long start, unsigned long end) | 628 | void free_initrd_mem(unsigned long start, unsigned long end) |
624 | { | 629 | { |
625 | free_init_pages("initrd memory", __pa(start), __pa(end)); | 630 | free_init_pages("initrd memory", start, end); |
626 | } | 631 | } |
627 | #endif | 632 | #endif |
628 | 633 | ||
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c index c6e5e8d401a4..6cac90aa5032 100644 --- a/arch/x86_64/mm/ioremap.c +++ b/arch/x86_64/mm/ioremap.c | |||
@@ -13,12 +13,21 @@ | |||
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | |||
16 | #include <asm/pgalloc.h> | 17 | #include <asm/pgalloc.h> |
17 | #include <asm/fixmap.h> | 18 | #include <asm/fixmap.h> |
18 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
19 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
20 | #include <asm/proto.h> | 21 | #include <asm/proto.h> |
21 | 22 | ||
23 | unsigned long __phys_addr(unsigned long x) | ||
24 | { | ||
25 | if (x >= __START_KERNEL_map) | ||
26 | return x - __START_KERNEL_map + phys_base; | ||
27 | return x - PAGE_OFFSET; | ||
28 | } | ||
29 | EXPORT_SYMBOL(__phys_addr); | ||
30 | |||
22 | #define ISA_START_ADDRESS 0xa0000 | 31 | #define ISA_START_ADDRESS 0xa0000 |
23 | #define ISA_END_ADDRESS 0x100000 | 32 | #define ISA_END_ADDRESS 0x100000 |
24 | 33 | ||
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c index bf4aa8dd4254..d653d0bf3df6 100644 --- a/arch/x86_64/mm/pageattr.c +++ b/arch/x86_64/mm/pageattr.c | |||
@@ -51,6 +51,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot, | |||
51 | SetPagePrivate(base); | 51 | SetPagePrivate(base); |
52 | page_private(base) = 0; | 52 | page_private(base) = 0; |
53 | 53 | ||
54 | address = __pa(address); | ||
54 | addr = address & LARGE_PAGE_MASK; | 55 | addr = address & LARGE_PAGE_MASK; |
55 | pbase = (pte_t *)page_address(base); | 56 | pbase = (pte_t *)page_address(base); |
56 | for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { | 57 | for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { |
@@ -100,12 +101,13 @@ static inline void save_page(struct page *fpage) | |||
100 | * No more special protections in this 2/4MB area - revert to a | 101 | * No more special protections in this 2/4MB area - revert to a |
101 | * large page again. | 102 | * large page again. |
102 | */ | 103 | */ |
103 | static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_prot) | 104 | static void revert_page(unsigned long address, pgprot_t ref_prot) |
104 | { | 105 | { |
105 | pgd_t *pgd; | 106 | pgd_t *pgd; |
106 | pud_t *pud; | 107 | pud_t *pud; |
107 | pmd_t *pmd; | 108 | pmd_t *pmd; |
108 | pte_t large_pte; | 109 | pte_t large_pte; |
110 | unsigned long pfn; | ||
109 | 111 | ||
110 | pgd = pgd_offset_k(address); | 112 | pgd = pgd_offset_k(address); |
111 | BUG_ON(pgd_none(*pgd)); | 113 | BUG_ON(pgd_none(*pgd)); |
@@ -113,6 +115,7 @@ static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_p | |||
113 | BUG_ON(pud_none(*pud)); | 115 | BUG_ON(pud_none(*pud)); |
114 | pmd = pmd_offset(pud, address); | 116 | pmd = pmd_offset(pud, address); |
115 | BUG_ON(pmd_val(*pmd) & _PAGE_PSE); | 117 | BUG_ON(pmd_val(*pmd) & _PAGE_PSE); |
118 | pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT; | ||
116 | large_pte = pfn_pte(pfn, ref_prot); | 119 | large_pte = pfn_pte(pfn, ref_prot); |
117 | large_pte = pte_mkhuge(large_pte); | 120 | large_pte = pte_mkhuge(large_pte); |
118 | set_pte((pte_t *)pmd, large_pte); | 121 | set_pte((pte_t *)pmd, large_pte); |
@@ -138,8 +141,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot, | |||
138 | */ | 141 | */ |
139 | struct page *split; | 142 | struct page *split; |
140 | ref_prot2 = pte_pgprot(pte_clrhuge(*kpte)); | 143 | ref_prot2 = pte_pgprot(pte_clrhuge(*kpte)); |
141 | split = split_large_page(pfn << PAGE_SHIFT, prot, | 144 | split = split_large_page(address, prot, ref_prot2); |
142 | ref_prot2); | ||
143 | if (!split) | 145 | if (!split) |
144 | return -ENOMEM; | 146 | return -ENOMEM; |
145 | set_pte(kpte, mk_pte(split, ref_prot2)); | 147 | set_pte(kpte, mk_pte(split, ref_prot2)); |
@@ -158,7 +160,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot, | |||
158 | 160 | ||
159 | if (page_private(kpte_page) == 0) { | 161 | if (page_private(kpte_page) == 0) { |
160 | save_page(kpte_page); | 162 | save_page(kpte_page); |
161 | revert_page(address, pfn, ref_prot); | 163 | revert_page(address, ref_prot); |
162 | } | 164 | } |
163 | return 0; | 165 | return 0; |
164 | } | 166 | } |
@@ -178,7 +180,6 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot, | |||
178 | */ | 180 | */ |
179 | int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot) | 181 | int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot) |
180 | { | 182 | { |
181 | unsigned long phys_base_pfn = __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT; | ||
182 | int err = 0, kernel_map = 0; | 183 | int err = 0, kernel_map = 0; |
183 | int i; | 184 | int i; |
184 | 185 | ||
@@ -199,11 +200,10 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot) | |||
199 | } | 200 | } |
200 | /* Handle kernel mapping too which aliases part of the | 201 | /* Handle kernel mapping too which aliases part of the |
201 | * lowmem */ | 202 | * lowmem */ |
202 | if ((pfn >= phys_base_pfn) && | 203 | if (__pa(address) < KERNEL_TEXT_SIZE) { |
203 | ((pfn - phys_base_pfn) < (KERNEL_TEXT_SIZE >> PAGE_SHIFT))) { | ||
204 | unsigned long addr2; | 204 | unsigned long addr2; |
205 | pgprot_t prot2; | 205 | pgprot_t prot2; |
206 | addr2 = __START_KERNEL_map + ((pfn - phys_base_pfn) << PAGE_SHIFT); | 206 | addr2 = __START_KERNEL_map + __pa(address); |
207 | /* Make sure the kernel mappings stay executable */ | 207 | /* Make sure the kernel mappings stay executable */ |
208 | prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot))); | 208 | prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot))); |
209 | err = __change_page_attr(addr2, pfn, prot2, | 209 | err = __change_page_attr(addr2, pfn, prot2, |
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index b17fc16ec2eb..4d04e2479569 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h | |||
@@ -94,26 +94,22 @@ extern unsigned long phys_base; | |||
94 | 94 | ||
95 | #define KERNEL_TEXT_SIZE (40*1024*1024) | 95 | #define KERNEL_TEXT_SIZE (40*1024*1024) |
96 | #define KERNEL_TEXT_START 0xffffffff80000000 | 96 | #define KERNEL_TEXT_START 0xffffffff80000000 |
97 | #define PAGE_OFFSET __PAGE_OFFSET | ||
97 | 98 | ||
98 | #ifndef __ASSEMBLY__ | 99 | #ifndef __ASSEMBLY__ |
99 | 100 | ||
100 | #include <asm/bug.h> | 101 | #include <asm/bug.h> |
101 | 102 | ||
102 | #endif /* __ASSEMBLY__ */ | 103 | extern unsigned long __phys_addr(unsigned long); |
103 | 104 | ||
104 | #define PAGE_OFFSET __PAGE_OFFSET | 105 | #endif /* __ASSEMBLY__ */ |
105 | 106 | ||
106 | /* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol. | 107 | #define __pa(x) __phys_addr((unsigned long)(x)) |
107 | Otherwise you risk miscompilation. */ | 108 | #define __pa_symbol(x) __phys_addr((unsigned long)(x)) |
108 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) | ||
109 | /* __pa_symbol should be used for C visible symbols. | ||
110 | This seems to be the official gcc blessed way to do such arithmetic. */ | ||
111 | #define __pa_symbol(x) \ | ||
112 | ({unsigned long v; \ | ||
113 | asm("" : "=r" (v) : "0" (x)); \ | ||
114 | ((v - __START_KERNEL_map) + phys_base); }) | ||
115 | 109 | ||
116 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | 110 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
111 | #define __boot_va(x) __va(x) | ||
112 | #define __boot_pa(x) __pa(x) | ||
117 | #ifdef CONFIG_FLATMEM | 113 | #ifdef CONFIG_FLATMEM |
118 | #define pfn_valid(pfn) ((pfn) < end_pfn) | 114 | #define pfn_valid(pfn) ((pfn) < end_pfn) |
119 | #endif | 115 | #endif |
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 599993f6ba84..da3390faaea6 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h | |||
@@ -19,7 +19,7 @@ extern pmd_t level2_kernel_pgt[512]; | |||
19 | extern pgd_t init_level4_pgt[]; | 19 | extern pgd_t init_level4_pgt[]; |
20 | extern unsigned long __supported_pte_mask; | 20 | extern unsigned long __supported_pte_mask; |
21 | 21 | ||
22 | #define swapper_pg_dir ((pgd_t *)NULL) | 22 | #define swapper_pg_dir init_level4_pgt |
23 | 23 | ||
24 | extern void paging_init(void); | 24 | extern void paging_init(void); |
25 | extern void clear_kernel_mapping(unsigned long addr, unsigned long size); | 25 | extern void clear_kernel_mapping(unsigned long addr, unsigned long size); |
@@ -29,7 +29,7 @@ extern void clear_kernel_mapping(unsigned long addr, unsigned long size); | |||
29 | * for zero-mapped memory areas etc.. | 29 | * for zero-mapped memory areas etc.. |
30 | */ | 30 | */ |
31 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | 31 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; |
32 | #define ZERO_PAGE(vaddr) (pfn_to_page(__pa_symbol(&empty_zero_page) >> PAGE_SHIFT)) | 32 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) |
33 | 33 | ||
34 | #endif /* !__ASSEMBLY__ */ | 34 | #endif /* !__ASSEMBLY__ */ |
35 | 35 | ||