about summary refs log tree commit diff stats
path: root/arch/x86_64
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-07 11:44:24 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-07 11:44:24 -0400
commite3ebadd95cb621e2c7436f3d3646447ac9d5c16d (patch)
tree510b41550cc3751cfb565e3e2ba195a68b784a03 /arch/x86_64
parent15700770ef7c5d12e2f1659d2ddbeb3f658d9f37 (diff)
Revert "[PATCH] x86: __pa and __pa_symbol address space separation"
This was broken. It adds complexity, for no good reason. Rather than separate __pa() and __pa_symbol(), we should deprecate __pa_symbol(), and preferably __pa() too - and just use "virt_to_phys()" instead, which is more readable and has nicer semantics. However, right now, just undo the separation, and make __pa_symbol() be the exact same as __pa(). That fixes the bugs this patch introduced, and we can do the fairly obvious cleanups later. Do the new __phys_addr() function (which is now the actual workhorse for the unified __pa()/__pa_symbol()) as a real external function, that way all the potential issues with compile/link-time optimizations of constant symbol addresses go away, and we can also, if we choose to, add more sanity-checking of the argument. Cc: Eric W. Biederman <ebiederm@xmission.com> Cc: Vivek Goyal <vgoyal@in.ibm.com> Cc: Andi Kleen <ak@suse.de> Cc: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86_64')
-rw-r--r--arch/x86_64/kernel/machine_kexec.c16
-rw-r--r--arch/x86_64/kernel/setup.c9
-rw-r--r--arch/x86_64/kernel/smp.c2
-rw-r--r--arch/x86_64/mm/init.c31
-rw-r--r--arch/x86_64/mm/ioremap.c9
-rw-r--r--arch/x86_64/mm/pageattr.c16
6 files changed, 48 insertions, 35 deletions
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index a8bb33c1a8f2..c3a554703672 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -189,21 +189,21 @@ NORET_TYPE void machine_kexec(struct kimage *image)
189 control_page = page_address(image->control_code_page) + PAGE_SIZE; 189 control_page = page_address(image->control_code_page) + PAGE_SIZE;
190 memcpy(control_page, relocate_kernel, PAGE_SIZE); 190 memcpy(control_page, relocate_kernel, PAGE_SIZE);
191 191
192 page_list[PA_CONTROL_PAGE] = __pa(control_page); 192 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
193 page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; 193 page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
194 page_list[PA_PGD] = __pa_symbol(&kexec_pgd); 194 page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
195 page_list[VA_PGD] = (unsigned long)kexec_pgd; 195 page_list[VA_PGD] = (unsigned long)kexec_pgd;
196 page_list[PA_PUD_0] = __pa_symbol(&kexec_pud0); 196 page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
197 page_list[VA_PUD_0] = (unsigned long)kexec_pud0; 197 page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
198 page_list[PA_PMD_0] = __pa_symbol(&kexec_pmd0); 198 page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
199 page_list[VA_PMD_0] = (unsigned long)kexec_pmd0; 199 page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
200 page_list[PA_PTE_0] = __pa_symbol(&kexec_pte0); 200 page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
201 page_list[VA_PTE_0] = (unsigned long)kexec_pte0; 201 page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
202 page_list[PA_PUD_1] = __pa_symbol(&kexec_pud1); 202 page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
203 page_list[VA_PUD_1] = (unsigned long)kexec_pud1; 203 page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
204 page_list[PA_PMD_1] = __pa_symbol(&kexec_pmd1); 204 page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
205 page_list[VA_PMD_1] = (unsigned long)kexec_pmd1; 205 page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
206 page_list[PA_PTE_1] = __pa_symbol(&kexec_pte1); 206 page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
207 page_list[VA_PTE_1] = (unsigned long)kexec_pte1; 207 page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
208 208
209 page_list[PA_TABLE_PAGE] = 209 page_list[PA_TABLE_PAGE] =
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index db30b5bcef61..db51577bda32 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -245,12 +245,11 @@ void __init setup_arch(char **cmdline_p)
245 init_mm.end_code = (unsigned long) &_etext; 245 init_mm.end_code = (unsigned long) &_etext;
246 init_mm.end_data = (unsigned long) &_edata; 246 init_mm.end_data = (unsigned long) &_edata;
247 init_mm.brk = (unsigned long) &_end; 247 init_mm.brk = (unsigned long) &_end;
248 init_mm.pgd = __va(__pa_symbol(&init_level4_pgt));
249 248
250 code_resource.start = __pa_symbol(&_text); 249 code_resource.start = virt_to_phys(&_text);
251 code_resource.end = __pa_symbol(&_etext)-1; 250 code_resource.end = virt_to_phys(&_etext)-1;
252 data_resource.start = __pa_symbol(&_etext); 251 data_resource.start = virt_to_phys(&_etext);
253 data_resource.end = __pa_symbol(&_edata)-1; 252 data_resource.end = virt_to_phys(&_edata)-1;
254 253
255 early_identify_cpu(&boot_cpu_data); 254 early_identify_cpu(&boot_cpu_data);
256 255
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 22abae4e9f39..bd1d123947ce 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -76,7 +76,7 @@ static inline void leave_mm(int cpu)
76 if (read_pda(mmu_state) == TLBSTATE_OK) 76 if (read_pda(mmu_state) == TLBSTATE_OK)
77 BUG(); 77 BUG();
78 cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask); 78 cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
79 load_cr3(init_mm.pgd); 79 load_cr3(swapper_pg_dir);
80} 80}
81 81
82/* 82/*
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 282b0a8f00ad..c0822683b916 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -572,13 +572,13 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
572 572
573 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); 573 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
574 for (addr = begin; addr < end; addr += PAGE_SIZE) { 574 for (addr = begin; addr < end; addr += PAGE_SIZE) {
575 struct page *page = pfn_to_page(addr >> PAGE_SHIFT); 575 ClearPageReserved(virt_to_page(addr));
576 ClearPageReserved(page); 576 init_page_count(virt_to_page(addr));
577 init_page_count(page); 577 memset((void *)(addr & ~(PAGE_SIZE-1)),
578 memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE); 578 POISON_FREE_INITMEM, PAGE_SIZE);
579 if (addr >= __START_KERNEL_map) 579 if (addr >= __START_KERNEL_map)
580 change_page_attr_addr(addr, 1, __pgprot(0)); 580 change_page_attr_addr(addr, 1, __pgprot(0));
581 __free_page(page); 581 free_page(addr);
582 totalram_pages++; 582 totalram_pages++;
583 } 583 }
584 if (addr > __START_KERNEL_map) 584 if (addr > __START_KERNEL_map)
@@ -588,26 +588,31 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
588void free_initmem(void) 588void free_initmem(void)
589{ 589{
590 free_init_pages("unused kernel memory", 590 free_init_pages("unused kernel memory",
591 __pa_symbol(&__init_begin), 591 (unsigned long)(&__init_begin),
592 __pa_symbol(&__init_end)); 592 (unsigned long)(&__init_end));
593} 593}
594 594
595#ifdef CONFIG_DEBUG_RODATA 595#ifdef CONFIG_DEBUG_RODATA
596 596
597void mark_rodata_ro(void) 597void mark_rodata_ro(void)
598{ 598{
599 unsigned long start = PFN_ALIGN(__va(__pa_symbol(&_stext))), size; 599 unsigned long start = (unsigned long)_stext, end;
600 600
601#ifdef CONFIG_HOTPLUG_CPU 601#ifdef CONFIG_HOTPLUG_CPU
602 /* It must still be possible to apply SMP alternatives. */ 602 /* It must still be possible to apply SMP alternatives. */
603 if (num_possible_cpus() > 1) 603 if (num_possible_cpus() > 1)
604 start = PFN_ALIGN(__va(__pa_symbol(&_etext))); 604 start = (unsigned long)_etext;
605#endif 605#endif
606 size = (unsigned long)__va(__pa_symbol(&__end_rodata)) - start; 606 end = (unsigned long)__end_rodata;
607 change_page_attr_addr(start, size >> PAGE_SHIFT, PAGE_KERNEL_RO); 607 start = (start + PAGE_SIZE - 1) & PAGE_MASK;
608 end &= PAGE_MASK;
609 if (end <= start)
610 return;
611
612 change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
608 613
609 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", 614 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
610 size >> 10); 615 (end - start) >> 10);
611 616
612 /* 617 /*
613 * change_page_attr_addr() requires a global_flush_tlb() call after it. 618 * change_page_attr_addr() requires a global_flush_tlb() call after it.
@@ -622,7 +627,7 @@ void mark_rodata_ro(void)
622#ifdef CONFIG_BLK_DEV_INITRD 627#ifdef CONFIG_BLK_DEV_INITRD
623void free_initrd_mem(unsigned long start, unsigned long end) 628void free_initrd_mem(unsigned long start, unsigned long end)
624{ 629{
625 free_init_pages("initrd memory", __pa(start), __pa(end)); 630 free_init_pages("initrd memory", start, end);
626} 631}
627#endif 632#endif
628 633
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index c6e5e8d401a4..6cac90aa5032 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -13,12 +13,21 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/io.h> 15#include <linux/io.h>
16
16#include <asm/pgalloc.h> 17#include <asm/pgalloc.h>
17#include <asm/fixmap.h> 18#include <asm/fixmap.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
19#include <asm/cacheflush.h> 20#include <asm/cacheflush.h>
20#include <asm/proto.h> 21#include <asm/proto.h>
21 22
23unsigned long __phys_addr(unsigned long x)
24{
25 if (x >= __START_KERNEL_map)
26 return x - __START_KERNEL_map + phys_base;
27 return x - PAGE_OFFSET;
28}
29EXPORT_SYMBOL(__phys_addr);
30
22#define ISA_START_ADDRESS 0xa0000 31#define ISA_START_ADDRESS 0xa0000
23#define ISA_END_ADDRESS 0x100000 32#define ISA_END_ADDRESS 0x100000
24 33
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index bf4aa8dd4254..d653d0bf3df6 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -51,6 +51,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
51 SetPagePrivate(base); 51 SetPagePrivate(base);
52 page_private(base) = 0; 52 page_private(base) = 0;
53 53
54 address = __pa(address);
54 addr = address & LARGE_PAGE_MASK; 55 addr = address & LARGE_PAGE_MASK;
55 pbase = (pte_t *)page_address(base); 56 pbase = (pte_t *)page_address(base);
56 for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { 57 for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
@@ -100,12 +101,13 @@ static inline void save_page(struct page *fpage)
100 * No more special protections in this 2/4MB area - revert to a 101 * No more special protections in this 2/4MB area - revert to a
101 * large page again. 102 * large page again.
102 */ 103 */
103static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_prot) 104static void revert_page(unsigned long address, pgprot_t ref_prot)
104{ 105{
105 pgd_t *pgd; 106 pgd_t *pgd;
106 pud_t *pud; 107 pud_t *pud;
107 pmd_t *pmd; 108 pmd_t *pmd;
108 pte_t large_pte; 109 pte_t large_pte;
110 unsigned long pfn;
109 111
110 pgd = pgd_offset_k(address); 112 pgd = pgd_offset_k(address);
111 BUG_ON(pgd_none(*pgd)); 113 BUG_ON(pgd_none(*pgd));
@@ -113,6 +115,7 @@ static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_p
113 BUG_ON(pud_none(*pud)); 115 BUG_ON(pud_none(*pud));
114 pmd = pmd_offset(pud, address); 116 pmd = pmd_offset(pud, address);
115 BUG_ON(pmd_val(*pmd) & _PAGE_PSE); 117 BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
118 pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
116 large_pte = pfn_pte(pfn, ref_prot); 119 large_pte = pfn_pte(pfn, ref_prot);
117 large_pte = pte_mkhuge(large_pte); 120 large_pte = pte_mkhuge(large_pte);
118 set_pte((pte_t *)pmd, large_pte); 121 set_pte((pte_t *)pmd, large_pte);
@@ -138,8 +141,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
138 */ 141 */
139 struct page *split; 142 struct page *split;
140 ref_prot2 = pte_pgprot(pte_clrhuge(*kpte)); 143 ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
141 split = split_large_page(pfn << PAGE_SHIFT, prot, 144 split = split_large_page(address, prot, ref_prot2);
142 ref_prot2);
143 if (!split) 145 if (!split)
144 return -ENOMEM; 146 return -ENOMEM;
145 set_pte(kpte, mk_pte(split, ref_prot2)); 147 set_pte(kpte, mk_pte(split, ref_prot2));
@@ -158,7 +160,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
158 160
159 if (page_private(kpte_page) == 0) { 161 if (page_private(kpte_page) == 0) {
160 save_page(kpte_page); 162 save_page(kpte_page);
161 revert_page(address, pfn, ref_prot); 163 revert_page(address, ref_prot);
162 } 164 }
163 return 0; 165 return 0;
164} 166}
@@ -178,7 +180,6 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
178 */ 180 */
179int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot) 181int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
180{ 182{
181 unsigned long phys_base_pfn = __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT;
182 int err = 0, kernel_map = 0; 183 int err = 0, kernel_map = 0;
183 int i; 184 int i;
184 185
@@ -199,11 +200,10 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
199 } 200 }
200 /* Handle kernel mapping too which aliases part of the 201 /* Handle kernel mapping too which aliases part of the
201 * lowmem */ 202 * lowmem */
202 if ((pfn >= phys_base_pfn) && 203 if (__pa(address) < KERNEL_TEXT_SIZE) {
203 ((pfn - phys_base_pfn) < (KERNEL_TEXT_SIZE >> PAGE_SHIFT))) {
204 unsigned long addr2; 204 unsigned long addr2;
205 pgprot_t prot2; 205 pgprot_t prot2;
206 addr2 = __START_KERNEL_map + ((pfn - phys_base_pfn) << PAGE_SHIFT); 206 addr2 = __START_KERNEL_map + __pa(address);
207 /* Make sure the kernel mappings stay executable */ 207 /* Make sure the kernel mappings stay executable */
208 prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot))); 208 prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
209 err = __change_page_attr(addr2, pfn, prot2, 209 err = __change_page_attr(addr2, pfn, prot2,