| author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-07 11:44:24 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-07 11:44:24 -0400 |
| commit | e3ebadd95cb621e2c7436f3d3646447ac9d5c16d (patch) | |
| tree | 510b41550cc3751cfb565e3e2ba195a68b784a03 /arch/x86_64/mm/pageattr.c | |
| parent | 15700770ef7c5d12e2f1659d2ddbeb3f658d9f37 (diff) | |
Revert "[PATCH] x86: __pa and __pa_symbol address space separation"
This was broken. It adds complexity, for no good reason. Rather than
separate __pa() and __pa_symbol(), we should deprecate __pa_symbol(),
and preferably __pa() too - and just use "virt_to_phys()" instead, which
is more readable and has nicer semantics.
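As a rough illustration of that readability argument (the helper below is invented for this example and is not part of the patch), virt_to_phys() takes a pointer and names the conversion, while __pa() hides the same conversion behind a macro:

```c
#include <asm/io.h>	/* virt_to_phys() on x86-64 of this era */

/* buf_phys() is a hypothetical helper, shown only for comparison. */
static unsigned long buf_phys(void *buf)
{
	/* Self-describing: "the physical address behind this pointer". */
	return virt_to_phys(buf);

	/* Equivalent for a directly mapped (lowmem) address, but terser
	 * and less obvious about what it does:
	 *
	 *	return __pa(buf);
	 */
}
```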
However, right now, just undo the separation, and make __pa_symbol() be
the exact same as __pa(). That fixes the bugs this patch introduced,
and we can do the fairly obvious cleanups later.
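A minimal sketch of that immediate effect (not the verbatim header change): __pa_symbol() collapses back onto the same conversion as __pa(), so either spelling yields the same physical address.

```c
/* Sketch only; the real header may route both through a common helper. */
#define __pa_symbol(x)	__pa(x)

/*
 * e.g. for a kernel symbol such as _text, these are now identical:
 *
 *	__pa_symbol(&_text) == __pa(&_text)
 */
```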
Do the new __phys_addr() function (which is now the actual workhorse for
the unified __pa()/__pa_symbol()) as a real external function, that way
all the potential issues with compile/link-time optimizations of
constant symbol addresses go away, and we can also, if we choose to, add
more sanity-checking of the argument.
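A hedged sketch of such an out-of-line helper; the body below, the use of phys_base, and where it lives are assumptions based on how the x86-64 kernel mapping works, not a quote of the committed code.

```c
#include <asm/page.h>	/* PAGE_OFFSET, __START_KERNEL_map, phys_base */

/*
 * Deliberately a real function rather than a macro or inline: the
 * compiler/linker can no longer constant-fold symbol addresses through
 * it, and there is an obvious place to add sanity checks on the
 * argument later.
 */
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)		/* kernel text/data mapping */
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;			/* direct (lowmem) mapping */
}
```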
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Vivek Goyal <vgoyal@in.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86_64/mm/pageattr.c')
-rw-r--r--	arch/x86_64/mm/pageattr.c | 16
1 file changed, 8 insertions, 8 deletions
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index bf4aa8dd4254..d653d0bf3df6 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -51,6 +51,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	SetPagePrivate(base);
 	page_private(base) = 0;
 
+	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
@@ -100,12 +101,13 @@ static inline void save_page(struct page *fpage)
  * No more special protections in this 2/4MB area - revert to a
  * large page again.
  */
-static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_prot)
+static void revert_page(unsigned long address, pgprot_t ref_prot)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t large_pte;
+	unsigned long pfn;
 
 	pgd = pgd_offset_k(address);
 	BUG_ON(pgd_none(*pgd));
@@ -113,6 +115,7 @@ static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_p
 	BUG_ON(pud_none(*pud));
 	pmd = pmd_offset(pud, address);
 	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
+	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
 	large_pte = pfn_pte(pfn, ref_prot);
 	large_pte = pte_mkhuge(large_pte);
 	set_pte((pte_t *)pmd, large_pte);
@@ -138,8 +141,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 		 */
 		struct page *split;
 		ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
-		split = split_large_page(pfn << PAGE_SHIFT, prot,
-					 ref_prot2);
+		split = split_large_page(address, prot, ref_prot2);
 		if (!split)
 			return -ENOMEM;
 		set_pte(kpte, mk_pte(split, ref_prot2));
@@ -158,7 +160,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 
 	if (page_private(kpte_page) == 0) {
 		save_page(kpte_page);
-		revert_page(address, pfn, ref_prot);
+		revert_page(address, ref_prot);
 	}
 	return 0;
 }
@@ -178,7 +180,6 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
  */
 int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
-	unsigned long phys_base_pfn = __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT;
 	int err = 0, kernel_map = 0;
 	int i;
 
@@ -199,11 +200,10 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 		}
 		/* Handle kernel mapping too which aliases part of the
 		 * lowmem */
-		if ((pfn >= phys_base_pfn) &&
-		    ((pfn - phys_base_pfn) < (KERNEL_TEXT_SIZE >> PAGE_SHIFT))) {
+		if (__pa(address) < KERNEL_TEXT_SIZE) {
 			unsigned long addr2;
 			pgprot_t prot2;
-			addr2 = __START_KERNEL_map + ((pfn - phys_base_pfn) << PAGE_SHIFT);
+			addr2 = __START_KERNEL_map + __pa(address);
 			/* Make sure the kernel mappings stay executable */
 			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
 			err = __change_page_attr(addr2, pfn, prot2,