Diffstat (limited to 'arch/x86_64/mm/pageattr.c')
-rw-r--r--   arch/x86_64/mm/pageattr.c | 32
1 file changed, 20 insertions, 12 deletions
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 081409aa3452..bf4aa8dd4254 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -51,7 +51,6 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	SetPagePrivate(base);
 	page_private(base) = 0;
 
-	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
@@ -101,13 +100,12 @@ static inline void save_page(struct page *fpage)
  * No more special protections in this 2/4MB area - revert to a
  * large page again.
  */
-static void revert_page(unsigned long address, pgprot_t ref_prot)
+static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_prot)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t large_pte;
-	unsigned long pfn;
 
 	pgd = pgd_offset_k(address);
 	BUG_ON(pgd_none(*pgd));
@@ -115,7 +113,6 @@ static void revert_page(unsigned long address, pgprot_t ref_prot)
 	BUG_ON(pud_none(*pud));
 	pmd = pmd_offset(pud, address);
 	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
 	large_pte = pfn_pte(pfn, ref_prot);
 	large_pte = pte_mkhuge(large_pte);
 	set_pte((pte_t *)pmd, large_pte);
@@ -141,7 +138,8 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 			 */
 			struct page *split;
 			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
-			split = split_large_page(address, prot, ref_prot2);
+			split = split_large_page(pfn << PAGE_SHIFT, prot,
+						 ref_prot2);
 			if (!split)
 				return -ENOMEM;
 			set_pte(kpte, mk_pte(split, ref_prot2));
@@ -160,7 +158,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 
 	if (page_private(kpte_page) == 0) {
 		save_page(kpte_page);
-		revert_page(address, ref_prot);
+		revert_page(address, pfn, ref_prot);
 	}
 	return 0;
 }
@@ -180,22 +178,32 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
  */
 int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
-	int err = 0;
+	unsigned long phys_base_pfn = __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT;
+	int err = 0, kernel_map = 0;
 	int i;
 
+	if (address >= __START_KERNEL_map
+	    && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
+		address = (unsigned long)__va(__pa(address));
+		kernel_map = 1;
+	}
+
 	down_write(&init_mm.mmap_sem);
 	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
 		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 
-		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
-		if (err)
-			break;
+		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+			err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
+			if (err)
+				break;
+		}
 		/* Handle kernel mapping too which aliases part of the
 		 * lowmem */
-		if (__pa(address) < KERNEL_TEXT_SIZE) {
+		if ((pfn >= phys_base_pfn) &&
+		   ((pfn - phys_base_pfn) < (KERNEL_TEXT_SIZE >> PAGE_SHIFT))) {
 			unsigned long addr2;
 			pgprot_t prot2;
-			addr2 = __START_KERNEL_map + __pa(address);
+			addr2 = __START_KERNEL_map + ((pfn - phys_base_pfn) << PAGE_SHIFT);
 			/* Make sure the kernel mappings stay executable */
 			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
 			err = __change_page_attr(addr2, pfn, prot2,
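Note: the essential change above is that change_page_attr_addr() now does its aliasing arithmetic in terms of page frame numbers relative to phys_base_pfn, instead of feeding __pa(address) straight back into the kernel text mapping. The standalone user-space sketch below illustrates only that arithmetic; every constant and the ex_*() helpers are assumed stand-ins for __START_KERNEL_map, PAGE_OFFSET, PAGE_SHIFT, KERNEL_TEXT_SIZE and __pa_symbol()/__va(), not values or APIs taken from a real kernel.

/*
 * Standalone sketch (user space, not kernel code) of the pfn-based
 * aliasing used by the patched change_page_attr_addr().  All constants
 * below are illustrative assumptions, not real system values.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT        12
#define EX_PAGE_OFFSET       0xffff810000000000ULL /* assumed direct-mapping base */
#define EX_START_KERNEL_MAP  0xffffffff80000000ULL /* assumed kernel text mapping base */
#define EX_KERNEL_TEXT_SIZE  (40ULL << 20)         /* assumed size of the text mapping */
#define EX_PHYS_BASE         0x200000ULL           /* assumed physical load address */

/* Direct-mapping analogues of __va()/__pa() under the assumptions above. */
static uint64_t ex_va(uint64_t paddr) { return paddr + EX_PAGE_OFFSET; }
static uint64_t ex_pa(uint64_t vaddr) { return vaddr - EX_PAGE_OFFSET; }

int main(void)
{
	uint64_t phys_base_pfn = EX_PHYS_BASE >> EX_PAGE_SHIFT;

	/* Caller passes an address inside the kernel text mapping. */
	uint64_t address = EX_START_KERNEL_MAP + 0x5000;

	/*
	 * Step 1: mirrors the new block at the top of change_page_attr_addr().
	 * Fold the text-mapping address back onto its lowmem alias, the way
	 * __va(__pa(address)) does when __pa() understands the text range.
	 */
	uint64_t phys = (address - EX_START_KERNEL_MAP) + EX_PHYS_BASE;
	uint64_t lowmem_addr = ex_va(phys);
	uint64_t pfn = ex_pa(lowmem_addr) >> EX_PAGE_SHIFT;

	/*
	 * Step 2: mirrors the reworked alias check in the loop.  Decide from
	 * the pfn alone whether the page also lives in the text mapping, and
	 * recompute that alias relative to phys_base_pfn.
	 */
	if (pfn >= phys_base_pfn &&
	    (pfn - phys_base_pfn) < (EX_KERNEL_TEXT_SIZE >> EX_PAGE_SHIFT)) {
		uint64_t addr2 = EX_START_KERNEL_MAP +
				 ((pfn - phys_base_pfn) << EX_PAGE_SHIFT);
		printf("pfn %#llx -> lowmem %#llx, text alias %#llx\n",
		       (unsigned long long)pfn,
		       (unsigned long long)lowmem_addr,
		       (unsigned long long)addr2);
	}
	return 0;
}

Built as an ordinary program, this prints the lowmem address and the __START_KERNEL_map alias derived from the same pfn, mirroring the address/addr2 pair the patched loop hands to __change_page_attr(). Keying the alias check off the pfn rather than __pa(address) is what lets the phys_base_pfn offset account for a kernel image that is not loaded at physical address zero.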