author		Thomas Gleixner <tglx@linutronix.de>	2008-02-04 10:48:07 -0500
committer	Ingo Molnar <mingo@elte.hu>		2008-02-04 10:48:07 -0500
commit		65e074dffa198978ab0c9976a19b954fbe1183e2 (patch)
tree		a50807444598c6d7a64ed75af23f9a246ee40d86 /arch/x86/mm/pageattr.c
parent		f4ae5da0e8e92caa168e7c2a7c4a6c4064b082c2 (diff)
x86: cpa, preserve large pages if possible
When CPA is called on a range which fits into a large page mapping,
avoid splitting the page when:
1) There is no change of attributes
2) The range to change covers the complete large mapping
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
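
Illustration (not part of the commit): a minimal user-space sketch of the preservation check described above, assuming 2 MiB large pages and 4 KiB base pages, with page attributes reduced to a plain bitmask. All names here (can_preserve, the SKETCH_* constants) are made up for this sketch and do not exist in the kernel.

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12UL
#define SKETCH_LARGE_SIZE (1UL << 21)                 /* 2 MiB */
#define SKETCH_LARGE_MASK (~(SKETCH_LARGE_SIZE - 1))

/*
 * Returns true when the 2 MiB mapping containing addr can be kept
 * intact for this request; clamps *numpages so one step never runs
 * past the current large page.
 */
static bool can_preserve(unsigned long addr, unsigned long *numpages,
                         unsigned long old_prot, unsigned long new_prot)
{
        /* First address past the large page that contains addr. */
        unsigned long nextpage_addr = (addr + SKETCH_LARGE_SIZE) & SKETCH_LARGE_MASK;
        /* Number of 4 KiB pages from addr up to that boundary. */
        unsigned long in_this_page = (nextpage_addr - addr) >> SKETCH_PAGE_SHIFT;

        if (*numpages > in_this_page)
                *numpages = in_this_page;

        /* Case 1: the attributes do not change at all. */
        if (new_prot == old_prot)
                return true;

        /* Case 2: the request is the complete, aligned large mapping. */
        if (addr == (nextpage_addr - SKETCH_LARGE_SIZE) && *numpages == in_this_page)
                return true;

        return false;   /* otherwise the large page must be split */
}

int main(void)
{
        unsigned long n = 512;  /* 512 * 4 KiB == one full 2 MiB mapping */

        printf("aligned, full 2M, new prot: %d\n",
               can_preserve(0x200000UL, &n, 0x1UL, 0x3UL)); /* 1: preserved */
        n = 4;
        printf("4 pages, new prot         : %d\n",
               can_preserve(0x200000UL, &n, 0x1UL, 0x3UL)); /* 0: must split */
        return 0;
}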
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r--	arch/x86/mm/pageattr.c	142
1 file changed, 130 insertions(+), 12 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 79a9f1b42ddd..40b7ac58e671 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -18,12 +18,17 @@

 struct cpa_data {
         unsigned long   vaddr;
-        int             numpages;
         pgprot_t        mask_set;
         pgprot_t        mask_clr;
+        int             numpages;
         int             flushtlb;
 };

+enum {
+        CPA_NO_SPLIT = 0,
+        CPA_SPLIT,
+};
+
 static inline int
 within(unsigned long addr, unsigned long start, unsigned long end)
 {
@@ -230,6 +235,86 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 #endif
 }

+static int try_preserve_large_page(pte_t *kpte, unsigned long address,
+                                   struct cpa_data *cpa)
+{
+        unsigned long nextpage_addr, numpages, pmask, psize, flags;
+        pte_t new_pte, old_pte, *tmp;
+        pgprot_t old_prot, new_prot;
+        int level, res = CPA_SPLIT;
+
+        spin_lock_irqsave(&pgd_lock, flags);
+        /*
+         * Check for races, another CPU might have split this page
+         * up already:
+         */
+        tmp = lookup_address(address, &level);
+        if (tmp != kpte)
+                goto out_unlock;
+
+        switch (level) {
+        case PG_LEVEL_2M:
+                psize = LARGE_PAGE_SIZE;
+                pmask = LARGE_PAGE_MASK;
+                break;
+        case PG_LEVEL_1G:
+        default:
+                res = -EINVAL;
+                goto out_unlock;
+        }
+
+        /*
+         * Calculate the number of pages, which fit into this large
+         * page starting at address:
+         */
+        nextpage_addr = (address + psize) & pmask;
+        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
+        if (numpages < cpa->numpages)
+                cpa->numpages = numpages;
+
+        /*
+         * We are safe now. Check whether the new pgprot is the same:
+         */
+        old_pte = *kpte;
+        old_prot = new_prot = pte_pgprot(old_pte);
+
+        pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
+        pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
+        new_prot = static_protections(new_prot, address);
+
+        /*
+         * If there are no changes, return. maxpages has been updated
+         * above:
+         */
+        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
+                res = CPA_NO_SPLIT;
+                goto out_unlock;
+        }
+
+        /*
+         * We need to change the attributes. Check, whether we can
+         * change the large page in one go. We request a split, when
+         * the address is not aligned and the number of pages is
+         * smaller than the number of pages in the large page. Note
+         * that we limited the number of possible pages already to
+         * the number of pages in the large page.
+         */
+        if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
+                /*
+                 * The address is aligned and the number of pages
+                 * covers the full page.
+                 */
+                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
+                __set_pmd_pte(kpte, address, new_pte);
+                cpa->flushtlb = 1;
+                res = CPA_NO_SPLIT;
+        }
+
+out_unlock:
+        spin_unlock_irqrestore(&pgd_lock, flags);
+        return res;
+}
+
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
         pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
@@ -295,7 +380,7 @@ out_unlock:
 static int __change_page_attr(unsigned long address, struct cpa_data *cpa)
 {
         struct page *kpte_page;
-        int level, err = 0;
+        int level, res;
         pte_t *kpte;

 repeat:
@@ -338,13 +423,34 @@ repeat:
                         set_pte_atomic(kpte, new_pte);
                         cpa->flushtlb = 1;
                 }
-        } else {
-                err = split_large_page(kpte, address);
-                if (!err)
-                        goto repeat;
-                cpa->flushtlb = 1;
+                cpa->numpages = 1;
+                return 0;
         }
-        return err;
+
+        /*
+         * Check, whether we can keep the large page intact
+         * and just change the pte:
+         */
+        res = try_preserve_large_page(kpte, address, cpa);
+        if (res < 0)
+                return res;
+
+        /*
+         * When the range fits into the existing large page,
+         * return. cp->numpages and cpa->tlbflush have been updated in
+         * try_large_page:
+         */
+        if (res == CPA_NO_SPLIT)
+                return 0;
+
+        /*
+         * We have to split the large page:
+         */
+        res = split_large_page(kpte, address);
+        if (res)
+                return res;
+        cpa->flushtlb = 1;
+        goto repeat;
 }

 /**
@@ -410,15 +516,27 @@ static int change_page_attr_addr(struct cpa_data *cpa)

 static int __change_page_attr_set_clr(struct cpa_data *cpa)
 {
-        unsigned int i;
-        int ret;
+        int ret, numpages = cpa->numpages;

-        for (i = 0; i < cpa->numpages ; i++, cpa->vaddr += PAGE_SIZE) {
+        while (numpages) {
+                /*
+                 * Store the remaining nr of pages for the large page
+                 * preservation check.
+                 */
+                cpa->numpages = numpages;
                 ret = change_page_attr_addr(cpa);
                 if (ret)
                         return ret;
-        }

+                /*
+                 * Adjust the number of pages with the result of the
+                 * CPA operation. Either a large page has been
+                 * preserved or a single page update happened.
+                 */
+                BUG_ON(cpa->numpages > numpages);
+                numpages -= cpa->numpages;
+                cpa->vaddr += cpa->numpages * PAGE_SIZE;
+        }
         return 0;
 }

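
Illustration (not part of the commit): a toy model of the reworked __change_page_attr_set_clr() loop above, showing how the caller now advances by whatever number of pages each step reports back in cpa->numpages rather than one 4 KiB page per iteration. toy_change_one() is a made-up stand-in for change_page_attr_addr(), not kernel code, and vaddr is kept in page units for simplicity.

#include <stdio.h>

#define PAGES_PER_LARGE 512UL   /* 2 MiB / 4 KiB */

struct toy_cpa {
        unsigned long vaddr;    /* in units of 4 KiB pages */
        unsigned long numpages; /* pages this step may consume */
};

/* Pretend step: consume a whole large page when aligned and covered. */
static int toy_change_one(struct toy_cpa *cpa)
{
        if ((cpa->vaddr % PAGES_PER_LARGE) == 0 &&
            cpa->numpages >= PAGES_PER_LARGE)
                cpa->numpages = PAGES_PER_LARGE;        /* kept large page */
        else
                cpa->numpages = 1;                      /* single page */
        return 0;
}

int main(void)
{
        struct toy_cpa cpa = { .vaddr = 510, .numpages = 0 };
        unsigned long numpages = 1030;  /* range spanning into two large pages */
        int steps = 0;

        while (numpages) {
                cpa.numpages = numpages;        /* remaining budget for this step */
                if (toy_change_one(&cpa))
                        return 1;
                numpages -= cpa.numpages;       /* advance by what was handled */
                cpa.vaddr += cpa.numpages;
                steps++;
        }
        printf("handled in %d steps\n", steps); /* 8 steps instead of 1030 */
        return 0;
}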