author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2015-11-30 22:36:37 -0500
committer	Michael Ellerman <mpe@ellerman.id.au>			2015-12-13 23:19:08 -0500
commit		1ca7212932862e348f2f9307f35bd309a7da82d8 (patch)
tree		00d808affd922a8066a100c1608f3d3a73294174 /arch
parent		371352ca0e7f3fad8406933e37c965d5a44365d9 (diff)
powerpc/mm: Move PTE bits from generic functions to hash64 functions.
Functions that operate on PTE bits are moved to hash*.h, and other generic
functions are moved to pgtable.h.
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
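
The split is source-compatible for callers: generic mm code keeps using the same accessor names, and the per-MMU header now supplies the definitions. A minimal, self-contained sketch of the accessor pattern follows; the pte_t, __pte, pte_val and _PAGE_RW definitions below are simplified stand-ins for the kernel's per-MMU definitions, not part of this patch.

	/*
	 * Standalone sketch of the PTE accessor pattern being moved; the
	 * types and bit value are illustrative, not kernel definitions.
	 */
	#include <stdio.h>

	typedef struct { unsigned long pte; } pte_t;
	#define pte_val(x)	((x).pte)
	#define __pte(x)	((pte_t){ (x) })
	#define _PAGE_RW	0x002UL		/* illustrative bit value */

	/* Same shape as the accessor this patch moves into book3s/64/hash.h */
	static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW); }

	int main(void)
	{
		pte_t pte = __pte(_PAGE_RW);
		printf("pte_write: %d\n", pte_write(pte));	/* prints 1 */
		return 0;
	}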
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/book3s/32/pgtable.h	183
-rw-r--r--	arch/powerpc/include/asm/book3s/64/hash.h	151
-rw-r--r--	arch/powerpc/include/asm/book3s/64/pgtable.h	6
-rw-r--r--	arch/powerpc/include/asm/book3s/pgtable.h	204
4 files changed, 340 insertions(+), 204 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 226f29d39332..38b33dcfcc9d 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -294,6 +294,189 @@ void pgtable_cache_init(void);
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
 		      pmd_t **pmdp);
 
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
+static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
+static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
+static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+static inline int pte_present(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+		     pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return pte_val(pte) >> PTE_RPN_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * an horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+	 * helper pte_update() which does an atomic update. We need to do that
+	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+	 * the hash bits instead (ie, same as the non-SMP case)
+	 */
+	if (percpu)
+		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+	else
+		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE. In this case, we
+	 * can just store as long as we do the two halves in the right order
+	 * with a barrier in between. This is possible because we take care,
+	 * in the hash code, to pre-invalidate if the PTE was already hashed,
+	 * which synchronizes us with any concurrent invalidation.
+	 * In the percpu case, we also fallback to the simple update preserving
+	 * the hash bits
+	 */
+	if (percpu) {
+		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+		return;
+	}
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(mm, ptep, addr);
+	__asm__ __volatile__("\
+		stw%U0%X0 %2,%0\n\
+		eieio\n\
+		stw%U0%X0 %L2,%1"
+	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+	: "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+	/* Third case is 32-bit hash table in UP mode, we need to preserve
+	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
+	 * and see we need to keep track that this PTE needs invalidating
+	 */
+	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+		      | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+#error "Not supported "
+#endif
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+			 _PAGE_WRITETHRU)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_NO_CACHE);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_COHERENT);
+}
+
+#define pgprot_cached_wthru pgprot_cached_wthru
+static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_COHERENT | _PAGE_WRITETHRU);
+}
+
+#define pgprot_cached_noncoherent pgprot_cached_noncoherent
+static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
+{
+	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+	return pgprot_noncached_wc(prot);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */
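
All the pgprot helpers added above follow one pattern: clear the four cache-control bits (_PAGE_CACHE_CTL), then OR in the bits for the requested mode. A hedged sketch of a typical caller follows, assuming a driver mapping MMIO registers; ioremap_prot() is powerpc's existing interface, but map_device_regs() and its arguments are assumptions for this sketch, not part of the patch.

	/*
	 * Illustrative caller of the pgprot helpers above; map_device_regs()
	 * is a hypothetical example, not kernel code.
	 */
	#include <linux/types.h>
	#include <asm/io.h>
	#include <asm/pgtable.h>

	static void __iomem *map_device_regs(phys_addr_t phys, unsigned long size)
	{
		/* MMIO wants non-cached, guarded (no speculative) access. */
		pgprot_t prot = pgprot_noncached(PAGE_KERNEL);

		return ioremap_prot(phys, size, pgprot_val(prot));
	}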
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 447b212649c8..48237e66e823 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -481,6 +481,157 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
 }
 
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
+static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
+static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
+static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/asm-generic/pgtable.h . On powerpc, this will only
+ * work for user pages and always return true for kernel pages.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+	return (pte_val(pte) &
+		(_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline int pte_present(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+		     pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return pte_val(pte) >> PTE_RPN_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * an horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte, int percpu)
+{
+	/*
+	 * Anything else just stores the PTE normally. That covers all 64-bit
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
+	 */
+	*ptep = pte;
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+			 _PAGE_WRITETHRU)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_NO_CACHE);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_COHERENT);
+}
+
+#define pgprot_cached_wthru pgprot_cached_wthru
+static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_COHERENT | _PAGE_WRITETHRU);
+}
+
+#define pgprot_cached_noncoherent pgprot_cached_noncoherent
+static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
+{
+	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+	return pgprot_noncached_wc(prot);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 				   pmd_t *pmdp, unsigned long old_pmd);
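
The pte_protnone() test added to hash.h encodes PROT_NONE as "present but not user-accessible": both bits are masked, and the result must equal _PAGE_PRESENT alone. A standalone illustration with made-up bit values follows; the real _PAGE_* layout is per-MMU and differs from these constants.

	/*
	 * Standalone illustration of the pte_protnone() test above, using
	 * illustrative bit values rather than the kernel's definitions.
	 */
	#include <stdio.h>

	#define _PAGE_PRESENT	0x001UL
	#define _PAGE_USER	0x002UL

	static int pte_protnone_demo(unsigned long pte_val)
	{
		/* Present but not user-accessible => PROT_NONE (NUMA hint fault) */
		return (pte_val & (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
	}

	int main(void)
	{
		printf("%d\n", pte_protnone_demo(_PAGE_PRESENT));		/* 1 */
		printf("%d\n", pte_protnone_demo(_PAGE_PRESENT | _PAGE_USER));	/* 0 */
		return 0;
	}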
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index aac630b4a15e..f2ace2cac7bb 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -201,6 +201,12 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
 #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
 #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#ifdef CONFIG_NUMA_BALANCING
+static inline int pmd_protnone(pmd_t pmd)
+{
+	return pte_protnone(pmd_pte(pmd));
+}
+#endif /* CONFIG_NUMA_BALANCING */
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
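
The new pmd_protnone() follows the surrounding pattern in this header: view the PMD as a PTE with pmd_pte(), reuse the PTE-level operation, and (for modifiers) convert back with pte_pmd(). A sketch of that delegation follows, with a hypothetical helper name; pmd_mkdemo() is not a real kernel function.

	/*
	 * Hypothetical helper showing the pmd_pte()/pte_pmd() delegation
	 * pattern used above.
	 */
	static inline pmd_t pmd_mkdemo(pmd_t pmd)
	{
		/* Reuse the PTE-level modifier on the PMD's PTE view. */
		return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
	}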
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index ebd6677ea017..8b0f4a29259a 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -9,221 +9,17 @@
 
 #define FIRST_USER_ADDRESS	0UL
 #ifndef __ASSEMBLY__
-
-/* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
-static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
-static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
-static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
-static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
-
-#ifdef CONFIG_NUMA_BALANCING
-/*
- * These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
- * work for user pages and always return true for kernel pages.
- */
-static inline int pte_protnone(pte_t pte)
-{
-	return (pte_val(pte) &
-		(_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
-}
-
-static inline int pmd_protnone(pmd_t pmd)
-{
-	return pte_protnone(pmd_pte(pmd));
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
-static inline int pte_present(pte_t pte)
-{
-	return pte_val(pte) & _PAGE_PRESENT;
-}
-
-/* Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * Even if PTEs can be unsigned long long, a PFN is always an unsigned
- * long for now.
- */
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
-{
-	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
-		     pgprot_val(pgprot));
-}
-
-static inline unsigned long pte_pfn(pte_t pte)
-{
-	return pte_val(pte) >> PTE_RPN_SHIFT;
-}
-
-/* Generic modifiers for PTE bits */
-static inline pte_t pte_wrprotect(pte_t pte)
-{
-	return __pte(pte_val(pte) & ~_PAGE_RW);
-}
-
-static inline pte_t pte_mkclean(pte_t pte)
-{
-	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
-}
-
-static inline pte_t pte_mkold(pte_t pte)
-{
-	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
-}
-
-static inline pte_t pte_mkwrite(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_RW);
-}
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_DIRTY);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_ACCESSED);
-}
-
-static inline pte_t pte_mkspecial(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_SPECIAL);
-}
-
-static inline pte_t pte_mkhuge(pte_t pte)
-{
-	return pte;
-}
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
-}
-
-
 /* Insert a PTE, top-level function is out of line. It uses an inline
  * low level function in the respective pgtable-* files
  */
 extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 		       pte_t pte);
 
-/* This low level function performs the actual PTE insertion
- * Setting the PTE depends on the MMU type and other factors. It's
- * an horrible mess that I'm not going to try to clean up now but
- * I'm keeping it in one place rather than spread around
- */
-static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
-				pte_t *ptep, pte_t pte, int percpu)
-{
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
-	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
-	 * helper pte_update() which does an atomic update. We need to do that
-	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
-	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
-	 * the hash bits instead (ie, same as the non-SMP case)
-	 */
-	if (percpu)
-		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-			      | (pte_val(pte) & ~_PAGE_HASHPTE));
-	else
-		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
-
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
-	/* Second case is 32-bit with 64-bit PTE. In this case, we
-	 * can just store as long as we do the two halves in the right order
-	 * with a barrier in between. This is possible because we take care,
-	 * in the hash code, to pre-invalidate if the PTE was already hashed,
-	 * which synchronizes us with any concurrent invalidation.
-	 * In the percpu case, we also fallback to the simple update preserving
-	 * the hash bits
-	 */
-	if (percpu) {
-		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-			      | (pte_val(pte) & ~_PAGE_HASHPTE));
-		return;
-	}
-	if (pte_val(*ptep) & _PAGE_HASHPTE)
-		flush_hash_entry(mm, ptep, addr);
-	__asm__ __volatile__("\
-		stw%U0%X0 %2,%0\n\
-		eieio\n\
-		stw%U0%X0 %L2,%1"
-	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
-	: "r" (pte) : "memory");
-
-#elif defined(CONFIG_PPC_STD_MMU_32)
-	/* Third case is 32-bit hash table in UP mode, we need to preserve
-	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
-	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
-	 * and see we need to keep track that this PTE needs invalidating
-	 */
-	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-		      | (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
-	/* Anything else just stores the PTE normally. That covers all 64-bit
-	 * cases, and 32-bit non-hash with 32-bit PTEs.
-	 */
-	*ptep = pte;
-#endif
-}
-
-
 
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 				 pte_t *ptep, pte_t entry, int dirty);
 
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-
-#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU)
-
-#define pgprot_noncached pgprot_noncached
-static inline pgprot_t pgprot_noncached(pgprot_t prot)
-{
-	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
-			_PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
-#define pgprot_noncached_wc pgprot_noncached_wc
-static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
-{
-	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
-			_PAGE_NO_CACHE);
-}
-
-#define pgprot_cached pgprot_cached
-static inline pgprot_t pgprot_cached(pgprot_t prot)
-{
-	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
-			_PAGE_COHERENT);
-}
-
-#define pgprot_cached_wthru pgprot_cached_wthru
-static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
-{
-	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
-			_PAGE_COHERENT | _PAGE_WRITETHRU);
-}
-
-#define pgprot_cached_noncoherent pgprot_cached_noncoherent
-static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
-{
-	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
-}
-
-#define pgprot_writecombine pgprot_writecombine
-static inline pgprot_t pgprot_writecombine(pgprot_t prot)
-{
-	return pgprot_noncached_wc(prot);
-}
-
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);