 include/asm-sh/pgtable.h | 47 +++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 41 insertions(+), 6 deletions(-)
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index c84901dbd8e5..af62bd33a02a 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -508,16 +508,50 @@ struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
			      unsigned long address, pte_t pte);
 
-/* Encode and de-code a swap entry */
 /*
+ * Encode and de-code a swap entry
+ *
+ * Constraints:
+ *	_PAGE_FILE at bit 0
+ *	_PAGE_PRESENT at bit 8
+ *	_PAGE_PROTNONE at bit 9
+ *
+ * For the normal case, we encode the swap type into bits 0:7 and the
+ * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
+ * preserved bits in the low 32-bits and use the upper 32 as the swap
+ * offset (along with a 5-bit type), following the same approach as x86
+ * PAE. This keeps the logic quite simple, and allows for a full 32
+ * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
+ * in the pte_low case.
+ *
+ * As is evident by the Alpha code, if we ever get a 64-bit unsigned
+ * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
+ * much cleaner..
+ *
  * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
  *       and _PAGE_PROTNONE bits
  */
-#define __swp_type(x)		((x).val & 0xff)
-#define __swp_offset(x)		((x).val >> 10)
-#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 10) })
-#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 1 })
-#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 1 })
+#ifdef CONFIG_X2TLB
+#define __swp_type(x)			((x).val & 0x1f)
+#define __swp_offset(x)			((x).val >> 5)
+#define __swp_entry(type, offset)	((swp_entry_t){ (type) | (offset) << 5})
+#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
+#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
+
+/*
+ * Encode and decode a nonlinear file mapping entry
+ */
+#define pte_to_pgoff(pte)		((pte).pte_high)
+#define pgoff_to_pte(off)		((pte_t) { _PAGE_FILE, (off) })
+
+#define PTE_FILE_MAX_BITS	32
+#else
+#define __swp_type(x)			((x).val & 0xff)
+#define __swp_offset(x)			((x).val >> 10)
+#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 10})
+
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 1 })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 1 })
 
 /*
  * Encode and decode a nonlinear file mapping entry
@@ -525,6 +559,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 #define PTE_FILE_MAX_BITS	29
 #define pte_to_pgoff(pte)	(pte_val(pte) >> 1)
 #define pgoff_to_pte(off)	((pte_t) { ((off) << 1) | _PAGE_FILE })
+#endif
 
 typedef pte_t *pte_addr_t;
 
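For reference, below is a minimal standalone sketch of the two swap-entry layouts described in the comment above: an 8-bit type in bits 0:7 with the offset from bit 10 up for the 32-bit PTE case, and a 5-bit type plus offset held entirely in pte_high for the X2TLB case. The helpers swp_make32()/swp_make64() and the sample values are hypothetical and are not the kernel's __swp_* macros; note also that the non-X2TLB macros above additionally shift the whole value left by one when storing it in the pte, keeping _PAGE_FILE (bit 0) clear, which the sketch does not model.

/*
 * Standalone illustration (not kernel code) of the two swap-entry
 * layouts described in the comment above.  swp_make32()/swp_make64()
 * are hypothetical helpers, not the kernel's __swp_* macros.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * 32-bit PTE case: 8-bit type in bits 0:7, offset from bit 10 up.
 * Bits 8 and 9 (_PAGE_PRESENT, _PAGE_PROTNONE) are left zero.
 */
static uint32_t swp_make32(uint32_t type, uint32_t offset)
{
	return (type & 0xff) | (offset << 10);
}

/*
 * X2TLB (64-bit PTE) case: the encoding lives entirely in pte_high,
 * with a 5-bit type in bits 0:4 and the offset from bit 5 up.
 */
static uint32_t swp_make64(uint32_t type, uint32_t offset)
{
	return (type & 0x1f) | (offset << 5);
}

int main(void)
{
	uint32_t e32 = swp_make32(3, 0x12345);
	uint32_t e64 = swp_make64(3, 0x12345);

	/* Round-trip the fields the same way __swp_type()/__swp_offset() do */
	assert((e32 & 0xff) == 3 && (e32 >> 10) == 0x12345);
	assert((e64 & 0x1f) == 3 && (e64 >> 5) == 0x12345);

	printf("32-bit layout: %#010" PRIx32 "\n", e32);
	printf("X2TLB layout:  %#010" PRIx32 "\n", e64);
	return 0;
}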