author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2015-02-10 17:10:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-10 17:30:32 -0500
commit	22f9bf3950f20d24198791685f2dccac2c4ef38a (patch)
tree	c1f3a677ff0cf72cccd1d273dbc47baf26e99a94 /arch
parent	1eeda0abf4425c91e7ce3ca32f1908c3a51bf84e (diff)
metag: drop _PAGE_FILE and pte_file()-related helpers
We've replaced the remap_file_pages(2) implementation with emulation. Nobody
creates non-linear mappings anymore.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
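For context, here is a minimal user-space sketch (not kernel code) of what the removed helpers did: a non-linear file PTE stored the page offset shifted above the low flag bits and was tagged with _PAGE_FILE so pte_file() could recognize it. The bit value chosen for _PAGE_FILE below is an assumption made purely for illustration; in the real header it aliased _PAGE_ALWAYS_ZERO_3, and offsets of up to PTE_FILE_MAX_BITS (22) bits fit in the encoding.

/*
 * Illustrative sketch only, not kernel code: models the file-PTE
 * encoding removed by this patch.  The _PAGE_FILE bit value is an
 * assumption for demonstration (the header aliased _PAGE_ALWAYS_ZERO_3).
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned long pteval_t;

#define _PAGE_FILE        (1UL << 3)  /* assumed bit, for illustration only */
#define PTE_FILE_MAX_BITS 22          /* offsets of up to 22 bits fit */

/* Mirrors the removed pgoff_to_pte(): offset goes above the low flag bits. */
static pteval_t pgoff_to_pte(unsigned long pgoff)
{
	return (pgoff << 10) | _PAGE_FILE;
}

/* Mirrors the removed pte_to_pgoff(): shift the offset back down. */
static unsigned long pte_to_pgoff(pteval_t pte)
{
	return pte >> 10;
}

int main(void)
{
	unsigned long pgoff = 0x12345;       /* page offset into the backing file */
	pteval_t pte = pgoff_to_pte(pgoff);

	assert(pte & _PAGE_FILE);            /* would have made pte_file() true */
	assert(pte_to_pgoff(pte) == pgoff);  /* encoding round-trips */
	printf("pte=%#lx pgoff=%#lx\n", pte, pte_to_pgoff(pte));
	return 0;
}

With remap_file_pages(2) now emulated in generic code, no PTE ever carries this encoding, so the architecture no longer needs to reserve _PAGE_FILE or the offset bits, which is what the diff below removes.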
Diffstat (limited to 'arch')
-rw-r--r--	arch/metag/include/asm/pgtable.h	6
1 file changed, 0 insertions, 6 deletions
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index 0d9dc5487296..d0604c0a8702 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -47,7 +47,6 @@
  */
 #define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
 #define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
-#define _PAGE_FILE _PAGE_ALWAYS_ZERO_3
 
 /* Pages owned, and protected by, the kernel. */
 #define _PAGE_KERNEL _PAGE_PRIV
@@ -219,7 +218,6 @@ extern unsigned long empty_zero_page;
 static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
 static inline int pte_special(pte_t pte) { return 0; }
 
 static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
@@ -327,10 +325,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
 
-#define PTE_FILE_MAX_BITS 22
-#define pte_to_pgoff(x) (pte_val(x) >> 10)
-#define pgoff_to_pte(x) __pte(((x) << 10) | _PAGE_FILE)
-
 #define kern_addr_valid(addr) (1)
 
 /*