From 9b4ee40ebbbaf3f8c775b023d89ceedda1167d79 Mon Sep 17 00:00:00 2001
From: Paolo 'Blaisorblade' Giarrusso
Date: Sat, 3 Sep 2005 15:54:57 -0700
Subject: [PATCH] mm: correct _PAGE_FILE comment

_PAGE_FILE does not indicate whether a page is in the page / swap cache;
it is set just for non-linear PTEs.  Correct the comment for i386, x86_64,
UML.  Also clarify _PAGE_PROTNONE.

Signed-off-by: Paolo 'Blaisorblade' Giarrusso
Cc: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-i386/pgtable.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'include/asm-i386/pgtable.h')

diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 77c6497f416e..c797286b512f 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -86,9 +86,7 @@ void paging_init(void);
 #endif

 /*
- * The 4MB page is guessing.. Detailed in the infamous "Chapter H"
- * of the Pentium details, but assuming intel did the straightforward
- * thing, this bit set in the page directory entry just means that
+ * _PAGE_PSE set in the page directory entry just means that
  * the page directory entry points directly to a 4MB-aligned block of
  * memory.
  */
@@ -119,8 +117,10 @@ void paging_init(void);
 #define _PAGE_UNUSED2	0x400
 #define _PAGE_UNUSED3	0x800

-#define _PAGE_FILE	0x040	/* set:pagecache unset:swap */
-#define _PAGE_PROTNONE	0x080	/* If not present */
+/* If _PAGE_PRESENT is clear, we use these: */
+#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_PROTNONE	0x080	/* if the user mapped it with PROT_NONE;
+				   pte_present gives true */
 #ifdef CONFIG_X86_PAE
 #define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
 #else
--
cgit v1.2.2

From 32e51a8c976fc72c3e9bcece9767d9908816bf8e Mon Sep 17 00:00:00 2001
From: Adam Litke
Date: Sat, 3 Sep 2005 15:54:59 -0700
Subject: [PATCH] hugetlb: add pte_huge() macro

This patch adds a macro pte_huge(pte) for i386/x86_64, which is needed by a
patch later in the series.  Instead of repeating (_PAGE_PRESENT |
_PAGE_PSE), I've added __LARGE_PTE to i386 to match x86_64.

Signed-off-by: Adam Litke
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-i386/pgtable.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'include/asm-i386/pgtable.h')

diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index c797286b512f..f51fd2c956bb 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -215,11 +215,13 @@ extern unsigned long pg0[];
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
+#define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT)
 static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
 static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
 static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
+static inline int pte_huge(pte_t pte)		{ return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; }

 /*
  * The following only works if pte_present() is not true.
@@ -236,7 +238,7 @@ static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return
 static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PRESENT | _PAGE_PSE; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= __LARGE_PTE; return pte; }

 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
--
cgit v1.2.2
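The distinction drawn by these two patches is subtle enough to deserve a
concrete illustration.  Below is a minimal userspace model of the flag
semantics described above; pte_t, pte_present(), and pte_file() are
simplified stand-ins for the real i386 definitions (pte_file() does not
appear in the diffs here and is included only as an assumed helper), so
treat this as a sketch rather than kernel code.

/*
 * Userspace model of the i386 PTE flag semantics from the two patches
 * above.  Everything here is a simplified stand-in, not kernel code.
 * Build with: cc -std=c11 -o pte-model pte-model.c
 */
#include <stdio.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_PSE	0x080	/* page-size bit in a real present PDE */
#define _PAGE_FILE	0x040	/* when not present: non-linear file mapping */
#define _PAGE_PROTNONE	0x080	/* when not present (hw): PROT_NONE mapping */

#define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT)

typedef struct { unsigned long pte_low; } pte_t;

static int pte_present(pte_t pte)
{
	/* A PROT_NONE page is present to the VM even though the hardware
	 * present bit is clear -- exactly what the new comment explains. */
	return (pte.pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) != 0;
}

static int pte_file(pte_t pte)
{
	/* Meaningful only when pte_present() is false: distinguishes a
	 * saved non-linear file PTE from a swap entry. */
	return (pte.pte_low & _PAGE_FILE) != 0;
}

static int pte_huge(pte_t pte)
{
	return (pte.pte_low & __LARGE_PTE) == __LARGE_PTE;
}

int main(void)
{
	pte_t nonlinear = { _PAGE_FILE };	/* saved non-linear PTE */
	pte_t swapent   = { 0x3400 };		/* swap entry: no _PAGE_FILE */
	pte_t protnone  = { _PAGE_PROTNONE };	/* mmap(..., PROT_NONE, ...) */
	pte_t huge      = { __LARGE_PTE };	/* what pte_mkhuge() sets */

	printf("nonlinear: present=%d file=%d\n",
	       pte_present(nonlinear), pte_file(nonlinear));
	printf("swap:      present=%d file=%d\n",
	       pte_present(swapent), pte_file(swapent));
	printf("protnone:  present=%d huge=%d\n",
	       pte_present(protnone), pte_huge(protnone));
	printf("huge:      present=%d huge=%d\n",
	       pte_present(huge), pte_huge(huge));
	return 0;
}

Note how the protnone entry reports present even with the hardware present
bit clear, and how sharing bit 7 between _PAGE_PSE and _PAGE_PROTNONE is
harmless because pte_huge() also requires _PAGE_PRESENT.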
From a600388d28419305aad3c4c0af52c223cf6fa0af Mon Sep 17 00:00:00 2001
From: Zachary Amsden
Date: Sat, 3 Sep 2005 15:55:04 -0700
Subject: [PATCH] x86: ptep_clear optimization

Add a new accessor for PTEs, which passes the full hint from the mmu_gather
struct; this allows architectures with hardware pagetables to optimize away
atomic PTE operations when destroying an address space.  Removing the locked
operation should allow better pipelining of memory access in this loop.  I
measured an average savings of 30-35 cycles per zap_pte_range on the first
500 destructions on Pentium-M, but I believe the optimization would win more
on older processors which still assert the bus lock on xchg for an exclusive
cacheline.

Update: I made some new measurements, and this saves exactly 26 cycles over
ptep_get_and_clear on Pentium M.  On P4, with a PAE kernel, this saves 180
cycles per ptep_get_and_clear, for a whopping 92160 cycles savings for a
full address space destruction.

pte_clear_full is not yet used, but is provided for future optimizations
(in particular, when running inside of a hypervisor that queues page table
updates, the full hint allows us to avoid queueing unnecessary page table
updates for an address space in the process of being destroyed).  This is
not a huge win, but it does help a bit, and sets the stage for further
hypervisor optimization of the mm layer on all architectures.

Signed-off-by: Zachary Amsden
Cc: Christoph Lameter
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-i386/pgtable.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'include/asm-i386/pgtable.h')

diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index f51fd2c956bb..d74185aee15b 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -260,6 +260,18 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
 }

+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+{
+	pte_t pte;
+	if (full) {
+		pte = *ptep;
+		*ptep = __pte(0);
+	} else {
+		pte = ptep_get_and_clear(mm, addr, ptep);
+	}
+	return pte;
+}
+
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
@@ -417,6 +429,7 @@ extern void noexec_setup(const char *str);
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
 #include <asm-generic/pgtable.h>
--
cgit v1.2.2
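To see where the full hint buys its cycles, here is a hedged userspace
sketch of a teardown loop in the spirit of zap_pte_range().  It is a model,
not the kernel code: the C11 atomic exchange stands in for the locked xchg
inside the real ptep_get_and_clear(), and the table, loop, and printout are
invented for illustration.

/*
 * Userspace model of the full-teardown fast path added above.
 * Build with: cc -std=c11 -o zap-model zap-model.c
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { _Atomic unsigned long pte_low; } pte_t;

static unsigned long ptep_get_and_clear(pte_t *ptep)
{
	/* Locked xchg: required while other CPUs may still use this
	 * pagetable. */
	return atomic_exchange(&ptep->pte_low, 0);
}

static unsigned long ptep_get_and_clear_full(pte_t *ptep, int full)
{
	if (full) {
		/* The whole address space is going away: nobody else can
		 * touch these PTEs, so a plain read and store suffice. */
		unsigned long pte = atomic_load_explicit(&ptep->pte_low,
							 memory_order_relaxed);
		atomic_store_explicit(&ptep->pte_low, 0,
				      memory_order_relaxed);
		return pte;
	}
	return ptep_get_and_clear(ptep);
}

int main(void)
{
	pte_t table[4] = { {0x1001}, {0x2001}, {0}, {0x3001} };
	int full = 1;	/* what the zap loop would get from the mmu_gather */

	for (int i = 0; i < 4; i++) {
		unsigned long old = ptep_get_and_clear_full(&table[i], full);
		if (old)
			printf("pte %d cleared, old value %#lx\n", i, old);
	}
	return 0;
}

The design point is that when full is set the entire mm is being torn down,
so no other CPU can race on these PTEs and the ordinary load/store pair is
safe; it also gives a hypervisor a natural spot to skip queueing page table
updates for a dying address space.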
From d7271b14b2e9e5905aba0fbf5c4dc4f8980c0cb2 Mon Sep 17 00:00:00 2001
From: Zachary Amsden
Date: Sat, 3 Sep 2005 15:56:50 -0700
Subject: [PATCH] i386: encapsulate copying of pgd entries

Add a clone operation for pgd updates.

This helps complete the encapsulation of updates to page tables (or pages
about to become page tables) into accessor functions rather than using
memcpy() to duplicate them.  This is both generally good for consistency
and also necessary for running in a hypervisor which requires explicit
updates to page table entries.

The new function is:

    clone_pgd_range(pgd_t *dst, pgd_t *src, int count);

    dst   - pointer to pgd range anywhere on a pgd page
    src   - ""
    count - the number of pgds to copy.

dst and src can be on the same page, but the range must not overlap and
must not cross a page boundary.

Note that I omitted using this call to copy pgd entries into the software
suspend page root, since this is not technically a live paging structure;
rather, it is used on resume from suspend.  CC'ing Pavel in case he has
any feedback on this.

Thanks to Chris Wright for noticing that this could be more optimal in PAE
compiles by eliminating the memset.

Signed-off-by: Zachary Amsden
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-i386/pgtable.h | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

(limited to 'include/asm-i386/pgtable.h')

diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index d74185aee15b..47bc1ffa3d4c 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -277,6 +277,21 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
 }

+/*
+ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
+ *
+ *  dst - pointer to pgd range anywhere on a pgd page
+ *  src - ""
+ *  count - the number of pgds to copy.
+ *
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+{
+	memcpy(dst, src, count * sizeof(pgd_t));
+}
+
 /*
  * Macro to mark a page protection value as "uncacheable".  On processors which do not support
  * it, this is a no-op.
--
cgit v1.2.2
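As a rough picture of the caller this accessor is meant for, the sketch
below models an i386-style pgd constructor that shares the kernel half of
a master page directory with each new pgd.  The names swapper_pg_dir,
USER_PTRS_PER_PGD, and pgd_ctor mirror the kernel's, but the values and
the code are simplified assumptions, not the actual implementation.

/*
 * Userspace model of a pgd constructor built on clone_pgd_range().
 * Build with: cc -std=c11 -o pgd-model pgd-model.c
 */
#include <stdio.h>
#include <string.h>

typedef struct { unsigned long pgd; } pgd_t;

#define PTRS_PER_PGD      1024
#define USER_PTRS_PER_PGD 768	/* assumed 3G/1G split, as on default i386 */

static pgd_t swapper_pg_dir[PTRS_PER_PGD];	/* stand-in master kernel pgd */

/* Same shape as the accessor added above: the one funnel point for
 * duplicating pgd entries. */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

static void pgd_ctor(pgd_t *pgd)
{
	/* User half starts empty; kernel half is copied from the master. */
	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
	clone_pgd_range(pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			PTRS_PER_PGD - USER_PTRS_PER_PGD);
}

int main(void)
{
	static pgd_t new_pgd[PTRS_PER_PGD];

	swapper_pg_dir[USER_PTRS_PER_PGD].pgd = 0x1e3;	/* fake kernel entry */
	pgd_ctor(new_pgd);
	printf("first kernel entry after ctor: %#lx\n",
	       new_pgd[USER_PTRS_PER_PGD].pgd);
	return 0;
}

With every pgd copy funnelled through clone_pgd_range(), a hypervisor port
only has to hook or trap this one inline function instead of auditing
scattered memcpy() calls.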