diff options
author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2017-03-16 11:26:51 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2017-03-18 04:48:02 -0400 |
commit | 0005d20b2ff1e501e186b3b3bc587085ac305fdc (patch) | |
tree | b0f05c1dc037f034e533ead0a56eeeb9dd5f0b23 /mm/gup.c | |
parent | e7884f8ead4a301b04687a3238527b06feef8ea0 (diff) |
mm/gup: Move page table entry dereference into helper function
This is a preparation patch for the transition of x86 to the generic GUP_fast()
implementation.
On x86 PAE, page table entry is larger than sizeof(long) and we would
need to provide a helper that can read the entry atomically.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dann Frazier <dann.frazier@canonical.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170316152655.37789-4-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/gup.c')
-rw-r--r-- | mm/gup.c | 20 |
1 files changed, 12 insertions, 8 deletions
@@ -1189,6 +1189,17 @@ struct page *get_dump_page(unsigned long addr) | |||
1189 | */ | 1189 | */ |
1190 | #ifdef CONFIG_HAVE_GENERIC_RCU_GUP | 1190 | #ifdef CONFIG_HAVE_GENERIC_RCU_GUP |
1191 | 1191 | ||
1192 | #ifndef gup_get_pte | ||
1193 | /* | ||
1194 | * We assume that the PTE can be read atomically. If this is not the case for | ||
1195 | * your architecture, please provide the helper. | ||
1196 | */ | ||
1197 | static inline pte_t gup_get_pte(pte_t *ptep) | ||
1198 | { | ||
1199 | return READ_ONCE(*ptep); | ||
1200 | } | ||
1201 | #endif | ||
1202 | |||
1192 | #ifdef __HAVE_ARCH_PTE_SPECIAL | 1203 | #ifdef __HAVE_ARCH_PTE_SPECIAL |
1193 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, | 1204 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, |
1194 | int write, struct page **pages, int *nr) | 1205 | int write, struct page **pages, int *nr) |
@@ -1198,14 +1209,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, | |||
1198 | 1209 | ||
1199 | ptem = ptep = pte_offset_map(&pmd, addr); | 1210 | ptem = ptep = pte_offset_map(&pmd, addr); |
1200 | do { | 1211 | do { |
1201 | /* | 1212 | pte_t pte = gup_get_pte(ptep); |
1202 | * In the line below we are assuming that the pte can be read | ||
1203 | * atomically. If this is not the case for your architecture, | ||
1204 | * please wrap this in a helper function! | ||
1205 | * | ||
1206 | * for an example see gup_get_pte in arch/x86/mm/gup.c | ||
1207 | */ | ||
1208 | pte_t pte = READ_ONCE(*ptep); | ||
1209 | struct page *head, *page; | 1213 | struct page *head, *page; |
1210 | 1214 | ||
1211 | /* | 1215 | /* |