diff options
author:    Nick Piggin <npiggin@suse.de>  2008-07-30 01:23:13 -0400
committer: Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-07-30 01:26:54 -0400
commit:    ce0ad7f0952581ba75ab6aee55bb1ed9bb22cf4f (patch)
tree:      bf2a8845a031cb685219db2ddcb3d296b4a9ffab /include
parent:    7d2a175b9bf6e9422bebe95130a3c79a25ff4602 (diff)
powerpc/mm: Lockless get_user_pages_fast() for 64-bit (v3)
Implement lockless get_user_pages_fast for 64-bit powerpc.
Page table existence is guaranteed with RCU, and speculative page references
are used to take a reference to the pages without having a prior existence
guarantee on them.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-powerpc/pgtable-ppc64.h |  2
-rw-r--r--  include/linux/pagemap.h             | 23
2 files changed, 25 insertions, 0 deletions
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 5fc78c0be302..74c6f380b805 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -461,6 +461,8 @@ void pgtable_cache_init(void);
 	return pt;
 }
 
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long address);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a39b38ccdc97..69ed3cb1197a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -143,6 +143,29 @@ static inline int page_cache_get_speculative(struct page *page)
 	return 1;
 }
 
+/*
+ * Same as above, but add instead of inc (could just be merged)
+ */
+static inline int page_cache_add_speculative(struct page *page, int count)
+{
+	VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+# ifdef CONFIG_PREEMPT
+	VM_BUG_ON(!in_atomic());
+# endif
+	VM_BUG_ON(page_count(page) == 0);
+	atomic_add(count, &page->_count);
+
+#else
+	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
+		return 0;
+#endif
+	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+
+	return 1;
+}
+
 static inline int page_freeze_refs(struct page *page, int count)
 {
 	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
