Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  8
1 file changed, 5 insertions, 3 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index f5ad996a4a8f..1a8bf76bfd03 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -80,7 +80,7 @@ static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
 	return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
 }
 
-void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
+static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
 {
 	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
 }
@@ -314,7 +314,7 @@ void __init anon_vma_init(void)
  * Getting a lock on a stable anon_vma from a page off the LRU is
  * tricky: page_lock_anon_vma rely on RCU to guard against the races.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *__page_lock_anon_vma(struct page *page)
 {
 	struct anon_vma *anon_vma, *root_anon_vma;
 	unsigned long anon_mapping;
@@ -348,6 +348,8 @@ out:
 }
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
+	__releases(&anon_vma->root->lock)
+	__releases(RCU)
 {
 	anon_vma_unlock(anon_vma);
 	rcu_read_unlock();
@@ -407,7 +409,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
  *
  * On success returns with pte mapped and locked.
  */
-pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 			  unsigned long address, spinlock_t **ptlp, int sync)
 {
 	pgd_t *pgd;