aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/linux/mempolicy.h3
-rw-r--r--mm/filemap.c6
-rw-r--r--mm/rmap.c10
-rw-r--r--mm/swap_state.c3
5 files changed, 11 insertions, 13 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index ff28c8b31f58..7dca30a26c53 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -8,7 +8,7 @@
8 * - update the page tables 8 * - update the page tables
9 * - inform the TLB about the new one 9 * - inform the TLB about the new one
10 * 10 *
11 * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock. 11 * We hold the mm semaphore for reading, and the pte lock.
12 * 12 *
13 * Note: the old pte is known to not be writable, so we don't need to 13 * Note: the old pte is known to not be writable, so we don't need to
14 * worry about dirty bits etc getting lost. 14 * worry about dirty bits etc getting lost.
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 38e60a099399..7af8cb836e78 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -47,8 +47,7 @@ struct vm_area_struct;
47 * Locking policy for interleave: 47 * Locking policy for interleave:
48 * In process context there is no locking because only the process accesses 48 * In process context there is no locking because only the process accesses
49 * its own state. All vma manipulation is somewhat protected by a down_read on 49 * its own state. All vma manipulation is somewhat protected by a down_read on
50 * mmap_sem. For allocating in the interleave policy the page_table_lock 50 * mmap_sem.
51 * must also be acquired to protect il_next.
52 * 51 *
53 * Freeing policy: 52 * Freeing policy:
54 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd. 53 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
diff --git a/mm/filemap.c b/mm/filemap.c
index f560b41c8f61..036599d1177e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -66,7 +66,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
66 * 66 *
67 * ->mmap_sem 67 * ->mmap_sem
68 * ->i_mmap_lock 68 * ->i_mmap_lock
69 * ->page_table_lock (various places, mainly in mmap.c) 69 * ->page_table_lock or pte_lock (various, mainly in memory.c)
70 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock) 70 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
71 * 71 *
72 * ->mmap_sem 72 * ->mmap_sem
@@ -86,9 +86,9 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
86 * ->anon_vma.lock (vma_adjust) 86 * ->anon_vma.lock (vma_adjust)
87 * 87 *
88 * ->anon_vma.lock 88 * ->anon_vma.lock
89 * ->page_table_lock (anon_vma_prepare and various) 89 * ->page_table_lock or pte_lock (anon_vma_prepare and various)
90 * 90 *
91 * ->page_table_lock 91 * ->page_table_lock or pte_lock
92 * ->swap_lock (try_to_unmap_one) 92 * ->swap_lock (try_to_unmap_one)
93 * ->private_lock (try_to_unmap_one) 93 * ->private_lock (try_to_unmap_one)
94 * ->tree_lock (try_to_unmap_one) 94 * ->tree_lock (try_to_unmap_one)
diff --git a/mm/rmap.c b/mm/rmap.c
index a7427bbf57e4..914d04b98bee 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -32,7 +32,7 @@
32 * page->flags PG_locked (lock_page) 32 * page->flags PG_locked (lock_page)
33 * mapping->i_mmap_lock 33 * mapping->i_mmap_lock
34 * anon_vma->lock 34 * anon_vma->lock
35 * mm->page_table_lock 35 * mm->page_table_lock or pte_lock
36 * zone->lru_lock (in mark_page_accessed) 36 * zone->lru_lock (in mark_page_accessed)
37 * swap_lock (in swap_duplicate, swap_info_get) 37 * swap_lock (in swap_duplicate, swap_info_get)
38 * mmlist_lock (in mmput, drain_mmlist and others) 38 * mmlist_lock (in mmput, drain_mmlist and others)
@@ -244,7 +244,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
244/* 244/*
245 * Check that @page is mapped at @address into @mm. 245 * Check that @page is mapped at @address into @mm.
246 * 246 *
247 * On success returns with mapped pte and locked mm->page_table_lock. 247 * On success returns with pte mapped and locked.
248 */ 248 */
249pte_t *page_check_address(struct page *page, struct mm_struct *mm, 249pte_t *page_check_address(struct page *page, struct mm_struct *mm,
250 unsigned long address, spinlock_t **ptlp) 250 unsigned long address, spinlock_t **ptlp)
@@ -445,7 +445,7 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
445 * @vma: the vm area in which the mapping is added 445 * @vma: the vm area in which the mapping is added
446 * @address: the user virtual address mapped 446 * @address: the user virtual address mapped
447 * 447 *
448 * The caller needs to hold the mm->page_table_lock. 448 * The caller needs to hold the pte lock.
449 */ 449 */
450void page_add_anon_rmap(struct page *page, 450void page_add_anon_rmap(struct page *page,
451 struct vm_area_struct *vma, unsigned long address) 451 struct vm_area_struct *vma, unsigned long address)
@@ -468,7 +468,7 @@ void page_add_anon_rmap(struct page *page,
468 * page_add_file_rmap - add pte mapping to a file page 468 * page_add_file_rmap - add pte mapping to a file page
469 * @page: the page to add the mapping to 469 * @page: the page to add the mapping to
470 * 470 *
471 * The caller needs to hold the mm->page_table_lock. 471 * The caller needs to hold the pte lock.
472 */ 472 */
473void page_add_file_rmap(struct page *page) 473void page_add_file_rmap(struct page *page)
474{ 474{
@@ -483,7 +483,7 @@ void page_add_file_rmap(struct page *page)
483 * page_remove_rmap - take down pte mapping from a page 483 * page_remove_rmap - take down pte mapping from a page
484 * @page: page to remove mapping from 484 * @page: page to remove mapping from
485 * 485 *
486 * Caller needs to hold the mm->page_table_lock. 486 * The caller needs to hold the pte lock.
487 */ 487 */
488void page_remove_rmap(struct page *page) 488void page_remove_rmap(struct page *page)
489{ 489{
diff --git a/mm/swap_state.c b/mm/swap_state.c
index cafc1edcbeba..dfd9a46755b8 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -259,8 +259,7 @@ static inline void free_swap_cache(struct page *page)
259 259
260/* 260/*
261 * Perform a free_page(), also freeing any swap cache associated with 261 * Perform a free_page(), also freeing any swap cache associated with
262 * this page if it is the last user of the page. Can not do a lock_page, 262 * this page if it is the last user of the page.
263 * as we are holding the page_table_lock spinlock.
264 */ 263 */
265void free_page_and_swap_cache(struct page *page) 264void free_page_and_swap_cache(struct page *page)
266{ 265{