author     Hugh Dickins <hugh@veritas.com>  2005-11-23 16:37:39 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-11-23 19:08:38 -0500
commit     01edcd891c3e9f4bb992ff2ceb69836bf76f8ddf (patch)
tree       3b8ab4808220d1caaf3ba34db16917f71b852a08 /arch/powerpc
parent     cc3327e7dfc16a9a3e164075234c869867a59e45 (diff)
[PATCH] mm: powerpc ptlock comments
Update comments (only) on page_table_lock and mmap_sem in arch/powerpc.
Removed the comment on page_table_lock from hash_huge_page: since it's
no longer taking page_table_lock itself, it's irrelevant whether others
are; but how it is safe (even against huge file truncation?) I can't say.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
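For readers outside the rest of the series: the "pte lock" these comments
now refer to is the per-page-table spinlock introduced by the split-ptlock
patches, as opposed to the single mm-wide page_table_lock. A minimal sketch
of the two idioms, assuming the generic helpers of this kernel era
(pte_offset_map_lock/pte_unmap_unlock); illustrative, not part of the patch:

	/* Old idiom: one lock serializes all page tables of the mm */
	spin_lock(&mm->page_table_lock);
	/* ... walk and update ptes ... */
	spin_unlock(&mm->page_table_lock);

	/* New idiom: lock only the page table page covering `address` */
	pte_t *ptep;
	spinlock_t *ptl;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	/* ... examine and update *ptep ... */
	pte_unmap_unlock(ptep, ptl);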
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c  4
-rw-r--r--  arch/powerpc/mm/mem.c          2
-rw-r--r--  arch/powerpc/mm/tlb_32.c       6
-rw-r--r--  arch/powerpc/mm/tlb_64.c       4
4 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 426c269e552e..9250f14be8ef 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -754,9 +754,7 @@ repeat:
 	}
 
 	/*
-	 * No need to use ldarx/stdcx here because all who
-	 * might be updating the pte will hold the
-	 * page_table_lock
+	 * No need to use ldarx/stdcx here
 	 */
 	*ptep = __pte(new_pte & ~_PAGE_BUSY);
 
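The comment that survives relies on the _PAGE_BUSY convention rather than
any lock: the fault path claimed this pte earlier with an atomic
compare-and-swap that set _PAGE_BUSY, so no other updater can race with it,
and the plain store above both publishes the new value and releases the busy
bit. A rough user-space analogue of that pattern, using a GCC builtin in
place of the kernel's ldarx/stdcx loop (illustrative only; PAGE_BUSY and
both helpers are made up for the sketch):

#include <stdint.h>

#define PAGE_BUSY 0x1ULL		/* stand-in for _PAGE_BUSY */

/* Claim the entry, as the fault path does; nonzero on success. */
static int pte_claim(uint64_t *ptep)
{
	uint64_t old = *ptep;

	if (old & PAGE_BUSY)
		return 0;		/* another CPU is updating it */
	return __sync_bool_compare_and_swap(ptep, old, old | PAGE_BUSY);
}

/* Once claimed, a plain store updates the entry and drops the bit. */
static void pte_update_and_release(uint64_t *ptep, uint64_t new_pte)
{
	*ptep = new_pte & ~PAGE_BUSY;
}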
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 4bd7b0a70996..ed6ed2e30dac 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -495,7 +495,7 @@ EXPORT_SYMBOL(flush_icache_user_range);
  * We use it to preload an HPTE into the hash table corresponding to
  * the updated linux PTE.
  *
- * This must always be called with the mm->page_table_lock held
+ * This must always be called with the pte lock held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t pte)
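The reworded rule matches how callers reach this hook: generic mm code takes
the pte lock for the affected page table, writes the linux pte, and calls
update_mmu_cache() before unlocking, so the preload sees a stable pte. A
sketch of that calling sequence, assuming the standard helpers (not code
from this patch):

	pte_t *ptep;
	spinlock_t *ptl;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	set_pte_at(mm, address, ptep, entry);
	update_mmu_cache(vma, address, entry);	/* pte lock still held */
	pte_unmap_unlock(ptep, ptl);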
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 6c3dc3c44c86..ad580f3742e5 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -149,6 +149,12 @@ void flush_tlb_mm(struct mm_struct *mm)
 		return;
 	}
 
+	/*
+	 * It is safe to go down the mm's list of vmas when called
+	 * from dup_mmap, holding mmap_sem.  It would also be safe from
+	 * unmap_region or exit_mmap, but not from vmtruncate on SMP -
+	 * but it seems dup_mmap is the only SMP case which gets here.
+	 */
 	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
 		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
 	FINISH_FLUSH;
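The added comment is a calling-context argument, not a lock assertion:
nothing here takes a lock, it just records why the unlocked vma walk is safe
from the one SMP caller. A condensed sketch of the dup_mmap shape it relies
on, simplified from kernel/fork.c of this era (illustrative, not the actual
code):

static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt;

	down_write(&oldmm->mmap_sem);	/* vma list is frozen under us */
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		/* ... copy each vma into the child ... */
	}
	flush_tlb_mm(oldmm);		/* the walk above runs under mmap_sem */
	up_write(&oldmm->mmap_sem);
	return 0;
}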
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 53e31b834ace..859d29a0cac5 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -95,7 +95,7 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 
 void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
-	/* This is safe as we are holding page_table_lock */
+	/* This is safe since tlb_gather_mmu has disabled preemption */
 	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
@@ -206,7 +206,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 
 void pte_free_finish(void)
 {
-	/* This is safe as we are holding page_table_lock */
+	/* This is safe since tlb_gather_mmu has disabled preemption */
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
 	if (*batchp == NULL)
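Both tlb_64.c hunks make the same correction: what keeps smp_processor_id()
and __get_cpu_var() stable here is not page_table_lock but the fact that
tlb_gather_mmu() disables preemption (via its get_cpu_var() of the
mmu_gather), which tlb_finish_mmu() later undoes. The per-cpu idiom in
isolation, as a minimal sketch (illustrative; the real code gets the
preempt_disable implicitly from tlb_gather_mmu):

	struct pte_freelist_batch **batchp;

	preempt_disable();		/* tlb_gather_mmu does this for us */
	/* Safe: we cannot migrate, so this CPU's slot stays ours. */
	batchp = &__get_cpu_var(pte_freelist_cur);
	/* ... batch page-table pages for deferred freeing ... */
	preempt_enable();		/* done by tlb_finish_mmu */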