author	Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:23 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:40 -0400
commit	c74df32c724a1652ad8399b4891bb02c9d43743a
tree	5a79d56fdcf7dc2053a277dbf6db7c3b339e9659	/mm/fremap.c
parent	1bb3630e89cb8a7b3d3807629c20c5bad88290ff
[PATCH] mm: ptd_alloc take ptlock
Second step in pushing down the page_table_lock. Remove the temporary bridging hack from __pud_alloc, __pmd_alloc, __pte_alloc: expect callers not to hold page_table_lock, whether it's on init_mm or a user mm; take page_table_lock internally to check if a racing task already allocated.

Convert their callers from common code. But avoid coming back to change them again later: instead of moving the spin_lock(&mm->page_table_lock) down, switch over to new macros pte_alloc_map_lock and pte_unmap_unlock, which encapsulate the mapping+locking and unlocking+unmapping together, and in the end may use alternatives to the mm page_table_lock itself.

These callers all hold mmap_sem (some exclusively, some not), so at no level can a page table be whipped away from beneath them; and pte_alloc uses the "atomic" pmd_present to test whether it needs to allocate. It appears that on all arches we can safely descend without page_table_lock.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
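For readers following the series: the two macros named above pair mapping the pte page with taking the page-table lock, and unlocking with unmapping, so callers such as install_page() and install_file_pte() never touch mm->page_table_lock directly. The sketch below shows roughly what such macros amount to at this stage; it is an illustration of the idea, not the exact definitions in include/linux/mm.h, and it assumes the lock handed back through ptlp is still the per-mm page_table_lock.

/* Illustrative sketch only -- not the exact mainline definitions. */

#define pte_offset_map_lock(mm, pmd, address, ptlp)		\
({								\
	spinlock_t *__ptl = &(mm)->page_table_lock;		\
	pte_t *__pte = pte_offset_map(pmd, address);		\
	*(ptlp) = __ptl;					\
	spin_lock(__ptl);					\
	__pte;							\
})

#define pte_alloc_map_lock(mm, pmd, address, ptlp)		\
	((unlikely(!pmd_present(*(pmd))) &&			\
	  __pte_alloc(mm, pmd, address)) ?			\
		NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_unmap_unlock(pte, ptl)	do {			\
	spin_unlock(ptl);					\
	pte_unmap(pte);						\
} while (0)

Returning the spinlock_t pointer through ptlp, rather than hard-coding mm->page_table_lock at the unlock site, is what lets the lock backing a given page table be swapped out later without touching the callers again; this is the "may use alternatives to the mm page_table_lock itself" mentioned above.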
Diffstat (limited to 'mm/fremap.c')
-rw-r--r--	mm/fremap.c	48
1 file changed, 18 insertions, 30 deletions
diff --git a/mm/fremap.c b/mm/fremap.c
index 49719a35769a..d862be3bc3e3 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -63,23 +63,20 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pgd_t *pgd;
 	pte_t pte_val;
+	spinlock_t *ptl;
 
 	BUG_ON(vma->vm_flags & VM_RESERVED);
 
 	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto err_unlock;
-
+		goto out;
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
 
 	/*
 	 * This page may have been truncated. Tell the
@@ -89,10 +86,10 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	inode = vma->vm_file->f_mapping->host;
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (!page->mapping || page->index >= size)
-		goto err_unlock;
+		goto unlock;
 	err = -ENOMEM;
 	if (page_mapcount(page) > INT_MAX/2)
-		goto err_unlock;
+		goto unlock;
 
 	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
 		inc_mm_counter(mm, file_rss);
@@ -101,17 +98,15 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
 	page_add_file_rmap(page);
 	pte_val = *pte;
-	pte_unmap(pte);
 	update_mmu_cache(vma, addr, pte_val);
-
 	err = 0;
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
 	return err;
 }
 EXPORT_SYMBOL(install_page);
 
-
 /*
  * Install a file pte to a given virtual memory address, release any
  * previously existing mapping.
@@ -125,23 +120,20 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pgd_t *pgd;
 	pte_t pte_val;
+	spinlock_t *ptl;
 
 	BUG_ON(vma->vm_flags & VM_RESERVED);
 
 	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto err_unlock;
-
+		goto out;
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
 
 	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
 		update_hiwater_rss(mm);
@@ -150,17 +142,13 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
 	pte_val = *pte;
-	pte_unmap(pte);
 	update_mmu_cache(vma, addr, pte_val);
-	spin_unlock(&mm->page_table_lock);
-	return 0;
-
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+	pte_unmap_unlock(pte, ptl);
+	err = 0;
+out:
 	return err;
 }
 
-
 /***
  * sys_remap_file_pages - remap arbitrary pages of a shared backing store
  * file within an existing vma.