Diffstat (limited to 'arch')
-rw-r--r--  arch/tile/mm/pgtable.c  22
1 file changed, 12 insertions, 10 deletions
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 6b9a109c3e31..2410aa899b3e 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -177,14 +177,10 @@ void shatter_huge_page(unsigned long addr)
 	if (!pmd_huge_page(*pmd))
 		return;
 
-	/*
-	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
-	 * and since we need some kind of lock here to avoid races.
-	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock_irqsave(&init_mm.page_table_lock, flags);
 	if (!pmd_huge_page(*pmd)) {
 		/* Lost the race to convert the huge page. */
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 		return;
 	}
 
@@ -194,6 +190,7 @@ void shatter_huge_page(unsigned long addr)
 
 #ifdef __PAGETABLE_PMD_FOLDED
 	/* Walk every pgd on the system and update the pmd there. */
+	spin_lock(&pgd_lock);
 	list_for_each(pos, &pgd_list) {
 		pmd_t *copy_pmd;
 		pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -201,6 +198,7 @@ void shatter_huge_page(unsigned long addr)
 		copy_pmd = pmd_offset(pud, addr);
 		__set_pmd(copy_pmd, *pmd);
 	}
+	spin_unlock(&pgd_lock);
 #endif
 
 	/* Tell every cpu to notice the change. */
@@ -208,7 +206,7 @@ void shatter_huge_page(unsigned long addr)
 		     cpu_possible_mask, NULL, 0);
 
 	/* Hold the lock until the TLB flush is finished to avoid races. */
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 }
 
 /*
@@ -217,9 +215,13 @@ void shatter_huge_page(unsigned long addr)
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
+ *
+ * The lock is always taken with interrupts disabled, unlike on x86
+ * and other platforms, because we need to take the lock in
+ * shatter_huge_page(), which may be called from an interrupt context.
+ * We are not at risk from the tlbflush IPI deadlock that was seen on
+ * x86, since we use the flush_remote() API to have the hypervisor do
+ * the TLB flushes regardless of irq disabling.
  */
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
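
Read together, the hunks above establish a two-level locking order in shatter_huge_page(): init_mm.page_table_lock becomes the outer, interrupt-disabling lock, and pgd_lock is only taken nested inside it to protect the pgd_list walk. Below is a condensed sketch of that ordering reconstructed from the diff, not the complete function; the helper name shatter_huge_page_lock_order() is hypothetical, the pmd setup and TLB flush call are elided, and the snippet assumes the arch/tile kernel headers.

/*
 * Condensed sketch of the lock ordering after this patch, reconstructed
 * from the hunks above. Hypothetical helper name; not the full function.
 */
static void shatter_huge_page_lock_order(pmd_t *pmd)
{
	unsigned long flags;

	/* Outer lock: taken with irqs disabled, since shatter_huge_page()
	 * may be called from interrupt context (see the comment hunk). */
	spin_lock_irqsave(&init_mm.page_table_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* Lost the race to convert the huge page. */
		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
		return;
	}

	/* ... shatter the huge page into the preallocated L2 table ... */

#ifdef __PAGETABLE_PMD_FOLDED
	/* Inner lock: pgd_lock now guards only the pgd_list walk; a plain
	 * spin_lock() suffices because irqs are already off here. */
	spin_lock(&pgd_lock);
	/* ... copy the updated pmd into every pgd on pgd_list ... */
	spin_unlock(&pgd_lock);
#endif

	/* ... flush_remote(...): the hypervisor performs the TLB flush, so
	 * the x86-style tlbflush IPI deadlock is not a concern here ... */

	/* Hold the outer lock until the flush is finished to avoid races. */
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}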