Diffstat (limited to 'arch/tile/mm/pgtable.c')
-rw-r--r--  arch/tile/mm/pgtable.c | 38 ++++++++++++++++++++++++--------------
1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 87303693a072..2410aa899b3e 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -177,14 +177,10 @@ void shatter_huge_page(unsigned long addr)
 	if (!pmd_huge_page(*pmd))
 		return;
 
-	/*
-	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
-	 * and since we need some kind of lock here to avoid races.
-	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock_irqsave(&init_mm.page_table_lock, flags);
 	if (!pmd_huge_page(*pmd)) {
 		/* Lost the race to convert the huge page. */
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 		return;
 	}
 
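Note the pattern in this first hunk: pmd_huge_page() is tested once without the lock and again after acquiring it. The unlocked test is only a fast-path optimization, so a racing CPU can shatter the page between the test and the lock acquisition; the recheck under the lock detects that. A generic sketch of this check/lock/recheck idiom (illustrative only; needs_work() and do_work() are placeholder names, not from this file):

	if (!needs_work(obj))			/* unlocked fast path */
		return;
	spin_lock_irqsave(&lock, flags);
	if (!needs_work(obj)) {
		/* Recheck: another CPU may have won the race. */
		spin_unlock_irqrestore(&lock, flags);
		return;
	}
	do_work(obj);				/* safe: lock held, state rechecked */
	spin_unlock_irqrestore(&lock, flags);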
@@ -194,6 +190,7 @@ void shatter_huge_page(unsigned long addr)
 
 #ifdef __PAGETABLE_PMD_FOLDED
 	/* Walk every pgd on the system and update the pmd there. */
+	spin_lock(&pgd_lock);
 	list_for_each(pos, &pgd_list) {
 		pmd_t *copy_pmd;
 		pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -201,6 +198,7 @@ void shatter_huge_page(unsigned long addr)
 		copy_pmd = pmd_offset(pud, addr);
 		__set_pmd(copy_pmd, *pmd);
 	}
+	spin_unlock(&pgd_lock);
 #endif
 
 	/* Tell every cpu to notice the change. */
@@ -208,7 +206,7 @@ void shatter_huge_page(unsigned long addr)
 			     cpu_possible_mask, NULL, 0);
 
 	/* Hold the lock until the TLB flush is finished to avoid races. */
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 }
 
 /*
@@ -217,9 +215,13 @@ void shatter_huge_page(unsigned long addr)
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
+ *
+ * The lock is always taken with interrupts disabled, unlike on x86
+ * and other platforms, because we need to take the lock in
+ * shatter_huge_page(), which may be called from an interrupt context.
+ * We are not at risk from the tlbflush IPI deadlock that was seen on
+ * x86, since we use the flush_remote() API to have the hypervisor do
+ * the TLB flushes regardless of irq disabling.
  */
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
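Taken together, the hunks above leave shatter_huge_page() with the following locking skeleton (a simplified reconstruction from this diff, not a verbatim excerpt; elisions are marked with "..."). The inner pgd_lock can use plain spin_lock() because the outer spin_lock_irqsave() has already disabled interrupts, and init_mm.page_table_lock stays held across the remote TLB flush so no CPU can observe the old huge mapping after the lock is dropped:

	void shatter_huge_page(unsigned long addr)
	{
		...
		spin_lock_irqsave(&init_mm.page_table_lock, flags);
		if (!pmd_huge_page(*pmd)) {
			/* Lost the race to convert the huge page. */
			spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
			return;
		}
		... /* shatter the pmd in init_mm */
	#ifdef __PAGETABLE_PMD_FOLDED
		spin_lock(&pgd_lock);	/* irqs already off; no irqsave needed */
		list_for_each(pos, &pgd_list)
			... /* copy the new pmd into every pgd */
		spin_unlock(&pgd_lock);
	#endif
		... /* flush_remote(): have the hypervisor flush all TLBs */
		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
	}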
@@ -469,10 +471,18 @@ void __set_pte(pte_t *ptep, pte_t pte)
 
 void set_pte(pte_t *ptep, pte_t pte)
 {
-	struct page *page = pfn_to_page(pte_pfn(pte));
-
-	/* Update the home of a PTE if necessary */
-	pte = pte_set_home(pte, page_home(page));
+	if (pte_present(pte) &&
+	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
+		/* The PTE actually references physical memory. */
+		unsigned long pfn = pte_pfn(pte);
+		if (pfn_valid(pfn)) {
+			/* Update the home of the PTE from the struct page. */
+			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
+		} else if (hv_pte_get_mode(pte) == 0) {
+			/* remap_pfn_range(), etc, must supply PTE mode. */
+			panic("set_pte(): out-of-range PFN and mode 0\n");
+		}
+	}
 
 	__set_pte(ptep, pte);
 }
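The rewritten set_pte() narrows when the homecache is consulted. Condensed into its decision flow (a sketch of the hunk above; is_mmio_pte() stands in for the CHIP_HAS_MMIO()/HV_PTE_MODE_MMIO test and is not a real helper in this file):

	if (pte_present(pte) && !is_mmio_pte(pte)) {	/* maps physical memory */
		if (pfn_valid(pte_pfn(pte)))
			/* Normal RAM: take the home from the struct page. */
			pte = pte_set_home(pte, page_home(pfn_to_page(pte_pfn(pte))));
		else if (hv_pte_get_mode(pte) == 0)
			/* Out-of-range PFN with no caching mode: caller bug. */
			panic(...);
		/* else: out-of-range PFN, but the caller already set a
		 * mode (e.g. via remap_pfn_range()), so leave it alone. */
	}
	__set_pte(ptep, pte);

The notable behavioral change is that fall-through case: the old code called pfn_to_page() unconditionally, which is invalid for PFNs outside the kernel's memory map, while the new code leaves such PTEs untouched as long as the caller supplied a nonzero PTE mode.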