author		John David Anglin <dave.anglin@bell.net>	2019-04-27 14:15:38 -0400
committer	Helge Deller <deller@gmx.de>	2019-05-03 17:47:41 -0400
commit		11c03dc85f02901ce9b6a1ced099fdcaeb188572
tree		d6c8d57c3a8ad26550d6fae3898ae4e67bb9518f
parent		b37d1c1898b288c69f3dc9267bc2c41af06f4a4b
parisc: Update huge TLB page support to use per-pagetable spinlock
This patch updates the parisc huge TLB page support to use per-pagetable
spinlocks. It requires Mikulas' per-pagetable spinlock patch and the revised
TLB serialization patch from Helge and myself.

With Mikulas' patch, page table updates must take the per-pagetable spinlock.
The TLB lock is now only used to serialize TLB flushes on machines with the
Merced bus.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
 arch/parisc/mm/hugetlbpage.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
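Every hunk below makes the same substitution: the global TLB purge lock taken
via purge_tlb_start()/purge_tlb_end() is replaced by the spinlock belonging to
the mm's own page table. A minimal sketch of the new pattern (kernel context,
not a standalone program; example_set_pte() is a hypothetical name, while
pgd_spinlock() is provided by Mikulas' companion patch):

	#include <linux/spinlock.h>

	/* Hypothetical helper showing the locking pattern this patch applies. */
	static void example_set_pte(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t entry)
	{
		unsigned long flags;

		/* Old code: purge_tlb_start(flags) -- one global lock for all mms. */
		spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
		__set_huge_pte_at(mm, addr, ptep, entry);	/* the page table update */
		spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
		/* Old code: purge_tlb_end(flags). */
	}

Because the lock is derived from mm->pgd, updates to different address spaces
no longer contend on a single global lock.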
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index d77479ae3af2..d578809e55cf 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -139,9 +139,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long flags;
 
-	purge_tlb_start(flags);
+	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	__set_huge_pte_at(mm, addr, ptep, entry);
-	purge_tlb_end(flags);
+	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 }
 
 
@@ -151,10 +151,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	unsigned long flags;
 	pte_t entry;
 
-	purge_tlb_start(flags);
+	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	entry = *ptep;
 	__set_huge_pte_at(mm, addr, ptep, __pte(0));
-	purge_tlb_end(flags);
+	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 
 	return entry;
 }
@@ -166,10 +166,10 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 	unsigned long flags;
 	pte_t old_pte;
 
-	purge_tlb_start(flags);
+	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	old_pte = *ptep;
 	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-	purge_tlb_end(flags);
+	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 }
 
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
@@ -178,13 +178,14 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 {
 	unsigned long flags;
 	int changed;
+	struct mm_struct *mm = vma->vm_mm;
 
-	purge_tlb_start(flags);
+	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	changed = !pte_same(*ptep, pte);
 	if (changed) {
-		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		__set_huge_pte_at(mm, addr, ptep, pte);
 	}
-	purge_tlb_end(flags);
+	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 	return changed;
 }
 
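The pgd_spinlock() helper itself is not part of this diff; it was introduced
by the parent commit b37d1c1898b2 (Mikulas' per-pagetable spinlock patch). A
rough sketch of its shape, assuming the user pgd is over-allocated so its
lock sits in the same allocation while the shared kernel page table keeps a
dedicated lock (pa_swapper_pg_lock and PGD_ALLOC_ORDER are names from that
companion patch; treat the exact layout as illustrative):

	/* Sketch: map a pgd to the spinlock guarding its page tables. */
	static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
	{
		if (unlikely(pgd == swapper_pg_dir))
			return &pa_swapper_pg_lock;	/* kernel mappings */
		/* user pgd: the lock is stored in the tail of the pgd allocation */
		return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
	}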