Diffstat (limited to 'arch/parisc/include')

 arch/parisc/include/asm/cacheflush.h |  5
 arch/parisc/include/asm/pgtable.h    | 47
 arch/parisc/include/asm/uaccess.h    | 14
 3 files changed, 33 insertions(+), 33 deletions(-)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 79f694f3ad9b..f0e2784e7cca 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -140,7 +140,10 @@ static inline void *kmap(struct page *page)
 	return page_address(page);
 }
 
-#define kunmap(page) kunmap_parisc(page_address(page))
+static inline void kunmap(struct page *page)
+{
+	kunmap_parisc(page_address(page));
+}
 
 static inline void *kmap_atomic(struct page *page)
 {
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 7df49fad29f9..1e40d7f86be3 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,6 +16,8 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
+extern spinlock_t pa_dbit_lock;
+
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
  * memory. For the return value to be meaningful, ADDR must be >=
@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
 
 #define set_pte_at(mm, addr, ptep, pteval)                      \
 	do {                                                    \
+		unsigned long flags;                            \
+		spin_lock_irqsave(&pa_dbit_lock, flags);        \
 		set_pte(ptep, pteval);                          \
 		purge_tlb_entries(mm, addr);                    \
+		spin_unlock_irqrestore(&pa_dbit_lock, flags);   \
 	} while (0)
 
 #endif /* !__ASSEMBLY__ */
@@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
-#ifdef CONFIG_SMP
+	pte_t pte;
+	unsigned long flags;
+
 	if (!pte_young(*ptep))
 		return 0;
-	return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
-#else
-	pte_t pte = *ptep;
-	if (!pte_young(pte))
+
+	spin_lock_irqsave(&pa_dbit_lock, flags);
+	pte = *ptep;
+	if (!pte_young(pte)) {
+		spin_unlock_irqrestore(&pa_dbit_lock, flags);
 		return 0;
-	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+	}
+	set_pte(ptep, pte_mkold(pte));
+	purge_tlb_entries(vma->vm_mm, addr);
+	spin_unlock_irqrestore(&pa_dbit_lock, flags);
 	return 1;
-#endif
 }
 
-extern spinlock_t pa_dbit_lock;
-
 struct mm_struct;
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t old_pte;
+	unsigned long flags;
 
-	spin_lock(&pa_dbit_lock);
+	spin_lock_irqsave(&pa_dbit_lock, flags);
 	old_pte = *ptep;
 	pte_clear(mm,addr,ptep);
-	spin_unlock(&pa_dbit_lock);
+	purge_tlb_entries(mm, addr);
+	spin_unlock_irqrestore(&pa_dbit_lock, flags);
 
 	return old_pte;
 }
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-#ifdef CONFIG_SMP
-	unsigned long new, old;
-
-	do {
-		old = pte_val(*ptep);
-		new = pte_val(pte_wrprotect(__pte (old)));
-	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
+	unsigned long flags;
+	spin_lock_irqsave(&pa_dbit_lock, flags);
+	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
-#else
-	pte_t old_pte = *ptep;
-	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-#endif
+	spin_unlock_irqrestore(&pa_dbit_lock, flags);
 }
 
 #define pte_same(A,B) (pte_val(A) == pte_val(B))
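
Across the pgtable.h hunks the pattern is the same: the CONFIG_SMP cmpxchg loops and the unlocked UP variants are replaced by one code path that takes pa_dbit_lock with interrupts disabled, rewrites the PTE, purges the matching TLB entry, and unlocks. A minimal user-space sketch of that shape, assuming a pthread mutex as a stand-in for spin_lock_irqsave() (the real lock additionally disables local interrupts, which the sketch cannot show) and a plain word as the PTE:

#include <pthread.h>
#include <stdio.h>

#define PAGE_ACCESSED	0x1UL			/* stand-in for _PAGE_ACCESSED */

static unsigned long pte = PAGE_ACCESSED;	/* stand-in for a live PTE */
static pthread_mutex_t pte_lock = PTHREAD_MUTEX_INITIALIZER;	/* models pa_dbit_lock */

static void purge_tlb_entry(void)
{
	/* the kernel would flush the now-stale TLB entry here, still under the lock */
}

/* Same shape as the patched ptep_test_and_clear_young(): cheap unlocked check,
 * then re-check and modify under the lock, unlocking on the early-return path too. */
static int test_and_clear_young(void)
{
	unsigned long val;

	if (!(pte & PAGE_ACCESSED))		/* fast path: bit already clear */
		return 0;

	pthread_mutex_lock(&pte_lock);
	val = pte;
	if (!(val & PAGE_ACCESSED)) {		/* lost a race; drop the lock first */
		pthread_mutex_unlock(&pte_lock);
		return 0;
	}
	pte = val & ~PAGE_ACCESSED;		/* write back with the bit cleared */
	purge_tlb_entry();			/* keep TLB and PTE consistent before unlock */
	pthread_mutex_unlock(&pte_lock);
	return 1;
}

int main(void)
{
	printf("%d %d\n", test_and_clear_young(), test_and_clear_young());	/* prints: 1 0 */
	return 0;
}
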
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4ba2c93770f1..e0a82358517e 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -181,30 +181,24 @@ struct exception_data {
 #if !defined(CONFIG_64BIT)
 
 #define __put_kernel_asm64(__val,ptr) do {		    \
-	u64 __val64 = (u64)(__val);			    \
-	u32 hi = (__val64) >> 32;			    \
-	u32 lo = (__val64) & 0xffffffff;		    \
 	__asm__ __volatile__ (				    \
 		"\n1:\tstw %2,0(%1)"			    \
-		"\n2:\tstw %3,4(%1)\n\t"		    \
+		"\n2:\tstw %R2,4(%1)\n\t"		    \
 		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
 		ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
 		: "=r"(__pu_err)			    \
-		: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+		: "r"(ptr), "r"(__val), "0"(__pu_err)	    \
 		: "r1");				    \
 } while (0)
 
 #define __put_user_asm64(__val,ptr) do {		    \
-	u64 __val64 = (u64)(__val);			    \
-	u32 hi = (__val64) >> 32;			    \
-	u32 lo = (__val64) & 0xffffffff;		    \
 	__asm__ __volatile__ (				    \
 		"\n1:\tstw %2,0(%%sr3,%1)"		    \
-		"\n2:\tstw %3,4(%%sr3,%1)\n\t"		    \
+		"\n2:\tstw %R2,4(%%sr3,%1)\n\t"		    \
 		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
 		ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
 		: "=r"(__pu_err)			    \
-		: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+		: "r"(ptr), "r"(__val), "0"(__pu_err)	    \
 		: "r1");				    \
 } while (0)
 
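
The uaccess.h hunks drop the hand-written split of the 64-bit value into hi/lo words and instead pass the whole u64 as a single "r" operand; my reading is that on 32-bit PA-RISC such an operand occupies a register pair, so "%2" names one half and the "%R2" modifier names the other, which is what lets the two stw instructions store both words. For reference, a portable sketch of the split the old macros performed by hand (plain C, no PA-RISC asm):

#include <stdio.h>
#include <stdint.h>

/* What the removed u64/hi/lo lines computed before the asm block: break a
 * 64-bit value into the two 32-bit words stored by the two stw instructions.
 * With %2/%R2 the compiler hands the asm the register pair directly and this
 * manual split disappears. */
static void split64(uint64_t val, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(val >> 32);
	*lo = (uint32_t)(val & 0xffffffff);
}

int main(void)
{
	uint32_t hi, lo;

	split64(0x0123456789abcdefULL, &hi, &lo);
	printf("hi=%08x lo=%08x\n", hi, lo);	/* hi=01234567 lo=89abcdef */
	return 0;
}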