about summary refs log tree commit diff stats
path: root/arch/sparc64/lib/clear_page.S
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sparc64/lib/clear_page.S')
-rw-r--r--  arch/sparc64/lib/clear_page.S  105
1 files changed, 105 insertions, 0 deletions
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S
new file mode 100644
index 000000000000..b59884ef051d
--- /dev/null
+++ b/arch/sparc64/lib/clear_page.S
@@ -0,0 +1,105 @@
1/* clear_page.S: UltraSparc optimized clear page.
2 *
3 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
5 */
6
7#include <asm/visasm.h>
8#include <asm/thread_info.h>
9#include <asm/page.h>
10#include <asm/pgtable.h>
11#include <asm/spitfire.h>
12
13 /* What we used to do was lock a TLB entry into a specific
14 * TLB slot, clear the page with interrupts disabled, then
15 * restore the original TLB entry. This was great for
16 * disturbing the TLB as little as possible, but it meant
17 * we had to keep interrupts disabled for a long time.
18 *
19 * Now, we simply use the normal TLB loading mechanism,
20 * and this makes the cpu choose a slot all by itself.
21 * Then we do a normal TLB flush on exit. We need only
22 * disable preemption during the clear.
23 */
24
/* TTE (translation table entry) bits for the temporary user-page mapping.
 * TOP half:    valid bit plus the page-size field.
 * BOTTOM half: cacheable in both physical (CP) and virtual (CV) caches,
 *              privileged, locked into the TLB while in use, writable.
 */
#define	TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
#define	TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
27
	.text

	/* _clear_page: clear one page already mapped in the kernel's
	 * linear mapping.  %o0 = kernel virtual address of the page.
	 * No temporary TLB entry is needed, so %o4 is cleared to tell
	 * clear_page_common to skip the TLB-demap/preempt-restore exit
	 * path.  (The clr sits in the branch delay slot.)
	 */
	.globl		_clear_page
_clear_page:		/* %o0=dest */
	ba,pt		%xcc, clear_page_common
	 clr		%o4			! %o4 = 0: no TLB teardown on exit
34
	/* This thing is pretty important, it shows up
	 * on the profiles via do_anonymous_page().
	 *
	 * clear_user_page: clear a page on behalf of userspace, taking
	 * care of D-cache aliasing.  %o0 = kernel vaddr of the page,
	 * %o1 = user vaddr it will be mapped at.  We hand-build a TTE
	 * for the page's physical address and load it at a TLBTEMP_BASE
	 * alias chosen to match the user mapping's D-cache color, then
	 * fall through into clear_page_common with %o4 = 1 so the
	 * temporary TLB entry is demapped (and the preempt count
	 * restored) on exit.
	 *
	 * NOTE(review): %g6 is assumed to hold the current thread_info
	 * pointer (sparc64 convention) — TI_PRE_COUNT is loaded/stored
	 * relative to it.
	 */
	.align		32
	.globl		clear_user_page
clear_user_page:	/* %o0=dest, %o1=vaddr */
	lduw		[%g6 + TI_PRE_COUNT], %o2	! %o2 = saved preempt count
	sethi		%uhi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_SIZE), %o4

	sllx		%g2, 32, %g2			! %g2 = PAGE_OFFSET
	sethi		%uhi(TTE_BITS_TOP), %g3

	sllx		%g3, 32, %g3
	sub		%o0, %g2, %g1			! paddr

	or		%g3, TTE_BITS_BOTTOM, %g3	! %g3 = full TTE attribute bits
	and		%o1, %o4, %o0			! vaddr D-cache alias bit

	or		%g1, %g3, %g1			! TTE data
	sethi		%hi(TLBTEMP_BASE), %o3

	add		%o2, 1, %o4			! bumped preempt count
	add		%o0, %o3, %o0			! TTE vaddr (TLBTEMP_BASE + alias bit)

	/* Disable preemption.  */
	mov		TLB_TAG_ACCESS, %g3
	stw		%o4, [%g6 + TI_PRE_COUNT]

	/* Load TLB entry.  Interrupts must be off across the
	 * tag-access / data-in pair so nothing else can disturb
	 * the D-TLB between the two stores.
	 */
	rdpr		%pstate, %o4
	wrpr		%o4, PSTATE_IE, %pstate		! clear IE bit
	stxa		%o0, [%g3] ASI_DMMU		! set TLB tag = temp vaddr
	stxa		%g1, [%g0] ASI_DTLB_DATA_IN	! load TTE into a cpu-chosen slot
	flush		%g6
	wrpr		%o4, 0x0, %pstate		! restore interrupt state

	mov		1, %o4				! %o4 = 1: demap + preempt restore on exit
	/* clear_page_common: shared clearing loop.
	 * In:  %o0 = vaddr to clear, %o4 = nonzero if a temporary TLB
	 *      entry must be demapped (and the preempt count in %o2
	 *      restored) on exit.
	 * Zeroes the page with 64-byte VIS block stores: %f0-%f14 are
	 * all forced to zero (faddd/fmuld of zero operands yield zero),
	 * then stda ASI_BLK_P writes the whole 64-byte register block
	 * per iteration, PAGE_SIZE/64 iterations total.
	 */
clear_page_common:
	VISEntryHalf
	membar		#StoreLoad | #StoreStore | #LoadStore
	fzero		%f0
	sethi		%hi(PAGE_SIZE/64), %o1		! %o1 = block-store loop count
	mov		%o0, %g1			! remember vaddr for tlbflush
	fzero		%f2
	or		%o1, %lo(PAGE_SIZE/64), %o1
	faddd		%f0, %f2, %f4			! 0 + 0 = 0
	fmuld		%f0, %f2, %f6			! 0 * 0 = 0
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10

	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
1:	stda		%f0, [%o0 + %g0] ASI_BLK_P	! store 64 zero bytes
	subcc		%o1, 1, %o1
	bne,pt		%icc, 1b
	 add		%o0, 0x40, %o0			! advance dest (delay slot)
	membar		#Sync				! drain the block stores
	VISExitHalf

	brz,pn		%o4, out			! kernel path: nothing to tear down
	 nop

	/* User path: demap the temporary D-TLB entry and re-enable
	 * preemption by restoring the saved count.
	 */
	stxa		%g0, [%g1] ASI_DMMU_DEMAP
	membar		#Sync
	stw		%o2, [%g6 + TI_PRE_COUNT]

out:	retl
	 nop
105