author		David S. Miller <davem@sunset.davemloft.net>	2006-02-01 18:55:21 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:11:32 -0500
commit		517af33237ecfc3c8a93b335365fa61e741ceca4 (patch)
tree		58eff40eb4c517c4fd49fd347d38273ee1e1ee4b /arch/sparc64/kernel
parent		b0fd4e49aea8a460afab7bc67cd618e2d19291d4 (diff)
[SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB.

The trick is that every TSB load/store is registered into a special
instruction patch section.  The default uses virtual addresses, and the
patch instructions use physical address load/stores.

We can't do this on all chips because only cheetah+ and later have the
physical variant of the atomic quad load.

Signed-off-by: David S. Miller <davem@davemloft.net>
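[Editor's note: the patch-section mechanism appears in the hunks below only through its call sites (TSB_LOAD_QUAD, TSB_STORE, the KTSB_* variants) and through the new .tsb_phys_patch output section in vmlinux.lds.S; the macro definitions live in a header outside this diffstat. A minimal sketch of the shape such a macro takes follows; the physical ASI name and exact directives are assumptions, not lifted from this diff.]

	/* Sketch only: emit the virtual-address access inline, and record
	 * the instruction's address plus a physical-address replacement in
	 * .tsb_phys_patch so boot code can rewrite it on cheetah+.
	 */
	#define TSB_LOAD_QUAD(TSB, REG)				\
	661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG;\
		.section	.tsb_phys_patch, "ax";		\
		.word		661b;				\
		ldda		[TSB] ASI_QUAD_LDD_PHYS, REG;	\
		.previous;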
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--	arch/sparc64/kernel/dtlb_miss.S		2
-rw-r--r--	arch/sparc64/kernel/itlb_miss.S		2
-rw-r--r--	arch/sparc64/kernel/ktlb.S		20
-rw-r--r--	arch/sparc64/kernel/tsb.S		35
-rw-r--r--	arch/sparc64/kernel/vmlinux.lds.S	4
5 files changed, 46 insertions, 17 deletions
diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S
index d0f1565cb564..2ef6f6e6e72b 100644
--- a/arch/sparc64/kernel/dtlb_miss.S
+++ b/arch/sparc64/kernel/dtlb_miss.S
@@ -4,7 +4,7 @@
 	srlx	%g6, 48, %g5			! Get context
 	brz,pn	%g5, kvmap_dtlb			! Context 0 processing
 	 nop					! Delay slot (fill me)
-	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB entry
+	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
 	nop					! Push branch to next I$ line
 	cmp	%g4, %g6			! Compare TAG
 
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index 6b6c8fee04bd..97facce27aad 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -4,7 +4,7 @@
 	srlx	%g6, 48, %g5			! Get context
 	brz,pn	%g5, kvmap_itlb			! Context 0 processing
 	 nop					! Delay slot (fill me)
-	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB entry
+	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
 	cmp	%g4, %g6			! Compare TAG
 	sethi	%hi(_PAGE_EXEC), %g4		! Setup exec check
 
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 2b5e71b68882..9b415ab6db6b 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -44,14 +44,14 @@ kvmap_itlb_tsb_miss:
 kvmap_itlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, kvmap_itlb_longpath
-	 stx		%g0, [%g1]
+	 KTSB_STORE(%g1, %g0)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
 
@@ -69,9 +69,9 @@ kvmap_itlb_longpath:
 kvmap_itlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	ba,pt		%xcc, kvmap_itlb_load
 	 nop
@@ -79,9 +79,9 @@ kvmap_itlb_obp:
 kvmap_dtlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	ba,pt		%xcc, kvmap_dtlb_load
 	 nop
@@ -114,14 +114,14 @@ kvmap_linear_patch:
 kvmap_dtlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, kvmap_dtlb_longpath
-	 stx		%g0, [%g1]
+	 KTSB_STORE(%g1, %g0)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
 
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index e1dd37f5e535..ff6a79beb98d 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -53,7 +53,7 @@ tsb_reload:
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, tsb_do_fault
-	 stx		%g0, [%g1]
+	 TSB_STORE(%g1, %g0)
 
 	/* If it is larger than the base page size, don't
 	 * bother putting it into the TSB.
@@ -64,7 +64,7 @@ tsb_reload:
 	and		%g2, %g4, %g2
 	cmp		%g2, %g7
 	bne,a,pn	%xcc, tsb_tlb_reload
-	 stx		%g0, [%g1]
+	 TSB_STORE(%g1, %g0)
 
 	TSB_WRITE(%g1, %g5, %g6)
 
@@ -131,13 +131,13 @@ winfix_trampoline:
 
 	/* Insert an entry into the TSB.
 	 *
-	 * %o0: TSB entry pointer
+	 * %o0: TSB entry pointer (virt or phys address)
 	 * %o1: tag
 	 * %o2: pte
 	 */
 	.align	32
-	.globl	tsb_insert
-tsb_insert:
+	.globl	__tsb_insert
+__tsb_insert:
 	rdpr	%pstate, %o5
 	wrpr	%o5, PSTATE_IE, %pstate
 	TSB_LOCK_TAG(%o0, %g2, %g3)
@@ -146,6 +146,31 @@ tsb_insert:
 	retl
 	 nop
 
+	/* Flush the given TSB entry if it has the matching
+	 * tag.
+	 *
+	 * %o0: TSB entry pointer (virt or phys address)
+	 * %o1: tag
+	 */
+	.align	32
+	.globl	tsb_flush
+tsb_flush:
+	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
+1:	TSB_LOAD_TAG(%o0, %g1)
+	srlx	%g1, 32, %o3
+	andcc	%o3, %g2, %g0
+	bne,pn	%icc, 1b
+	 membar	#LoadLoad
+	cmp	%g1, %o1
+	bne,pt	%xcc, 2f
+	 clr	%o3
+	TSB_CAS_TAG(%o0, %g1, %o3)
+	cmp	%g1, %o3
+	bne,pn	%xcc, 1b
+	 nop
+2:	retl
+	 TSB_MEMBAR
+
 	/* Reload MMU related context switch state at
 	 * schedule() time.
 	 *
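[Editor's note: the new tsb_flush above follows the same tag-locking discipline as the insert path: spin (with a #LoadLoad membar in the branch delay slot) while the tag word's lock bit is set, bail out if the tag no longer matches the entry being flushed, then compare-and-swap the tag to zero, retrying if the CAS loses a race. A rough C rendering of that control flow, purely for illustration (the function name is hypothetical; cmpxchg is the kernel's compare-and-swap primitive):]

	/* Illustrative C equivalent of the tsb_flush assembly above. */
	static void tsb_flush_sketch(unsigned long *ent, unsigned long tag)
	{
		unsigned long cur;

		for (;;) {
			cur = *ent;				/* TSB_LOAD_TAG     */
			if ((cur >> 32) & TSB_TAG_LOCK_HIGH)	/* entry locked?    */
				continue;			/* spin             */
			if (cur != tag)				/* not our entry:   */
				break;				/* nothing to flush */
			if (cmpxchg(ent, cur, 0UL) == cur)	/* TSB_CAS_TAG      */
				break;				/* tag cleared      */
		}						/* TSB_MEMBAR then return */
	}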
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 467d13a0d5c1..71b943f1c9b1 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -70,6 +70,10 @@ SECTIONS
 	.con_initcall.init : { *(.con_initcall.init) }
 	__con_initcall_end = .;
 	SECURITY_INIT
+	. = ALIGN(4);
+	__tsb_phys_patch = .;
+	.tsb_phys_patch : { *(.tsb_phys_patch) }
+	__tsb_phys_patch_end = .;
 	. = ALIGN(8192);
 	__initramfs_start = .;
 	.init.ramfs : { *(.init.ramfs) }
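[Editor's note: the __tsb_phys_patch and __tsb_phys_patch_end symbols added here bracket the accumulated patch entries so boot code can walk them once the CPU type is known. The patching pass itself is outside this diffstat; below is a plausible sketch, assuming each entry is a pair of 32-bit words (instruction address, replacement instruction) as the .word emission and ALIGN(4) suggest, with hypothetical names throughout.]

	/* Sketch of a boot-time pass rewriting each registered
	 * virtual-address access with its physical-ASI variant.
	 */
	struct tsb_phys_patch_entry {
		unsigned int	addr;	/* address of the insn to rewrite  */
		unsigned int	insn;	/* physical-ASI replacement opcode */
	};

	extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;

	static void __init tsb_phys_patch(void)
	{
		struct tsb_phys_patch_entry *p = &__tsb_phys_patch;

		while (p < &__tsb_phys_patch_end) {
			unsigned long addr = p->addr;

			*(unsigned int *) addr = p->insn;
			wmb();					/* order the store    */
			__asm__ __volatile__("flush	%0"	/* sync the I-cache   */
					     : /* no outputs */
					     : "r" (addr));
			p++;
		}
	}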