author		David S. Miller <davem@sunset.davemloft.net>	2006-02-01 18:55:21 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:11:32 -0500
commit		517af33237ecfc3c8a93b335365fa61e741ceca4
tree		58eff40eb4c517c4fd49fd347d38273ee1e1ee4b	/arch/sparc64/kernel/ktlb.S
parent		b0fd4e49aea8a460afab7bc67cd618e2d19291d4
[SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB.
The trick is that every TSB load/store is registered in
a special instruction patch section. The default instructions
use virtual addresses, and the patched variants use
physical-address loads and stores.
We can't do this on all chips because only cheetah+ and later
have the physical variant of the atomic quad load.
Signed-off-by: David S. Miller <davem@davemloft.net>
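
The patch-section mechanism described above can be sketched in kernel-style C:
boot code walks a table of (instruction address, replacement opcode) pairs and
rewrites each registered TSB access in place. This is a minimal sketch, not the
commit's exact code; the entry layout and the names __tsb_phys_patch,
__tsb_phys_patch_end, and tsb_phys_patch() are assumptions for illustration.

#include <linux/init.h>		/* __init */
#include <asm/system.h>		/* wmb(), 2.6-era location */

/* One entry per registered TSB load/store: where the default
 * virtual-address instruction lives, and the physical-address
 * opcode to write over it.  (Assumed layout, for illustration.)
 */
struct tsb_phys_patch_entry {
	unsigned int	addr;	/* address of instruction to patch  */
	unsigned int	insn;	/* replacement physical-ASI opcode  */
};

/* Section bounds would be provided by the linker script. */
extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;

static void __init tsb_phys_patch(void)
{
	struct tsb_phys_patch_entry *p = &__tsb_phys_patch;

	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		/* Rewrite the instruction, then flush the I-cache
		 * line so the new opcode is fetched.
		 */
		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));
		p++;
	}
}

Boot code would invoke this only on cheetah+ and later, where the physical
atomic quad load exists; older chips keep the default virtual-address
instructions untouched.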
Diffstat (limited to 'arch/sparc64/kernel/ktlb.S')
-rw-r--r--	arch/sparc64/kernel/ktlb.S | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 2b5e71b68882..9b415ab6db6b 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -44,14 +44,14 @@ kvmap_itlb_tsb_miss:
 kvmap_itlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
 	/* Load and check PTE. */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, kvmap_itlb_longpath
-	 stx		%g0, [%g1]
+	 KTSB_STORE(%g1, %g0)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
 
@@ -69,9 +69,9 @@ kvmap_itlb_longpath:
 kvmap_itlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	ba,pt		%xcc, kvmap_itlb_load
 	 nop
@@ -79,9 +79,9 @@ kvmap_itlb_obp:
 kvmap_dtlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	ba,pt		%xcc, kvmap_dtlb_load
 	 nop
@@ -114,14 +114,14 @@ kvmap_linear_patch:
 kvmap_dtlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
 	/* Load and check PTE. */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, kvmap_dtlb_longpath
-	 stx		%g0, [%g1]
+	 KTSB_STORE(%g1, %g0)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
 
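
For context, the KTSB_* macros used above (introduced alongside this change,
outside this file) are where the registration happens: each expands to the
default virtual-address instruction plus a patch-section entry pairing that
instruction's address with its physical-address replacement. Below is a hedged
sketch of what KTSB_STORE could look like; the exact macro body, ASI choices,
and section name are assumptions, not the kernel's actual definition.

/* Assumed sketch: emit the default (virtual) store, then record
 * its address and a physical-ASI replacement in .tsb_phys_patch
 * for the boot-time patcher to apply on capable chips.
 */
#define KTSB_STORE(ADDR, VAL)				\
661:	stxa	VAL, [ADDR] ASI_N;			\
	.section	.tsb_phys_patch, "ax";		\
	.word	661b;					\
	stxa	VAL, [ADDR] ASI_PHYS_USE_EC;		\
	.previous;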