author | Paul Mackerras <paulus@samba.org> | 2005-10-10 07:58:35 -0400
committer | Paul Mackerras <paulus@samba.org> | 2005-10-10 07:58:35 -0400
commit | ab1f9dac6eea25ee59e4c8e1cf0b7476afbbfe07 (patch)
tree | 03577652197b5e58c348ede3c474bc8dd47e046c /arch/powerpc/mm/slb_low.S
parent | 70d64ceaa1a84d2502405422a4dfd3f87786a347 (diff)
powerpc: Merge arch/ppc64/mm to arch/powerpc/mm
This moves the remaining files in arch/ppc64/mm to arch/powerpc/mm,
and arranges that we use them when compiling with ARCH=ppc64.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/slb_low.S')
-rw-r--r-- | arch/powerpc/mm/slb_low.S | 151
1 file changed, 151 insertions, 0 deletions
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
new file mode 100644
index 000000000000..a3a03da503bc
--- /dev/null
+++ b/arch/powerpc/mm/slb_low.S
@@ -0,0 +1,151 @@
/*
 * arch/ppc64/mm/slb_low.S
 *
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

/* void slb_allocate(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
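/*
 * Overall flow: pick a victim slot round robin, build the ESID
 * (slot-select) word, derive the VSID from the proto-VSID via the
 * scramble, build the VSID (data) word, then write the pair with
 * slbmte.  User entries are also recorded in the SLB cache so they
 * can be flushed cheaply at context switch.
 */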
_GLOBAL(slb_allocate)
	/*
	 * First find a slot, round robin. Previously we tried to find
	 * a free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r11,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r11
	beq	3f
#endif /* CONFIG_PPC_ISERIES */
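	/*
	 * The clrrdi pair above clears the low 28 bits of each address,
	 * leaving just the ESIDs, so the cmpld asks "is the fault in the
	 * kernel stack's segment?".  If so, skip the round robin and
	 * reinsert it at the fixed bolted slot via the branch to 3:.
	 */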

	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* use a cpu feature mask if we ever change our slb size */
	cmpldi	r10,SLB_NUM_ENTRIES

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)
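	/*
	 * Note the wrap target: the round-robin pointer restarts at
	 * SLB_NUM_BOLTED rather than 0, so the bolted entries in slots
	 * 0..SLB_NUM_BOLTED-1 are never selected as victims.
	 */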
3:
	/* r3 = faulting address, r10 = entry */

	srdi	r9,r3,60		/* get region */
	srdi	r3,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */

	rldimi	r10,r3,28,0		/* r10= ESID<<28 | entry */
	oris	r10,r10,SLB_ESID_V@h	/* r10 |= SLB_ESID_V */

	/* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */

	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the protoVSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */
	li	r11,SLB_VSID_KERNEL
BEGIN_FTR_SECTION
	bne	cr7,9f
	li	r11,(SLB_VSID_KERNEL|SLB_VSID_L)
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
	b	9f

0:	/* user address: proto-VSID = context<<15 | ESID */
	srdi.	r9,r3,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */

#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
	lhz	r9,PACAHIGHHTLBAREAS(r13)
	srdi	r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT)
	srd	r9,r9,r11
	lhz	r11,PACALOWHTLBAREAS(r13)
	srd	r11,r11,r3
	or	r9,r9,r11
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
#endif /* CONFIG_HUGETLB_PAGE */
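	/*
	 * The two PACA halfwords are bitmaps of hugepage-enabled
	 * ranges: one bit per 256MB segment (low) and one bit per
	 * 2^HTLB_AREA_SHIFT-byte area (high).  In C, roughly:
	 *
	 *	huge = (high_areas >> (esid >> (HTLB_AREA_SHIFT - SID_SHIFT)))
	 *	     | (low_areas >> esid);
	 *
	 * leaving "is this a hugepage segment?" in bit 0 of r9, which
	 * the rldimi below deposits at the SLB_VSID_L position.
	 */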

	li	r11,SLB_VSID_USER

#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
	rldimi	r11,r9,8,55		/* shift masked bit into SLB_VSID_L */
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
#endif /* CONFIG_HUGETLB_PAGE */

	ld	r9,PACACONTEXTID(r13)
	rldimi	r3,r9,USER_ESID_BITS,0

9:	/* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
	ASM_VSID_SCRAMBLE(r3,r9)
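	/*
	 * ASM_VSID_SCRAMBLE (asm/mmu.h) computes
	 *	VSID = (protoVSID * VSID_MULTIPLIER) % VSID_MODULUS
	 * using r9 as scratch: a multiplicative hash that spreads
	 * consecutive proto-VSIDs across the hash table so adjacent
	 * segments don't collide.
	 */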

	rldimi	r11,r3,SLB_VSID_SHIFT,16	/* combine VSID and flags */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10
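	/* slbmte takes the VSID half in RS (r11) and the ESID half,
	 * including the valid bit and slot index, in RB (r10). */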

	bgelr	cr7			/* we're done for kernel addresses */

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:	/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	blr
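	/*
	 * In C, the cache update above is roughly:
	 *
	 *	if (paca->slb_cache_ptr < SLB_CACHE_ENTRIES)
	 *		paca->slb_cache[paca->slb_cache_ptr++] = (u16)esid;
	 *	else
	 *		paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	 *
	 * Caching user ESIDs lets the context-switch path slbie just
	 * these entries instead of flushing the whole SLB; a pointer
	 * beyond SLB_CACHE_ENTRIES marks the cache as overflowed and
	 * forces a full flush (see switch_slb() in slb.c).
	 */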

8:	/* invalid EA */
	li	r3,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	9b