| author | David Gibson <david@gibson.dropbear.id.au> | 2005-05-05 19:15:13 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-05-05 19:36:32 -0400 |
| commit | 1f8d419e291f7f7f7f3ffd4f0ba00834621690c8 (patch) | |
| tree | 833df93032a38bc749458ce8be3a316eae1d5215 /include/asm-ppc64/mmu_context.h | |
| parent | e685752de107201432a055f7c45c396a5b04dc17 (diff) | |
[PATCH] ppc64: pgtable.h and other header cleanups
This patch started as simply removing a few never-used macros from
asm-ppc64/pgtable.h, then kind of grew. It now makes a bunch of
cleanups to the ppc64 low-level header files (with corresponding
changes to .c files where necessary) such as:
- Abolishing never-used macros
- Eliminating multiple #defines with the same purpose
- Removing pointless macros (cases where just expanding the
macro everywhere turns out clearer and more sensible)
- Fixing cases where macros that could be defined in terms of
each other weren't
- Moving imalloc() related definitions from pgtable.h to their
own header file (imalloc.h)
- Re-arranging headers to group things more logically
- Moving everything related to VSID allocation into mmu.h, instead
of splitting it between mmu.h and mmu_context.h
- Removing some reserved space for flags from the PMD - we're
not using it.
- Fixing some bugs that broke compilation with STRICT_MM_TYPECHECKS.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-ppc64/mmu_context.h')
-rw-r--r-- include/asm-ppc64/mmu_context.h | 82
1 file changed, 0 insertions, 82 deletions
```diff
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
index c2e8e0466383..77a743402db4 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-ppc64/mmu_context.h
@@ -84,86 +84,4 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	local_irq_restore(flags);
 }
 
-/* VSID allocation
- * ===============
- *
- * We first generate a 36-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID, for user addresses it is:
- *	(context << 15) | (esid & 0x7fff)
- *
- * The two forms are distinguishable because the top bit is 0 for user
- * addresses, whereas the top two bits are 1 for kernel addresses.
- * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
- * now.
- *
- * The proto-VSIDs are then scrambled into real VSIDs with the
- * multiplicative hash:
- *
- *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
- *	where	VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
- *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
- *
- * This scramble is only well defined for proto-VSIDs below
- * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
- * reserved. VSID_MULTIPLIER is prime, so in particular it is
- * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
- * Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
- *
- * This scheme has several advantages over older methods:
- *
- *	- We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
- *
- *	- We allow for 15 significant bits of ESID and 20 bits of
- * context for user addresses. i.e. 8T (43 bits) of address space for
- * up to 1M contexts (although the page table structure and context
- * allocation will need changes to take advantage of this).
- *
- *	- The scramble function gives robust scattering in the hash
- * table (at least based on some initial results). The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
- */
-
-/*
- * WARNING - If you change these you must make sure the asm
- * implementations in slb_allocate(), do_stab_bolted and mmu.h
- * (ASM_VSID_SCRAMBLE macro) are changed accordingly.
- *
- * You'll also need to change the precomputed VSID values in head.S
- * which are used by the iSeries firmware.
- */
-
-static inline unsigned long vsid_scramble(unsigned long protovsid)
-{
-#if 0
-	/* The code below is equivalent to this function for arguments
-	 * < 2^VSID_BITS, which is all this should ever be called
-	 * with. However gcc is not clever enough to compute the
-	 * modulus (2^n-1) without a second multiply. */
-	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
-#else /* 1 */
-	unsigned long x;
-
-	x = protovsid * VSID_MULTIPLIER;
-	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
-	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
-#endif /* 1 */
-}
-
-/* This is only valid for addresses >= KERNELBASE */
-static inline unsigned long get_kernel_vsid(unsigned long ea)
-{
-	return vsid_scramble(ea >> SID_SHIFT);
-}
-
-/* This is only valid for user addresses (which are below 2^41) */
-static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
-{
-	return vsid_scramble((context << USER_ESID_BITS)
-			     | (ea >> SID_SHIFT));
-}
-
 #endif /* __PPC64_MMU_CONTEXT_H */
```
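For context on the code being removed: the `#else` branch of `vsid_scramble()` avoids a divide by exploiting the fact that 2^36 ≡ 1 (mod 2^36 - 1), so the high bits of the 64-bit product can be folded back onto the low 36 bits. The sketch below is a minimal userspace check, not kernel code, that compares this fold against a plain `%` reduction; the constants `VSID_BITS = 36`, `SID_SHIFT = 28`, and `USER_ESID_BITS = 15` are assumptions inferred from the removed comment (the hunk above does not show their definitions), and `main()` is only test scaffolding.

```c
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values inferred from the removed comment block: 36-bit
 * proto-VSIDs, 256MB segments, 15 user ESID bits. The header hunk
 * above does not show these definitions. */
#define VSID_MULTIPLIER UINT64_C(268435399)               /* 0xFFFFFC7, prime */
#define VSID_BITS       36
#define VSID_MODULUS    ((UINT64_C(1) << VSID_BITS) - 1)  /* 2^36 - 1 */
#define SID_SHIFT       28
#define USER_ESID_BITS  15

/* Same folding trick as the deleted vsid_scramble(): since
 * 2^36 == 1 (mod 2^36 - 1), add the high bits of the product back
 * onto the low 36 bits instead of dividing. */
static uint64_t vsid_scramble(uint64_t protovsid)
{
	uint64_t x = protovsid * VSID_MULTIPLIER;

	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;
}

/* Mirror of the deleted get_vsid(): the user proto-VSID is
 * (context << 15) | esid, per the removed comment. */
static uint64_t get_vsid(uint64_t context, uint64_t ea)
{
	return vsid_scramble((context << USER_ESID_BITS) | (ea >> SID_SHIFT));
}

int main(void)
{
	uint64_t samples[] = {
		1,
		0x7fff,                /* largest user ESID */
		UINT64_C(0xC00000000), /* 0xC000000000000000 >> 28: first kernel segment */
		VSID_MODULUS - 1,      /* largest non-reserved proto-VSID */
	};

	/* p * VSID_MULTIPLIER stays below 2^64 for any p < 2^36, so the
	 * plain '%' below really is the reference modulus. */
	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t p = samples[i];

		assert(vsid_scramble(p) == (p * VSID_MULTIPLIER) % VSID_MODULUS);
		printf("proto-VSID 0x%09" PRIx64 " -> VSID 0x%09" PRIx64 "\n",
		       p, vsid_scramble(p));
	}

	/* Example: context 1, user address 0x10000000 gives proto-VSID
	 * (1 << 15) | 1 = 0x8001. */
	printf("get_vsid(1, 0x10000000) = 0x%09" PRIx64 "\n",
	       get_vsid(1, UINT64_C(0x10000000)));
	return 0;
}
```

Why a single correction step suffices: after the first fold, x is at most (2^28 - 1) + (2^36 - 1), i.e. less than twice the modulus, so at most one more reduction is needed. The `(x + 1) >> VSID_BITS` term supplies it, and also maps an intermediate value of exactly 2^36 - 1 to 0, agreeing with the true modulus.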