author    David Gibson <david@gibson.dropbear.id.au>  2005-07-13 04:11:42 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>       2005-07-13 14:25:25 -0400
commit    96e2844999f99878fc5b03b81ccaa60580005b81 (patch)
tree      353c1bc9a5602d556e6741f4a261010cde45e93b /arch/ppc64/mm
parent    f13487c66c75f5db004a0631047309d9e7c5aab7 (diff)

[PATCH] ppc64: kill bitfields in ppc64 hash code

This patch removes the use of bitfield types from the ppc64 hash table
manipulation code.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
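For orientation before the diff: the patch trades the old bitfield view of a
hash PTE for two bare doublewords plus mask macros. The sketch below only
approximates the header change (which lands outside this diffstat, in the
ppc64 mmu headers); the field widths and mask values here are assumptions for
illustration, so consult the real include/asm-ppc64/mmu.h for the
authoritative definitions.

/* Approximate sketch, not the verbatim kernel header. */

/* Before: bitfield view of the first HPTE doubleword. */
typedef struct {
	unsigned long avpn:   57;	/* abbreviated virtual page number */
	unsigned long sw:      2;	/* reserved for software */
	unsigned long bolted:  1;	/* entry must never be evicted */
	unsigned long lock:    1;	/* software lock bit */
	unsigned long l:       1;	/* large (16MB) virtual page */
	unsigned long h:       1;	/* entry found via secondary hash */
	unsigned long v:       1;	/* entry is valid */
} Hpte_dword0;				/* old type, removed by this patch */

/* After: two bare doublewords plus explicit flag masks. */
typedef struct {
	unsigned long v;	/* AVPN, per-entry flags, valid bit */
	unsigned long r;	/* real page number, protection bits */
} hpte_t;

#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		0xffffffffffffff80UL
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_BOLTED		0x0000000000000010UL
#define HPTE_V_LOCK		0x0000000000000008UL
#define HPTE_V_LARGE		0x0000000000000004UL
#define HPTE_V_SECONDARY	0x0000000000000002UL
#define HPTE_V_VALID		0x0000000000000001UL

The payoff shows throughout the hunks below: the valid word can now be read,
compared, and stored as a single unsigned long, and the lock bit can be
manipulated with test_and_set_bit()/clear_bit() and directly from assembler.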
Diffstat (limited to 'arch/ppc64/mm')
-rw-r--r--  arch/ppc64/mm/hash_low.S      8
-rw-r--r--  arch/ppc64/mm/hash_native.c   129
-rw-r--r--  arch/ppc64/mm/hash_utils.c    16
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c   16
-rw-r--r--  arch/ppc64/mm/init.c          7
5 files changed, 83 insertions, 93 deletions
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S
index c23d46956dd9..fbff24827ae7 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/ppc64/mm/hash_low.S
@@ -170,9 +170,7 @@ htab_insert_pte:
 	/* Call ppc_md.hpte_insert */
 	ld	r7,STK_PARM(r4)(r1)	/* Retreive new pp bits */
 	mr	r4,r29			/* Retreive va */
-	li	r6,0			/* primary slot */
-	li	r8,0			/* not bolted and not large */
-	li	r9,0
+	li	r6,0			/* no vflags */
 _GLOBAL(htab_call_hpte_insert1)
 	bl	.			/* Will be patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -192,9 +190,7 @@ _GLOBAL(htab_call_hpte_insert1)
 	/* Call ppc_md.hpte_insert */
 	ld	r7,STK_PARM(r4)(r1)	/* Retreive new pp bits */
 	mr	r4,r29			/* Retreive va */
-	li	r6,1			/* secondary slot */
-	li	r8,0			/* not bolted and not large */
-	li	r9,0
+	li	r6,HPTE_V_SECONDARY@l	/* secondary slot */
 _GLOBAL(htab_call_hpte_insert2)
 	bl	.			/* Will be patched by htab_finish_init() */
 	cmpdi	0,r3,0
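This assembly change is the caller-side half of a narrowed insert interface:
the separate secondary/bolted/large arguments collapse into one vflags word,
so only r6 needs loading before the patched branch and r8/r9 drop out
entirely. Both C prototypes can be read off the hash_native.c hunks that
follow:

/* Old interface: flags split across three scalar arguments. */
long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			unsigned long prpn, int secondary,
			unsigned long hpteflags, int bolted, int large);

/* New interface: everything destined for the first doubleword travels
 * in vflags, everything for the second doubleword in rflags. */
long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			unsigned long prpn, unsigned long vflags,
			unsigned long rflags);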
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 4fec05817d66..a6abd3a979bf 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -27,9 +27,9 @@
 
 static DEFINE_SPINLOCK(native_tlbie_lock);
 
-static inline void native_lock_hpte(HPTE *hptep)
+static inline void native_lock_hpte(hpte_t *hptep)
 {
-	unsigned long *word = &hptep->dw0.dword0;
+	unsigned long *word = &hptep->v;
 
 	while (1) {
 		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
@@ -39,32 +39,28 @@ static inline void native_lock_hpte(HPTE *hptep)
 	}
 }
 
-static inline void native_unlock_hpte(HPTE *hptep)
+static inline void native_unlock_hpte(hpte_t *hptep)
 {
-	unsigned long *word = &hptep->dw0.dword0;
+	unsigned long *word = &hptep->v;
 
 	asm volatile("lwsync":::"memory");
 	clear_bit(HPTE_LOCK_BIT, word);
 }
 
 long native_hpte_insert(unsigned long hpte_group, unsigned long va,
-			unsigned long prpn, int secondary,
-			unsigned long hpteflags, int bolted, int large)
+			unsigned long prpn, unsigned long vflags,
+			unsigned long rflags)
 {
 	unsigned long arpn = physRpn_to_absRpn(prpn);
-	HPTE *hptep = htab_address + hpte_group;
-	Hpte_dword0 dw0;
-	HPTE lhpte;
+	hpte_t *hptep = htab_address + hpte_group;
+	unsigned long hpte_v, hpte_r;
 	int i;
 
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
-		dw0 = hptep->dw0.dw0;
-
-		if (!dw0.v) {
+		if (! (hptep->v & HPTE_V_VALID)) {
 			/* retry with lock held */
 			native_lock_hpte(hptep);
-			dw0 = hptep->dw0.dw0;
-			if (!dw0.v)
+			if (! (hptep->v & HPTE_V_VALID))
 				break;
 			native_unlock_hpte(hptep);
 		}
@@ -75,56 +71,45 @@ long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 	if (i == HPTES_PER_GROUP)
 		return -1;
 
-	lhpte.dw1.dword1 = 0;
-	lhpte.dw1.dw1.rpn = arpn;
-	lhpte.dw1.flags.flags = hpteflags;
-
-	lhpte.dw0.dword0 = 0;
-	lhpte.dw0.dw0.avpn = va >> 23;
-	lhpte.dw0.dw0.h = secondary;
-	lhpte.dw0.dw0.bolted = bolted;
-	lhpte.dw0.dw0.v = 1;
-
-	if (large) {
-		lhpte.dw0.dw0.l = 1;
-		lhpte.dw0.dw0.avpn &= ~0x1UL;
-	}
-
-	hptep->dw1.dword1 = lhpte.dw1.dword1;
+	hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
+	if (vflags & HPTE_V_LARGE)
+		va &= ~(1UL << HPTE_V_AVPN_SHIFT);
+	hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
 
+	hptep->r = hpte_r;
 	/* Guarantee the second dword is visible before the valid bit */
 	__asm__ __volatile__ ("eieio" : : : "memory");
-
 	/*
 	 * Now set the first dword including the valid bit
 	 * NOTE: this also unlocks the hpte
 	 */
-	hptep->dw0.dword0 = lhpte.dw0.dword0;
+	hptep->v = hpte_v;
 
 	__asm__ __volatile__ ("ptesync" : : : "memory");
 
-	return i | (secondary << 3);
+	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
 }
 
 static long native_hpte_remove(unsigned long hpte_group)
 {
-	HPTE *hptep;
-	Hpte_dword0 dw0;
+	hpte_t *hptep;
 	int i;
 	int slot_offset;
+	unsigned long hpte_v;
 
 	/* pick a random entry to start at */
 	slot_offset = mftb() & 0x7;
 
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
 		hptep = htab_address + hpte_group + slot_offset;
-		dw0 = hptep->dw0.dw0;
+		hpte_v = hptep->v;
 
-		if (dw0.v && !dw0.bolted) {
+		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
 			/* retry with lock held */
 			native_lock_hpte(hptep);
-			dw0 = hptep->dw0.dw0;
-			if (dw0.v && !dw0.bolted)
+			hpte_v = hptep->v;
+			if ((hpte_v & HPTE_V_VALID)
+			    && !(hpte_v & HPTE_V_BOLTED))
 				break;
 			native_unlock_hpte(hptep);
 		}
@@ -137,15 +122,15 @@ static long native_hpte_remove(unsigned long hpte_group)
 		return -1;
 
 	/* Invalidate the hpte. NOTE: this also unlocks it */
-	hptep->dw0.dword0 = 0;
+	hptep->v = 0;
 
 	return i;
 }
 
-static inline void set_pp_bit(unsigned long pp, HPTE *addr)
+static inline void set_pp_bit(unsigned long pp, hpte_t *addr)
 {
 	unsigned long old;
-	unsigned long *p = &addr->dw1.dword1;
+	unsigned long *p = &addr->r;
 
 	__asm__ __volatile__(
 	"1:	ldarx	%0,0,%3\n\
@@ -163,11 +148,11 @@ static inline void set_pp_bit(unsigned long pp, HPTE *addr)
  */
 static long native_hpte_find(unsigned long vpn)
 {
-	HPTE *hptep;
+	hpte_t *hptep;
 	unsigned long hash;
 	unsigned long i, j;
 	long slot;
-	Hpte_dword0 dw0;
+	unsigned long hpte_v;
 
 	hash = hpt_hash(vpn, 0);
 
@@ -175,10 +160,11 @@ static long native_hpte_find(unsigned long vpn)
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		for (i = 0; i < HPTES_PER_GROUP; i++) {
 			hptep = htab_address + slot;
-			dw0 = hptep->dw0.dw0;
+			hpte_v = hptep->v;
 
-			if ((dw0.avpn == (vpn >> 11)) && dw0.v &&
-			    (dw0.h == j)) {
+			if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
+			    && (hpte_v & HPTE_V_VALID)
+			    && ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
 				/* HPTE matches */
 				if (j)
 					slot = -slot;
@@ -195,20 +181,21 @@ static long native_hpte_find(unsigned long vpn)
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 				 unsigned long va, int large, int local)
 {
-	HPTE *hptep = htab_address + slot;
-	Hpte_dword0 dw0;
+	hpte_t *hptep = htab_address + slot;
+	unsigned long hpte_v;
 	unsigned long avpn = va >> 23;
 	int ret = 0;
 
 	if (large)
-		avpn &= ~0x1UL;
+		avpn &= ~1;
 
 	native_lock_hpte(hptep);
 
-	dw0 = hptep->dw0.dw0;
+	hpte_v = hptep->v;
 
 	/* Even if we miss, we need to invalidate the TLB */
-	if ((dw0.avpn != avpn) || !dw0.v) {
+	if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
+	    || !(hpte_v & HPTE_V_VALID)) {
 		native_unlock_hpte(hptep);
 		ret = -1;
 	} else {
@@ -244,7 +231,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
 {
 	unsigned long vsid, va, vpn, flags = 0;
 	long slot;
-	HPTE *hptep;
+	hpte_t *hptep;
 	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
 
 	vsid = get_kernel_vsid(ea);
@@ -269,26 +256,27 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
 static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 				   int large, int local)
 {
-	HPTE *hptep = htab_address + slot;
-	Hpte_dword0 dw0;
+	hpte_t *hptep = htab_address + slot;
+	unsigned long hpte_v;
 	unsigned long avpn = va >> 23;
 	unsigned long flags;
 	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
 
 	if (large)
-		avpn &= ~0x1UL;
+		avpn &= ~1;
 
 	local_irq_save(flags);
 	native_lock_hpte(hptep);
 
-	dw0 = hptep->dw0.dw0;
+	hpte_v = hptep->v;
 
 	/* Even if we miss, we need to invalidate the TLB */
-	if ((dw0.avpn != avpn) || !dw0.v) {
+	if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
+	    || !(hpte_v & HPTE_V_VALID)) {
 		native_unlock_hpte(hptep);
 	} else {
 		/* Invalidate the hpte. NOTE: this also unlocks it */
-		hptep->dw0.dword0 = 0;
+		hptep->v = 0;
 	}
 
 	/* Invalidate the tlb */
@@ -315,8 +303,8 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 static void native_hpte_clear(void)
 {
 	unsigned long slot, slots, flags;
-	HPTE *hptep = htab_address;
-	Hpte_dword0 dw0;
+	hpte_t *hptep = htab_address;
+	unsigned long hpte_v;
 	unsigned long pteg_count;
 
 	pteg_count = htab_hash_mask + 1;
@@ -336,11 +324,11 @@ static void native_hpte_clear(void)
 		 * running,  right?  and for crash dump, we probably
 		 * don't want to wait for a maybe bad cpu.
 		 */
-		dw0 = hptep->dw0.dw0;
+		hpte_v = hptep->v;
 
-		if (dw0.v) {
-			hptep->dw0.dword0 = 0;
-			tlbie(slot2va(dw0.avpn, dw0.l, dw0.h, slot), dw0.l);
+		if (hpte_v & HPTE_V_VALID) {
+			hptep->v = 0;
+			tlbie(slot2va(hpte_v, slot), hpte_v & HPTE_V_LARGE);
 		}
 	}
 
@@ -353,8 +341,8 @@ static void native_flush_hash_range(unsigned long context,
 {
 	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
 	int i, j;
-	HPTE *hptep;
-	Hpte_dword0 dw0;
+	hpte_t *hptep;
+	unsigned long hpte_v;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 
 	/* XXX fix for large ptes */
@@ -390,14 +378,15 @@ static void native_flush_hash_range(unsigned long context,
 
 		native_lock_hpte(hptep);
 
-		dw0 = hptep->dw0.dw0;
+		hpte_v = hptep->v;
 
 		/* Even if we miss, we need to invalidate the TLB */
-		if ((dw0.avpn != avpn) || !dw0.v) {
+		if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
+		    || !(hpte_v & HPTE_V_VALID)) {
 			native_unlock_hpte(hptep);
 		} else {
 			/* Invalidate the hpte. NOTE: this also unlocks it */
-			hptep->dw0.dword0 = 0;
+			hptep->v = 0;
 		}
 
 		j++;
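Worth noting: the conversion leaves the insert path's publication protocol
intact; it merely operates on bare words now. A condensed sketch of the
ordering preserved above, assuming hptep points at a free slot already
claimed by native_lock_hpte():

	hptep->r = hpte_r;	/* 1. fill the second doubleword (RPN, rflags) */
	__asm__ __volatile__ ("eieio" : : : "memory");
				/* 2. eieio orders the store to r before the
				 *    store to v, so no reader can observe a
				 *    valid entry with a stale second dword */
	hptep->v = hpte_v;	/* 3. setting HPTE_V_VALID also clears
				 *    HPTE_LOCK_BIT, which lives in the same
				 *    word: publish and unlock in one store */
	__asm__ __volatile__ ("ptesync" : : : "memory");
				/* 4. make the update visible to the MMU's
				 *    table walk before returning */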
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index 1647b1c6f28e..623b5d130c31 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -75,8 +75,8 @@
 extern unsigned long dart_tablebase;
 #endif /* CONFIG_U3_DART */
 
-HPTE *htab_address;
+hpte_t *htab_address;
 unsigned long htab_hash_mask;
 
 extern unsigned long _SDR1;
 
@@ -97,11 +97,15 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
 	unsigned long addr;
 	unsigned int step;
 	unsigned long tmp_mode;
+	unsigned long vflags;
 
-	if (large)
+	if (large) {
 		step = 16*MB;
-	else
+		vflags = HPTE_V_BOLTED | HPTE_V_LARGE;
+	} else {
 		step = 4*KB;
+		vflags = HPTE_V_BOLTED;
+	}
 
 	for (addr = start; addr < end; addr += step) {
 		unsigned long vpn, hash, hpteg;
@@ -129,12 +133,12 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
 		if (systemcfg->platform & PLATFORM_LPAR)
 			ret = pSeries_lpar_hpte_insert(hpteg, va,
 				virt_to_abs(addr) >> PAGE_SHIFT,
-				0, tmp_mode, 1, large);
+				vflags, tmp_mode);
 		else
 #endif /* CONFIG_PPC_PSERIES */
 			ret = native_hpte_insert(hpteg, va,
 				virt_to_abs(addr) >> PAGE_SHIFT,
-				0, tmp_mode, 1, large);
+				vflags, tmp_mode);
 
 		if (ret == -1) {
 			ppc64_terminate_msg(0x20, "create_pte_mapping");
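The HPTE_V_BOLTED set on both branches of create_pte_mapping() is what keeps
the kernel's linear mappings safe from eviction: native_hpte_remove() above
only steals slots that fail the bolted test. Restated as a hypothetical
helper (not part of the patch):

/* Hypothetical helper restating the test native_hpte_remove() applies
 * before evicting a slot to make room for a new entry. */
static inline int hpte_is_evictable(unsigned long hpte_v)
{
	return (hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED);
}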
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index fdcfe97c75c1..f9524602818d 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -583,7 +583,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	pte_t *ptep;
 	unsigned long va, vpn;
 	pte_t old_pte, new_pte;
-	unsigned long hpteflags, prpn;
+	unsigned long rflags, prpn;
 	long slot;
 	int err = 1;
 
@@ -626,9 +626,9 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	old_pte = *ptep;
 	new_pte = old_pte;
 
-	hpteflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
+	rflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
 	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
-	hpteflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);
+	rflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
@@ -641,7 +641,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;
 
-		if (ppc_md.hpte_updatepp(slot, hpteflags, va, 1, local) == -1)
+		if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
 			pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
 	}
 
@@ -661,10 +661,10 @@ repeat:
 
 		/* Add in WIMG bits */
 		/* XXX We should store these in the pte */
-		hpteflags |= _PAGE_COHERENT;
+		rflags |= _PAGE_COHERENT;
 
-		slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0,
-					  hpteflags, 0, 1);
+		slot = ppc_md.hpte_insert(hpte_group, va, prpn,
+					  HPTE_V_LARGE, rflags);
 
 		/* Primary is full, try the secondary */
 		if (unlikely(slot == -1)) {
@@ -672,7 +672,7 @@
 			hpte_group = ((~hash & htab_hash_mask) *
 				      HPTES_PER_GROUP) & ~0x7UL;
 			slot = ppc_md.hpte_insert(hpte_group, va, prpn,
-						  1, hpteflags, 0, 1);
+						  HPTE_V_LARGE, rflags);
 			if (slot == -1) {
 				if (mftb() & 0x1)
 					hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
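The fallback above relies on the hashed page table's two-bucket scheme: when
the primary group computed from the hash is full, the group derived from the
complemented hash is tried instead (HPTE_V_SECONDARY marks entries reached
via the second hash, as seen in native_hpte_find() earlier). The group
arithmetic, restated as hypothetical helpers (not part of the patch):

/* Hypothetical helpers restating the group arithmetic used above. */
static inline unsigned long primary_hpte_group(unsigned long hash)
{
	return ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
}

static inline unsigned long secondary_hpte_group(unsigned long hash)
{
	return ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
}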
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index b50b3a446dbe..e58a24d42879 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -180,9 +180,10 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 		/* Panic if a pte grpup is full */
-		if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
-				       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX,
-				       1, 0) == -1) {
+		if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
+				       HPTE_V_BOLTED,
+				       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
+		    == -1) {
 			panic("map_io_page: could not insert mapping");
 		}
 	}