Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/44x_mmu.c        |   1
 arch/powerpc/mm/4xx_mmu.c        |   1
 arch/powerpc/mm/Makefile         |   3
 arch/powerpc/mm/fault.c          |   2
 arch/powerpc/mm/fsl_booke_mmu.c  |   1
 arch/powerpc/mm/hash_native_64.c |  27
 arch/powerpc/mm/hash_utils_64.c  |   2
 arch/powerpc/mm/imalloc.c        | 313
 arch/powerpc/mm/init_32.c        |   1
 arch/powerpc/mm/init_64.c        |   1
 arch/powerpc/mm/mem.c            |   3
 arch/powerpc/mm/mmu_context_32.c |   1
 arch/powerpc/mm/mmu_decl.h       |  17
 arch/powerpc/mm/pgtable_32.c     | 123
 arch/powerpc/mm/pgtable_64.c     | 206
 arch/powerpc/mm/ppc_mmu_32.c     |   7
 arch/powerpc/mm/stab.c           |   4
 arch/powerpc/mm/tlb_32.c         |   1
 arch/powerpc/mm/tlb_64.c         |  57
 19 files changed, 128 insertions(+), 643 deletions(-)
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index ca4dcb07a939..c3df50476539 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -12,7 +12,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
index 838e09db71d9..7ff2609b64d1 100644
--- a/arch/powerpc/mm/4xx_mmu.c
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -9,7 +9,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 4f839c6a9768..7e4d27ad3dee 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -11,8 +11,7 @@ obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o
 hash-$(CONFIG_PPC_NATIVE)      := hash_native_64.o
 obj-$(CONFIG_PPC64)            += init_64.o pgtable_64.o mmu_context_64.o \
                                   hash_utils_64.o hash_low_64.o tlb_64.o \
-                                  slb_low.o slb.o stab.o mmap.o imalloc.o \
-                                  $(hash-y)
+                                  slb_low.o slb.o stab.o mmap.o $(hash-y)
 obj-$(CONFIG_PPC_STD_MMU_32)   += ppc_mmu_32.o hash_low_32.o tlb_32.o
 obj-$(CONFIG_40x)              += 4xx_mmu.o
 obj-$(CONFIG_44x)              += 44x_mmu.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 115b25f50bf8..0ece51310bfe 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -380,7 +380,7 @@ out_of_memory:
         }
         printk("VM: killing process %s\n", current->comm);
         if (user_mode(regs))
-                do_exit(SIGKILL);
+                do_group_exit(SIGKILL);
         return SIGKILL;
 
 do_sigbus:
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 123da03ab118..afab247d472f 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -14,7 +14,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 4a20d890e2f4..6ba9b47e55af 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int local)
         spin_unlock(&native_tlbie_lock);
 }
 
-static inline void native_lock_hpte(hpte_t *hptep)
+static inline void native_lock_hpte(struct hash_pte *hptep)
 {
         unsigned long *word = &hptep->v;
 
@@ -116,7 +116,7 @@ static inline void native_lock_hpte(hpte_t *hptep)
         }
 }
 
-static inline void native_unlock_hpte(hpte_t *hptep)
+static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
         unsigned long *word = &hptep->v;
 
@@ -128,7 +128,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
                         unsigned long pa, unsigned long rflags,
                         unsigned long vflags, int psize)
 {
-        hpte_t *hptep = htab_address + hpte_group;
+        struct hash_pte *hptep = htab_address + hpte_group;
         unsigned long hpte_v, hpte_r;
         int i;
 
@@ -163,7 +163,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 
         hptep->r = hpte_r;
         /* Guarantee the second dword is visible before the valid bit */
-        __asm__ __volatile__ ("eieio" : : : "memory");
+        eieio();
         /*
          * Now set the first dword including the valid bit
          * NOTE: this also unlocks the hpte
@@ -177,7 +177,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 
 static long native_hpte_remove(unsigned long hpte_group)
 {
-        hpte_t *hptep;
+        struct hash_pte *hptep;
         int i;
         int slot_offset;
         unsigned long hpte_v;
@@ -217,7 +217,7 @@ static long native_hpte_remove(unsigned long hpte_group)
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                  unsigned long va, int psize, int local)
 {
-        hpte_t *hptep = htab_address + slot;
+        struct hash_pte *hptep = htab_address + slot;
         unsigned long hpte_v, want_v;
         int ret = 0;
 
@@ -233,15 +233,14 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
         /* Even if we miss, we need to invalidate the TLB */
         if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
                 DBG_LOW(" -> miss\n");
-                native_unlock_hpte(hptep);
                 ret = -1;
         } else {
                 DBG_LOW(" -> hit\n");
                 /* Update the HPTE */
                 hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
                         (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
-                native_unlock_hpte(hptep);
         }
+        native_unlock_hpte(hptep);
 
         /* Ensure it is out of the tlb too. */
         tlbie(va, psize, local);
@@ -251,7 +250,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 
 static long native_hpte_find(unsigned long va, int psize)
 {
-        hpte_t *hptep;
+        struct hash_pte *hptep;
         unsigned long hash;
         unsigned long i, j;
         long slot;
@@ -294,7 +293,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 {
         unsigned long vsid, va;
         long slot;
-        hpte_t *hptep;
+        struct hash_pte *hptep;
 
         vsid = get_kernel_vsid(ea);
         va = (vsid << 28) | (ea & 0x0fffffff);
@@ -315,7 +314,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 static void native_hpte_invalidate(unsigned long slot, unsigned long va,
                                    int psize, int local)
 {
-        hpte_t *hptep = htab_address + slot;
+        struct hash_pte *hptep = htab_address + slot;
         unsigned long hpte_v;
         unsigned long want_v;
         unsigned long flags;
@@ -345,7 +344,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 #define LP_BITS                8
 #define LP_MASK(i)     ((0xFF >> (i)) << LP_SHIFT)
 
-static void hpte_decode(hpte_t *hpte, unsigned long slot,
+static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                         int *psize, unsigned long *va)
 {
         unsigned long hpte_r = hpte->r;
@@ -415,7 +414,7 @@ static void hpte_decode(hpte_t *hpte, unsigned long slot,
 static void native_hpte_clear(void)
 {
         unsigned long slot, slots, flags;
-        hpte_t *hptep = htab_address;
+        struct hash_pte *hptep = htab_address;
         unsigned long hpte_v, va;
         unsigned long pteg_count;
         int psize;
@@ -462,7 +461,7 @@ static void native_hpte_clear(void)
 static void native_flush_hash_range(unsigned long number, int local)
 {
         unsigned long va, hash, index, hidx, shift, slot;
-        hpte_t *hptep;
+        struct hash_pte *hptep;
         unsigned long hpte_v;
         unsigned long want_v;
         unsigned long flags;
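A side note on the eieio() conversions in this file (and in stab.c further
down): judging by the removed lines, eieio() is presumably a thin wrapper
around the same inline assembly. A sketch inferred from the removed
open-coded asm, not taken from this patch:

        /* Inferred helper definition; the real one lives in the barrier
         * headers.  "Enforce In-order Execution of I/O" orders the store
         * of the second HPTE dword before the store of the valid bit. */
        static inline void eieio(void)
        {
                __asm__ __volatile__ ("eieio" : : : "memory");
        }

so the ordering guarantees in native_hpte_insert() are unchanged; the
conversion is purely cosmetic.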
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 4f2f4534a9d8..2ce9491b48d4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -87,7 +87,7 @@ extern unsigned long dart_tablebase;
 static unsigned long _SDR1;
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 
-hpte_t *htab_address;
+struct hash_pte *htab_address;
 unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
deleted file mode 100644
index c831815c31f0..000000000000
--- a/arch/powerpc/mm/imalloc.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <linux/mutex.h>
-#include <asm/cacheflush.h>
-
-#include "mmu_decl.h"
-
-static DEFINE_MUTEX(imlist_mutex);
-struct vm_struct * imlist = NULL;
-
-static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
-{
-        unsigned long addr;
-        struct vm_struct **p, *tmp;
-
-        addr = ioremap_bot;
-        for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
-                if (size + addr < (unsigned long) tmp->addr)
-                        break;
-                if ((unsigned long)tmp->addr >= ioremap_bot)
-                        addr = tmp->size + (unsigned long) tmp->addr;
-                if (addr >= IMALLOC_END-size)
-                        return 1;
-        }
-        *im_addr = addr;
-
-        return 0;
-}
-
-/* Return whether the region described by v_addr and size is a subset
- * of the region described by parent
- */
-static inline int im_region_is_subset(unsigned long v_addr, unsigned long size,
-                        struct vm_struct *parent)
-{
-        return (int) (v_addr >= (unsigned long) parent->addr &&
-                      v_addr < (unsigned long) parent->addr + parent->size &&
-                      size < parent->size);
-}
-
-/* Return whether the region described by v_addr and size is a superset
- * of the region described by child
- */
-static int im_region_is_superset(unsigned long v_addr, unsigned long size,
-                struct vm_struct *child)
-{
-        struct vm_struct parent;
-
-        parent.addr = (void *) v_addr;
-        parent.size = size;
-
-        return im_region_is_subset((unsigned long) child->addr, child->size,
-                        &parent);
-}
-
-/* Return whether the region described by v_addr and size overlaps
- * the region described by vm.  Overlapping regions meet the
- * following conditions:
- * 1) The regions share some part of the address space
- * 2) The regions aren't identical
- * 3) Neither region is a subset of the other
- */
-static int im_region_overlaps(unsigned long v_addr, unsigned long size,
-                struct vm_struct *vm)
-{
-        if (im_region_is_superset(v_addr, size, vm))
-                return 0;
-
-        return (v_addr + size > (unsigned long) vm->addr + vm->size &&
-                v_addr < (unsigned long) vm->addr + vm->size) ||
-               (v_addr < (unsigned long) vm->addr &&
-                v_addr + size > (unsigned long) vm->addr);
-}
-
-/* Determine imalloc status of region described by v_addr and size.
- * Can return one of the following:
- * IM_REGION_UNUSED   - Entire region is unallocated in imalloc space.
- * IM_REGION_SUBSET   - Region is a subset of a region that is already
- *                      allocated in imalloc space.
- *                      vm will be assigned to a ptr to the parent region.
- * IM_REGION_EXISTS   - Exact region already allocated in imalloc space.
- *                      vm will be assigned to a ptr to the existing imlist
- *                      member.
- * IM_REGION_OVERLAPS - Region overlaps an allocated region in imalloc space.
- * IM_REGION_SUPERSET - Region is a superset of a region that is already
- *                      allocated in imalloc space.
- */
-static int im_region_status(unsigned long v_addr, unsigned long size,
-                struct vm_struct **vm)
-{
-        struct vm_struct *tmp;
-
-        for (tmp = imlist; tmp; tmp = tmp->next)
-                if (v_addr < (unsigned long) tmp->addr + tmp->size)
-                        break;
-
-        *vm = NULL;
-        if (tmp) {
-                if (im_region_overlaps(v_addr, size, tmp))
-                        return IM_REGION_OVERLAP;
-
-                *vm = tmp;
-                if (im_region_is_subset(v_addr, size, tmp)) {
-                        /* Return with tmp pointing to superset */
-                        return IM_REGION_SUBSET;
-                }
-                if (im_region_is_superset(v_addr, size, tmp)) {
-                        /* Return with tmp pointing to first subset */
-                        return IM_REGION_SUPERSET;
-                }
-                else if (v_addr == (unsigned long) tmp->addr &&
-                         size == tmp->size) {
-                        /* Return with tmp pointing to exact region */
-                        return IM_REGION_EXISTS;
-                }
-        }
-
-        return IM_REGION_UNUSED;
-}
-
-static struct vm_struct * split_im_region(unsigned long v_addr,
-                unsigned long size, struct vm_struct *parent)
-{
-        struct vm_struct *vm1 = NULL;
-        struct vm_struct *vm2 = NULL;
-        struct vm_struct *new_vm = NULL;
-
-        vm1 = kmalloc(sizeof(*vm1), GFP_KERNEL);
-        if (vm1 == NULL) {
-                printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
-                return NULL;
-        }
-
-        if (v_addr == (unsigned long) parent->addr) {
-                /* Use existing parent vm_struct to represent child, allocate
-                 * new one for the remainder of parent range
-                 */
-                vm1->size = parent->size - size;
-                vm1->addr = (void *) (v_addr + size);
-                vm1->next = parent->next;
-
-                parent->size = size;
-                parent->next = vm1;
-                new_vm = parent;
-        } else if (v_addr + size == (unsigned long) parent->addr +
-                        parent->size) {
-                /* Allocate new vm_struct to represent child, use existing
-                 * parent one for remainder of parent range
-                 */
-                vm1->size = size;
-                vm1->addr = (void *) v_addr;
-                vm1->next = parent->next;
-                new_vm = vm1;
-
-                parent->size -= size;
-                parent->next = vm1;
-        } else {
-                /* Allocate two new vm_structs for the new child and
-                 * uppermost remainder, and use existing parent one for the
-                 * lower remainder of parent range
-                 */
-                vm2 = kmalloc(sizeof(*vm2), GFP_KERNEL);
-                if (vm2 == NULL) {
-                        printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
-                        kfree(vm1);
-                        return NULL;
-                }
-
-                vm1->size = size;
-                vm1->addr = (void *) v_addr;
-                vm1->next = vm2;
-                new_vm = vm1;
-
-                vm2->size = ((unsigned long) parent->addr + parent->size) -
-                        (v_addr + size);
-                vm2->addr = (void *) v_addr + size;
-                vm2->next = parent->next;
-
-                parent->size = v_addr - (unsigned long) parent->addr;
-                parent->next = vm1;
-        }
-
-        return new_vm;
-}
-
-static struct vm_struct * __add_new_im_area(unsigned long req_addr,
-                                            unsigned long size)
-{
-        struct vm_struct **p, *tmp, *area;
-
-        for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
-                if (req_addr + size <= (unsigned long)tmp->addr)
-                        break;
-        }
-
-        area = kmalloc(sizeof(*area), GFP_KERNEL);
-        if (!area)
-                return NULL;
-        area->flags = 0;
-        area->addr = (void *)req_addr;
-        area->size = size;
-        area->next = *p;
-        *p = area;
-
-        return area;
-}
-
-static struct vm_struct * __im_get_area(unsigned long req_addr,
-                                        unsigned long size,
-                                        int criteria)
-{
-        struct vm_struct *tmp;
-        int status;
-
-        status = im_region_status(req_addr, size, &tmp);
-        if ((criteria & status) == 0) {
-                return NULL;
-        }
-
-        switch (status) {
-        case IM_REGION_UNUSED:
-                tmp = __add_new_im_area(req_addr, size);
-                break;
-        case IM_REGION_SUBSET:
-                tmp = split_im_region(req_addr, size, tmp);
-                break;
-        case IM_REGION_EXISTS:
-                /* Return requested region */
-                break;
-        case IM_REGION_SUPERSET:
-                /* Return first existing subset of requested region */
-                break;
-        default:
-                printk(KERN_ERR "%s() unexpected imalloc region status\n",
-                                __FUNCTION__);
-                tmp = NULL;
-        }
-
-        return tmp;
-}
-
-struct vm_struct * im_get_free_area(unsigned long size)
-{
-        struct vm_struct *area;
-        unsigned long addr;
-
-        mutex_lock(&imlist_mutex);
-        if (get_free_im_addr(size, &addr)) {
-                printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
-                                __FUNCTION__, size);
-                area = NULL;
-                goto next_im_done;
-        }
-
-        area = __im_get_area(addr, size, IM_REGION_UNUSED);
-        if (area == NULL) {
-                printk(KERN_ERR
-                       "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
-                        __FUNCTION__, addr, size);
-        }
-next_im_done:
-        mutex_unlock(&imlist_mutex);
-        return area;
-}
-
-struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-                int criteria)
-{
-        struct vm_struct *area;
-
-        mutex_lock(&imlist_mutex);
-        area = __im_get_area(v_addr, size, criteria);
-        mutex_unlock(&imlist_mutex);
-        return area;
-}
-
-void im_free(void * addr)
-{
-        struct vm_struct **p, *tmp;
-
-        if (!addr)
-                return;
-        if ((unsigned long) addr & ~PAGE_MASK) {
-                printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
-                return;
-        }
-        mutex_lock(&imlist_mutex);
-        for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
-                if (tmp->addr == addr) {
-                        *p = tmp->next;
-                        unmap_vm_area(tmp);
-                        kfree(tmp);
-                        mutex_unlock(&imlist_mutex);
-                        return;
-                }
-        }
-        mutex_unlock(&imlist_mutex);
-        printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
-                        addr);
-}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 5fce6ccecb8d..e1f5ded851f6 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -5,7 +5,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
  *
  * Derived from "arch/i386/mm/init.c"
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7312a265545f..1d6edf724c85 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -5,7 +5,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0266a94d83b6..f0e7eedb1ba3 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -5,7 +5,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
  *
  * Derived from "arch/i386/mm/init.c"
@@ -129,8 +128,6 @@ int __devinit arch_add_memory(int nid, u64 start, u64 size)
         zone = pgdata->node_zones;
 
         return __add_pages(zone, start_pfn, nr_pages);
-
-        return 0;
 }
 
 /*
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
index 792086b01000..cc32ba41d900 100644
--- a/arch/powerpc/mm/mmu_context_32.c
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -11,7 +11,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 2558c34eedaa..c94a64fd3c01 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -8,7 +8,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -40,8 +39,8 @@ extern int __map_without_bats;
 extern unsigned long ioremap_base;
 extern unsigned int rtas_data, rtas_size;
 
-struct _PTE;
-extern struct _PTE *Hash, *Hash_end;
+struct hash_pte;
+extern struct hash_pte *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;
 
 extern unsigned int num_tlbcam_entries;
@@ -90,16 +89,4 @@ static inline void flush_HPTE(unsigned context, unsigned long va,
         else
                 _tlbie(va);
 }
-#else /* CONFIG_PPC64 */
-/* imalloc region types */
-#define IM_REGION_UNUSED       0x1
-#define IM_REGION_SUBSET       0x2
-#define IM_REGION_EXISTS       0x4
-#define IM_REGION_OVERLAP      0x8
-#define IM_REGION_SUPERSET     0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-                        int region_type);
-extern void im_free(void *addr);
 #endif
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index f6ae1a57d652..64488723162a 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -8,7 +8,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -37,7 +36,6 @@
 unsigned long ioremap_base;
 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);    /* aka VMALLOC_END */
-int io_bat_index;
 
 #if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
 #define HAVE_BATS      1
@@ -300,51 +298,6 @@ void __init mapin_ram(void)
         }
 }
 
-/* is x a power of 4? */
-#define is_power_of_4(x)       is_power_of_2(x) && (ffs(x) & 1)
-
-/*
- * Set up a mapping for a block of I/O.
- * virt, phys, size must all be page-aligned.
- * This should only be called before ioremap is called.
- */
-void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
-                             unsigned int size, int flags)
-{
-        int i;
-
-        if (virt > KERNELBASE && virt < ioremap_bot)
-                ioremap_bot = ioremap_base = virt;
-
-#ifdef HAVE_BATS
-        /*
-         * Use a BAT for this if possible...
-         */
-        if (io_bat_index < 2 && is_power_of_2(size)
-            && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
-                setbat(io_bat_index, virt, phys, size, flags);
-                ++io_bat_index;
-                return;
-        }
-#endif /* HAVE_BATS */
-
-#ifdef HAVE_TLBCAM
-        /*
-         * Use a CAM for this if possible...
-         */
-        if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
-            && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
-                settlbcam(tlbcam_index, virt, phys, size, flags, 0);
-                ++tlbcam_index;
-                return;
-        }
-#endif /* HAVE_TLBCAM */
-
-        /* No BATs available, put it in the page tables. */
-        for (i = 0; i < size; i += PAGE_SIZE)
-                map_page(virt + i, phys + i, flags);
-}
-
 /* Scan the real Linux page tables and return a PTE pointer for
  * a virtual address in a context.
  * Returns true (1) if PTE was found, zero otherwise. The pointer to
@@ -379,82 +332,6 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
         return(retval);
 }
 
-/* Find physical address for this virtual address.  Normally used by
- * I/O functions, but anyone can call it.
- */
-unsigned long iopa(unsigned long addr)
-{
-        unsigned long pa;
-
-        /* I don't know why this won't work on PMacs or CHRP.  It
-         * appears there is some bug, or there is some implicit
-         * mapping done not properly represented by BATs or in page
-         * tables.......I am actively working on resolving this, but
-         * can't hold up other stuff.  -- Dan
-         */
-        pte_t *pte;
-        struct mm_struct *mm;
-
-        /* Check the BATs */
-        pa = v_mapped_by_bats(addr);
-        if (pa)
-                return pa;
-
-        /* Allow mapping of user addresses (within the thread)
-         * for DMA if necessary.
-         */
-        if (addr < TASK_SIZE)
-                mm = current->mm;
-        else
-                mm = &init_mm;
-
-        pa = 0;
-        if (get_pteptr(mm, addr, &pte, NULL)) {
-                pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
-                pte_unmap(pte);
-        }
-
-        return(pa);
-}
-
-/* This is will find the virtual address for a physical one....
- * Swiped from APUS, could be dangerous :-).
- * This is only a placeholder until I really find a way to make this
- * work. -- Dan
- */
-unsigned long
-mm_ptov (unsigned long paddr)
-{
-        unsigned long ret;
-#if 0
-        if (paddr < 16*1024*1024)
-                ret = ZTWO_VADDR(paddr);
-        else {
-                int i;
-
-                for (i = 0; i < kmap_chunk_count;){
-                        unsigned long phys = kmap_chunks[i++];
-                        unsigned long size = kmap_chunks[i++];
-                        unsigned long virt = kmap_chunks[i++];
-                        if (paddr >= phys
-                            && paddr < (phys + size)){
-                                ret = virt + paddr - phys;
-                                goto exit;
-                        }
-                }
-
-                ret = (unsigned long) __va(paddr);
-        }
-exit:
-#ifdef DEBUGPV
-        printk ("PTOV(%lx)=%lx\n", paddr, ret);
-#endif
-#else
-        ret = (unsigned long)paddr + KERNELBASE;
-#endif
-        return ret;
-}
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 
 static int __change_page_attr(struct page *page, pgprot_t prot)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ad6e135bf212..3dfd10db931a 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -7,7 +7,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -34,41 +33,27 @@
 #include <linux/stddef.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/nodemask.h>
-#include <linux/module.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/prom.h>
-#include <asm/lmb.h>
-#include <asm/rtas.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
-#include <asm/uaccess.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
 #include <asm/tlb.h>
-#include <asm/eeh.h>
 #include <asm/processor.h>
-#include <asm/mmzone.h>
 #include <asm/cputable.h>
 #include <asm/sections.h>
 #include <asm/system.h>
-#include <asm/iommu.h>
 #include <asm/abs_addr.h>
-#include <asm/vdso.h>
 #include <asm/firmware.h>
 
 #include "mmu_decl.h"
 
-unsigned long ioremap_bot = IMALLOC_BASE;
-static unsigned long phbs_io_bot = PHBS_IO_BASE;
+unsigned long ioremap_bot = IOREMAP_BASE;
 
 /*
  * map_io_page currently only called by __ioremap
@@ -102,8 +87,8 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
                  * entry in the hardware page table.
                  *
                  */
-               if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
-                                     mmu_io_psize)) {
+               if (htab_bolt_mapping(ea, (unsigned long)ea + PAGE_SIZE,
+                                     pa, flags, mmu_io_psize)) {
                         printk(KERN_ERR "Failed to do bolted mapping IO "
                                "memory at %016lx !\n", pa);
                         return -ENOMEM;
@@ -113,8 +98,11 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 }
 
 
-static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
-                           unsigned long ea, unsigned long size,
+/**
+ * __ioremap_at - Low level function to establish the page tables
+ *                for an IO mapping
+ */
+void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
                            unsigned long flags)
 {
         unsigned long i;
@@ -122,17 +110,35 @@ static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
         if ((flags & _PAGE_PRESENT) == 0)
                 flags |= pgprot_val(PAGE_KERNEL);
 
+       WARN_ON(pa & ~PAGE_MASK);
+       WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+       WARN_ON(size & ~PAGE_MASK);
+
         for (i = 0; i < size; i += PAGE_SIZE)
-               if (map_io_page(ea+i, pa+i, flags))
+               if (map_io_page((unsigned long)ea+i, pa+i, flags))
                         return NULL;
 
-       return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+       return (void __iomem *)ea;
+}
+
+/**
+ * __iounmap_from - Low level function to tear down the page tables
+ *                  for an IO mapping. This is used for mappings that
+ *                  are manipulated manually, like partial unmapping of
+ *                  PCI IOs or ISA space.
+ */
+void __iounmap_at(void *ea, unsigned long size)
+{
+       WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+       WARN_ON(size & ~PAGE_MASK);
+
+       unmap_kernel_range((unsigned long)ea, size);
 }
 
 void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
                          unsigned long flags)
 {
-       unsigned long pa, ea;
+       phys_addr_t paligned;
         void __iomem *ret;
 
         /*
@@ -144,27 +150,30 @@ void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
          * IMALLOC_END
          *
          */
-       pa = addr & PAGE_MASK;
-       size = PAGE_ALIGN(addr + size) - pa;
+       paligned = addr & PAGE_MASK;
+       size = PAGE_ALIGN(addr + size) - paligned;
 
-       if ((size == 0) || (pa == 0))
+       if ((size == 0) || (paligned == 0))
                 return NULL;
 
         if (mem_init_done) {
                 struct vm_struct *area;
-               area = im_get_free_area(size);
+
+               area = __get_vm_area(size, VM_IOREMAP,
+                                    ioremap_bot, IOREMAP_END);
                 if (area == NULL)
                         return NULL;
-               ea = (unsigned long)(area->addr);
-               ret = __ioremap_com(addr, pa, ea, size, flags);
+               ret = __ioremap_at(paligned, area->addr, size, flags);
                 if (!ret)
-                       im_free(area->addr);
+                       vunmap(area->addr);
         } else {
-               ea = ioremap_bot;
-               ret = __ioremap_com(addr, pa, ea, size, flags);
+               ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
                 if (ret)
                         ioremap_bot += size;
         }
+
+       if (ret)
+               ret += addr & ~PAGE_MASK;
         return ret;
 }
 
@@ -187,62 +196,9 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
 }
 
 
-#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
-
-int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
-                      unsigned long size, unsigned long flags)
-{
-       struct vm_struct *area;
-       void __iomem *ret;
-
-       /* For now, require page-aligned values for pa, ea, and size */
-       if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
-           !IS_PAGE_ALIGNED(size)) {
-               printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
-               return 1;
-       }
-
-       if (!mem_init_done) {
-               /* Two things to consider in this case:
-                * 1) No records will be kept (imalloc, etc) that the region
-                *    has been remapped
-                * 2) It won't be easy to iounmap() the region later (because
-                *    of 1)
-                */
-               ;
-       } else {
-               area = im_get_area(ea, size,
-                       IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
-               if (area == NULL) {
-                       /* Expected when PHB-dlpar is in play */
-                       return 1;
-               }
-               if (ea != (unsigned long) area->addr) {
-                       printk(KERN_ERR "unexpected addr return from "
-                              "im_get_area\n");
-                       return 1;
-               }
-       }
-
-       ret = __ioremap_com(pa, pa, ea, size, flags);
-       if (ret == NULL) {
-               printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
-               return 1;
-       }
-       if (ret != (void *) ea) {
-               printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
-               return 1;
-       }
-
-       return 0;
-}
-
 /*
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
- * This code is modeled after vmalloc code - unmap_vm_area()
- *
- * XXX  what about calls before mem_init_done (ie python_countermeasures())
  */
 void __iounmap(volatile void __iomem *token)
 {
@@ -251,9 +207,14 @@ void __iounmap(volatile void __iomem *token)
         if (!mem_init_done)
                 return;
 
-       addr = (void *) ((unsigned long __force) token & PAGE_MASK);
-
-       im_free(addr);
+       addr = (void *) ((unsigned long __force)
+                        PCI_FIX_ADDR(token) & PAGE_MASK);
+       if ((unsigned long)addr < ioremap_bot) {
+               printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
+                      " at 0x%p\n", addr);
+               return;
+       }
+       vunmap(addr);
 }
 
 void iounmap(volatile void __iomem *token)
@@ -264,77 +225,8 @@ void iounmap(volatile void __iomem *token)
         __iounmap(token);
 }
 
-static int iounmap_subset_regions(unsigned long addr, unsigned long size)
-{
-       struct vm_struct *area;
-
-       /* Check whether subsets of this region exist */
-       area = im_get_area(addr, size, IM_REGION_SUPERSET);
-       if (area == NULL)
-               return 1;
-
-       while (area) {
-               iounmap((void __iomem *) area->addr);
-               area = im_get_area(addr, size,
-                               IM_REGION_SUPERSET);
-       }
-
-       return 0;
-}
-
-int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
-{
-       struct vm_struct *area;
-       unsigned long addr;
-       int rc;
-
-       addr = (unsigned long __force) start & PAGE_MASK;
-
-       /* Verify that the region either exists or is a subset of an existing
-        * region.  In the latter case, split the parent region to create
-        * the exact region
-        */
-       area = im_get_area(addr, size,
-                           IM_REGION_EXISTS | IM_REGION_SUBSET);
-       if (area == NULL) {
-               /* Determine whether subset regions exist.  If so, unmap */
-               rc = iounmap_subset_regions(addr, size);
-               if (rc) {
-                       printk(KERN_ERR
-                              "%s() cannot unmap nonexistent range 0x%lx\n",
-                               __FUNCTION__, addr);
-                       return 1;
-               }
-       } else {
-               iounmap((void __iomem *) area->addr);
-       }
-       /*
-        * FIXME! This can't be right:
-       iounmap(area->addr);
-        * Maybe it should be "iounmap(area);"
-        */
-       return 0;
-}
-
 EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__iounmap);
-
-static DEFINE_SPINLOCK(phb_io_lock);
-
-void __iomem * reserve_phb_iospace(unsigned long size)
-{
-       void __iomem *virt_addr;
-
-       if (phbs_io_bot >= IMALLOC_BASE)
-               panic("reserve_phb_iospace(): phb io space overflow\n");
-
-       spin_lock(&phb_io_lock);
-       virt_addr = (void __iomem *) phbs_io_bot;
-       phbs_io_bot += size;
-       spin_unlock(&phb_io_lock);
-
-       return virt_addr;
-}
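The net effect of the pgtable_64.c changes: __ioremap() now carves its
virtual space out of the regular vmalloc allocator (__get_vm_area()
between ioremap_bot and IOREMAP_END) instead of the removed imalloc
tracker, and two new low-level entry points let callers manage mappings
at fixed kernel virtual addresses. A hypothetical usage sketch built
only from the signatures above; the window values are made-up
placeholders, not taken from this patch:

        /* Map a bridge IO window at a caller-chosen kernel virtual
         * address, then tear down just that window again.  All three
         * values must be page aligned, as the WARN_ON()s in
         * __ioremap_at() and __iounmap_at() enforce. */
        static int example_map_hose_io(phys_addr_t io_phys,
                                       unsigned long io_virt,
                                       unsigned long size)
        {
                void __iomem *va;

                va = __ioremap_at(io_phys, (void *)io_virt, size,
                                  _PAGE_NO_CACHE | _PAGE_GUARDED);
                if (va == NULL)
                        return -ENOMEM;

                /* ... hand io_virt out as the bus's IO base ... */

                __iounmap_at((void *)io_virt, size);
                return 0;
        }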
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index ec1421a20aaa..5c45d474cfcc 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -11,7 +11,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -35,12 +34,12 @@
 
 #include "mmu_decl.h"
 
-PTE *Hash, *Hash_end;
+struct hash_pte *Hash, *Hash_end;
 unsigned long Hash_size, Hash_mask;
 unsigned long _SDR1;
 
 union ubat {                   /* BAT register values to be loaded */
-       BAT     bat;
+       struct ppc_bat bat;
         u32     word[2];
 } BATS[8][2];                  /* 8 pairs of IBAT, DBAT */
 
@@ -245,7 +244,7 @@ void __init MMU_init_hw(void)
         cacheable_memzero(Hash, Hash_size);
         _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
 
-       Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
+       Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
 
         printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
                total_memory >> 20, Hash_size >> 10, Hash);
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 132c6bc66ce1..28492bbdee8e 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -55,7 +55,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
         for (entry = 0; entry < 8; entry++, ste++) {
                 if (!(ste->esid_data & STE_ESID_V)) {
                         ste->vsid_data = vsid_data;
-                       asm volatile("eieio":::"memory");
+                       eieio();
                         ste->esid_data = esid_data;
                         return (global_entry | entry);
                 }
@@ -101,7 +101,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
         asm volatile("sync" : : : "memory");    /* Order update */
 
         castout_ste->vsid_data = vsid_data;
-       asm volatile("eieio" : : : "memory");   /* Order update */
+       eieio();                                /* Order update */
         castout_ste->esid_data = esid_data;
 
         asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 6a69417cbc0e..06c7e77e097a 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -11,7 +11,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 2bfc4d7e1aa2..cbd34fc813ee 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -8,7 +8,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -239,3 +238,59 @@ void pte_free_finish(void)
                 pte_free_submit(*batchp);
         *batchp = NULL;
 }
+
+/**
+ * __flush_hash_table_range - Flush all HPTEs for a given address range
+ *                            from the hash table (and the TLB). But keeps
+ *                            the linux PTEs intact.
+ *
+ * @mm    : mm_struct of the target address space (generally init_mm)
+ * @start : starting address
+ * @end   : ending address (not included in the flush)
+ *
+ * This function is mostly to be used by some IO hotplug code in order
+ * to remove all hash entries from a given address range used to map IO
+ * space on a removed PCI-PCI bridge without tearing down the full mapping
+ * since 64K pages may overlap with other bridges when using 64K pages
+ * with 4K HW pages on IO space.
+ *
+ * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
+ * and is implemented for small size rather than speed.
+ */
+#ifdef CONFIG_HOTPLUG
+
+void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+                              unsigned long end)
+{
+        unsigned long flags;
+
+        start = _ALIGN_DOWN(start, PAGE_SIZE);
+        end = _ALIGN_UP(end, PAGE_SIZE);
+
+        BUG_ON(!mm->pgd);
+
+        /* Note: Normally, we should only ever use a batch within a
+         * PTE locked section. This violates the rule, but will work
+         * since we don't actually modify the PTEs, we just flush the
+         * hash while leaving the PTEs intact (including their reference
+         * to being hashed). This is not the most performance oriented
+         * way to do things but is fine for our needs here.
+         */
+        local_irq_save(flags);
+        arch_enter_lazy_mmu_mode();
+        for (; start < end; start += PAGE_SIZE) {
+                pte_t *ptep = find_linux_pte(mm->pgd, start);
+                unsigned long pte;
+
+                if (ptep == NULL)
+                        continue;
+                pte = pte_val(*ptep);
+                if (!(pte & _PAGE_HASHPTE))
+                        continue;
+                hpte_need_flush(mm, start, ptep, pte, 0);
+        }
+        arch_leave_lazy_mmu_mode();
+        local_irq_restore(flags);
+}
+
+#endif /* CONFIG_HOTPLUG */
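Per its kernel-doc above, __flush_hash_table_range() evicts the hash (and
TLB) entries for a range while deliberately leaving the Linux PTEs bolted,
so an overlapping 64K-page mapping shared with other bridges survives. A
hypothetical caller sketch based only on the signature above; the range
values are illustrative, not from this patch:

        /* A PCI-PCI bridge went away: drop the hash entries covering its
         * IO range from the kernel address space, but keep the enclosing
         * PHB window's page tables in place. */
        static void example_bridge_removed(unsigned long io_start,
                                           unsigned long io_end)
        {
                __flush_hash_table_range(&init_mm, io_start, io_end);
        }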