path: root/arch/parisc/mm
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/parisc/mm
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/parisc/mm')
-rw-r--r--  arch/parisc/mm/Makefile      5
-rw-r--r--  arch/parisc/mm/fault.c     271
-rw-r--r--  arch/parisc/mm/init.c     1019
-rw-r--r--  arch/parisc/mm/ioremap.c   207
-rw-r--r--  arch/parisc/mm/kmap.c      166
5 files changed, 1668 insertions, 0 deletions
diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile
new file mode 100644
index 000000000000..758ceefb373a
--- /dev/null
+++ b/arch/parisc/mm/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for arch/parisc/mm
3#
4
5obj-y := init.o fault.o ioremap.o
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
new file mode 100644
index 000000000000..eaa701479f5f
--- /dev/null
+++ b/arch/parisc/mm/fault.c
@@ -0,0 +1,271 @@
1/* $Id: fault.c,v 1.5 2000/01/26 16:20:29 jsm Exp $
2 *
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
6 *
7 *
8 * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
9 * Copyright 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
10 * Copyright 1999 Hewlett Packard Co.
11 *
12 */
13
14#include <linux/mm.h>
15#include <linux/ptrace.h>
16#include <linux/sched.h>
17#include <linux/interrupt.h>
18#include <linux/module.h>
19
20#include <asm/uaccess.h>
21#include <asm/traps.h>
22
23#define PRINT_USER_FAULTS /* turn this on if you want user faults to be */
24 /* dumped to the console via printk */
25
26
27/* Defines for parisc_acctyp() */
28#define READ 0
29#define WRITE 1
30
31/* Various important other fields */
32#define bit22set(x) (x & 0x00000200)
33#define bits23_25set(x) (x & 0x000001c0)
34#define isGraphicsFlushRead(x) ((x & 0xfc003fdf) == 0x04001a80)
35 /* extended opcode is 0x6a */
36
37#define BITSSET 0x1c0 /* for identifying LDCW */
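/*
 * (illustrative note, not part of the original commit) PA-RISC numbers
 * bits from the most significant end, so "bit 22" of a 32-bit word is
 * the mask 1 << (31 - 22) == 0x200, and bits 23..25 are
 * 0x100 | 0x80 | 0x40 == 0x1c0 -- which is why BITSSET above is 0x1c0.
 */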
38
39
40DEFINE_PER_CPU(struct exception_data, exception_data);
41
42/*
43 * parisc_acctyp(unsigned int inst) --
44 * Given a PA-RISC memory access instruction, determine if the
45 * instruction would perform a memory read or memory write
46 * operation.
47 *
48 * This function assumes that the given instruction is a memory access
49 * instruction (i.e. you should really only call it if you know that
50 * the instruction has generated some sort of a memory access fault).
51 *
52 * Returns:
53 * VM_READ if read operation
54 * VM_WRITE if write operation
55 * VM_EXEC if execute operation
56 */
57static unsigned long
58parisc_acctyp(unsigned long code, unsigned int inst)
59{
60 if (code == 6 || code == 16)
61 return VM_EXEC;
62
63 switch (inst & 0xf0000000) {
64 case 0x40000000: /* load */
65 case 0x50000000: /* new load */
66 return VM_READ;
67
68 case 0x60000000: /* store */
69 case 0x70000000: /* new store */
70 return VM_WRITE;
71
72 case 0x20000000: /* coproc */
73 case 0x30000000: /* coproc2 */
74 if (bit22set(inst))
75 return VM_WRITE;
76 /* fall through */
77 case 0x0: /* indexed/memory management */
78 if (bit22set(inst)) {
79 /*
80 * Check for the 'Graphics Flush Read' instruction.
81 * It resembles an FDC instruction, except for bits
82 * 20 and 21. Any combination other than zero will
83 * utilize the block mover functionality on some
84 * older PA-RISC platforms. The case where a block
85 * move is performed from VM to graphics IO space
86 * should be treated as a READ.
87 *
88 * The significance of bits 20,21 in the FDC
89 * instruction is:
90 *
91 * 00 Flush data cache (normal instruction behavior)
92 * 01 Graphics flush write (IO space -> VM)
93 * 10 Graphics flush read (VM -> IO space)
94 * 11 Graphics flush read/write (VM <-> IO space)
95 */
96 if (isGraphicsFlushRead(inst))
97 return VM_READ;
98 return VM_WRITE;
99 } else {
100 /*
101 * Check for LDCWX and LDCWS (semaphore instructions).
102 * If bits 23 through 25 are all 1's it is one of
103 * the above two instructions and is a write.
104 *
105 * Note: With the limited bits we are looking at,
106 * this will also catch PROBEW and PROBEWI. However,
107 * these should never get in here because they don't
108 * generate exceptions of the type:
109 * Data TLB miss fault/data page fault
110 * Data memory protection trap
111 */
112 if (bits23_25set(inst) == BITSSET)
113 return VM_WRITE;
114 }
115 return VM_READ; /* Default */
116 }
117 return VM_READ; /* Default */
118}
119
120#undef bit22set
121#undef bits23_25set
122#undef isGraphicsFlushRead
123#undef BITSSET
124
125
126#if 0
127/* This is the treewalk to find a vma which is the highest that has
128 * a start < addr. We're using find_vma_prev instead right now, but
129 * we might want to use this at some point in the future. Probably
130 * not, but I want it committed to CVS so I don't lose it :-)
131 */
132 while (tree != vm_avl_empty) {
133 if (tree->vm_start > addr) {
134 tree = tree->vm_avl_left;
135 } else {
136 prev = tree;
137 if (prev->vm_next == NULL)
138 break;
139 if (prev->vm_next->vm_start > addr)
140 break;
141 tree = tree->vm_avl_right;
142 }
143 }
144#endif
145
146void do_page_fault(struct pt_regs *regs, unsigned long code,
147 unsigned long address)
148{
149 struct vm_area_struct *vma, *prev_vma;
150 struct task_struct *tsk = current;
151 struct mm_struct *mm = tsk->mm;
152 const struct exception_table_entry *fix;
153 unsigned long acc_type;
154
155 if (in_interrupt() || !mm)
156 goto no_context;
157
158 down_read(&mm->mmap_sem);
159 vma = find_vma_prev(mm, address, &prev_vma);
160 if (!vma || address < vma->vm_start)
161 goto check_expansion;
162/*
163 * Ok, we have a good vm_area for this memory access. We still need to
164 * check the access permissions.
165 */
166
167good_area:
168
169 acc_type = parisc_acctyp(code,regs->iir);
170
171 if ((vma->vm_flags & acc_type) != acc_type)
172 goto bad_area;
173
174 /*
175 * If for any reason at all we couldn't handle the fault, make
176 * sure we exit gracefully rather than endlessly redo the
177 * fault.
178 */
179
180 switch (handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0)) {
181 case 1:
182 ++current->min_flt;
183 break;
184 case 2:
185 ++current->maj_flt;
186 break;
187 case 0:
188 /*
189 * We ran out of memory, or some other thing happened
190 * to us that made us unable to handle the page fault
191 * gracefully.
192 */
193 goto bad_area;
194 default:
195 goto out_of_memory;
196 }
197 up_read(&mm->mmap_sem);
198 return;
199
200check_expansion:
201 vma = prev_vma;
202 if (vma && (expand_stack(vma, address) == 0))
203 goto good_area;
204
205/*
206 * Something tried to access memory that isn't in our memory map..
207 */
208bad_area:
209 up_read(&mm->mmap_sem);
210
211 if (user_mode(regs)) {
212 struct siginfo si;
213
214#ifdef PRINT_USER_FAULTS
215 printk(KERN_DEBUG "\n");
216 printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
217 tsk->pid, tsk->comm, code, address);
218 if (vma) {
219 printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
220 vma->vm_start, vma->vm_end);
221 }
222 show_regs(regs);
223#endif
224 /* FIXME: actually we need to get the signo and code correct */
225 si.si_signo = SIGSEGV;
226 si.si_errno = 0;
227 si.si_code = SEGV_MAPERR;
228 si.si_addr = (void __user *) address;
229 force_sig_info(SIGSEGV, &si, current);
230 return;
231 }
232
233no_context:
234
235 if (!user_mode(regs)) {
236 fix = search_exception_tables(regs->iaoq[0]);
237
238 if (fix) {
239 struct exception_data *d;
240
241 d = &__get_cpu_var(exception_data);
242 d->fault_ip = regs->iaoq[0];
243 d->fault_space = regs->isr;
244 d->fault_addr = regs->ior;
245
246 regs->iaoq[0] = ((fix->fixup) & ~3);
247
248 /*
249 * NOTE: In some cases the faulting instruction
250 * may be in the delay slot of a branch. We
251 * don't want to take the branch, so we don't
252 * increment iaoq[1], instead we set it to be
253 * iaoq[0]+4, and clear the B bit in the PSW
254 */
255
256 regs->iaoq[1] = regs->iaoq[0] + 4;
257 regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
258
259 return;
260 }
261 }
262
263 parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
264
265 out_of_memory:
266 up_read(&mm->mmap_sem);
267 printk(KERN_CRIT "VM: killing process %s\n", current->comm);
268 if (user_mode(regs))
269 do_exit(SIGKILL);
270 goto no_context;
271}
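The switch in parisc_acctyp() above keys off the instruction's major opcode, i.e. whatever survives the 0xf0000000 mask. As a rough standalone sketch of that first-level decode (the sample instruction words and all names below are invented, and the finer coproc/indexed cases are collapsed into the default):

#include <stdio.h>

/* Illustrative only: classify an instruction word the way the
 * parisc_acctyp() switch does, by its top nibble. */
static const char *acctyp(unsigned int inst)
{
	switch (inst & 0xf0000000) {
	case 0x40000000:	/* load */
	case 0x50000000:	/* new load */
		return "read";
	case 0x60000000:	/* store */
	case 0x70000000:	/* new store */
		return "write";
	default:		/* coproc/indexed need the finer decode */
		return "needs further decode";
	}
}

int main(void)
{
	printf("0x48210000 -> %s\n", acctyp(0x48210000));	/* read */
	printf("0x68210000 -> %s\n", acctyp(0x68210000));	/* write */
	return 0;
}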
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
new file mode 100644
index 000000000000..cac37589e35c
--- /dev/null
+++ b/arch/parisc/mm/init.c
@@ -0,0 +1,1019 @@
1/*
2 * linux/arch/parisc/mm/init.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 * Copyright 1999 SuSE GmbH
6 * changed by Philipp Rumpf
7 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
8 * Copyright 2004 Randolph Chung (tausq@debian.org)
9 *
10 */
11
12#include <linux/config.h>
13
14#include <linux/module.h>
15#include <linux/mm.h>
16#include <linux/bootmem.h>
17#include <linux/delay.h>
18#include <linux/init.h>
19#include <linux/pci.h> /* for hppa_dma_ops and pcxl_dma_ops */
20#include <linux/initrd.h>
21#include <linux/swap.h>
22#include <linux/unistd.h>
23#include <linux/nodemask.h> /* for node_online_map */
24#include <linux/pagemap.h> /* for release_pages and page_cache_release */
25
26#include <asm/pgalloc.h>
27#include <asm/tlb.h>
28#include <asm/pdc_chassis.h>
29#include <asm/mmzone.h>
30
31DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
32
33extern char _text; /* start of kernel code, defined by linker */
34extern int data_start;
35extern char _end; /* end of BSS, defined by linker */
36extern char __init_begin, __init_end;
37
38#ifdef CONFIG_DISCONTIGMEM
39struct node_map_data node_data[MAX_NUMNODES];
40bootmem_data_t bmem_data[MAX_NUMNODES];
41unsigned char pfnnid_map[PFNNID_MAP_MAX];
42#endif
43
44static struct resource data_resource = {
45 .name = "Kernel data",
46 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
47};
48
49static struct resource code_resource = {
50 .name = "Kernel code",
51 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
52};
53
54static struct resource pdcdata_resource = {
55 .name = "PDC data (Page Zero)",
56 .start = 0,
57 .end = 0x9ff,
58 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
59};
60
61static struct resource sysram_resources[MAX_PHYSMEM_RANGES];
62
63/* The following array is initialized from the firmware-specific
64 * information retrieved in kernel/inventory.c.
65 */
66
67physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
68int npmem_ranges;
69
70#ifdef __LP64__
71#define MAX_MEM (~0UL)
72#else /* !__LP64__ */
73#define MAX_MEM (3584U*1024U*1024U)
74#endif /* !__LP64__ */
75
76static unsigned long mem_limit = MAX_MEM;
77
78static void __init mem_limit_func(void)
79{
80 char *cp, *end;
81 unsigned long limit;
82 extern char saved_command_line[];
83
84 /* We need this before __setup() functions are called */
85
86 limit = MAX_MEM;
87 for (cp = saved_command_line; *cp; ) {
88 if (memcmp(cp, "mem=", 4) == 0) {
89 cp += 4;
90 limit = memparse(cp, &end);
91 if (end != cp)
92 break;
93 cp = end;
94 } else {
95 while (*cp != ' ' && *cp)
96 ++cp;
97 while (*cp == ' ')
98 ++cp;
99 }
100 }
101
102 if (limit < mem_limit)
103 mem_limit = limit;
104}
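/*
 * (illustrative note) memparse() accepts an optional K/M/G suffix, so a
 * command line containing "mem=512M" parses to 512 << 20 here, and the
 * effective mem_limit becomes the smaller of the parsed value and MAX_MEM.
 */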
105
106#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
107
108static void __init setup_bootmem(void)
109{
110 unsigned long bootmap_size;
111 unsigned long mem_max;
112 unsigned long bootmap_pages;
113 unsigned long bootmap_start_pfn;
114 unsigned long bootmap_pfn;
115#ifndef CONFIG_DISCONTIGMEM
116 physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
117 int npmem_holes;
118#endif
119 int i, sysram_resource_count;
120
121 disable_sr_hashing(); /* Turn off space register hashing */
122
123 /*
124 * Sort the ranges. Since the number of ranges is typically
125 * small, and performance is not an issue here, just do
126 * a simple insertion sort.
127 */
128
129 for (i = 1; i < npmem_ranges; i++) {
130 int j;
131
132 for (j = i; j > 0; j--) {
133 unsigned long tmp;
134
135 if (pmem_ranges[j-1].start_pfn <
136 pmem_ranges[j].start_pfn) {
137
138 break;
139 }
140 tmp = pmem_ranges[j-1].start_pfn;
141 pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
142 pmem_ranges[j].start_pfn = tmp;
143 tmp = pmem_ranges[j-1].pages;
144 pmem_ranges[j-1].pages = pmem_ranges[j].pages;
145 pmem_ranges[j].pages = tmp;
146 }
147 }
148
149#ifndef CONFIG_DISCONTIGMEM
150 /*
151 * Throw out ranges that are too far apart (controlled by
152 * MAX_GAP).
153 */
154
155 for (i = 1; i < npmem_ranges; i++) {
156 if (pmem_ranges[i].start_pfn -
157 (pmem_ranges[i-1].start_pfn +
158 pmem_ranges[i-1].pages) > MAX_GAP) {
159 npmem_ranges = i;
160 printk("Large gap in memory detected (%ld pages). "
161 "Consider turning on CONFIG_DISCONTIGMEM\n",
162 pmem_ranges[i].start_pfn -
163 (pmem_ranges[i-1].start_pfn +
164 pmem_ranges[i-1].pages));
165 break;
166 }
167 }
168#endif
169
170 if (npmem_ranges > 1) {
171
172 /* Print the memory ranges */
173
174 printk(KERN_INFO "Memory Ranges:\n");
175
176 for (i = 0; i < npmem_ranges; i++) {
177 unsigned long start;
178 unsigned long size;
179
180 size = (pmem_ranges[i].pages << PAGE_SHIFT);
181 start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
182 printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
183 i,start, start + (size - 1), size >> 20);
184 }
185 }
186
187 sysram_resource_count = npmem_ranges;
188 for (i = 0; i < sysram_resource_count; i++) {
189 struct resource *res = &sysram_resources[i];
190 res->name = "System RAM";
191 res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
192 res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
193 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
194 request_resource(&iomem_resource, res);
195 }
196
197 /*
198 * For 32 bit kernels we limit the amount of memory we can
199 * support, in order to preserve enough kernel address space
200 * for other purposes. For 64 bit kernels we don't normally
201 * limit the memory, but this mechanism can be used to
202 * artificially limit the amount of memory (and it is written
203 * to work with multiple memory ranges).
204 */
205
206 mem_limit_func(); /* check for "mem=" argument */
207
208 mem_max = 0;
209 num_physpages = 0;
210 for (i = 0; i < npmem_ranges; i++) {
211 unsigned long rsize;
212
213 rsize = pmem_ranges[i].pages << PAGE_SHIFT;
214 if ((mem_max + rsize) > mem_limit) {
215 printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
216 if (mem_max == mem_limit)
217 npmem_ranges = i;
218 else {
219 pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
220 - (mem_max >> PAGE_SHIFT);
221 npmem_ranges = i + 1;
222 mem_max = mem_limit;
223 }
224 num_physpages += pmem_ranges[i].pages;
225 break;
226 }
227 num_physpages += pmem_ranges[i].pages;
228 mem_max += rsize;
229 }
230
231 printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
232
233#ifndef CONFIG_DISCONTIGMEM
234 /* Merge the ranges, keeping track of the holes */
235
236 {
237 unsigned long end_pfn;
238 unsigned long hole_pages;
239
240 npmem_holes = 0;
241 end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
242 for (i = 1; i < npmem_ranges; i++) {
243
244 hole_pages = pmem_ranges[i].start_pfn - end_pfn;
245 if (hole_pages) {
246 pmem_holes[npmem_holes].start_pfn = end_pfn;
247 pmem_holes[npmem_holes++].pages = hole_pages;
248 end_pfn += hole_pages;
249 }
250 end_pfn += pmem_ranges[i].pages;
251 }
252
253 pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
254 npmem_ranges = 1;
255 }
256#endif
257
258 bootmap_pages = 0;
259 for (i = 0; i < npmem_ranges; i++)
260 bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);
261
262 bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
263
264#ifdef CONFIG_DISCONTIGMEM
265 for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
266 memset(NODE_DATA(i), 0, sizeof(pg_data_t));
267 NODE_DATA(i)->bdata = &bmem_data[i];
268 }
269 memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
270
271 for (i = 0; i < npmem_ranges; i++)
272 node_set_online(i);
273#endif
274
275 /*
276 * Initialize and free the full range of memory in each range.
277 * Note that the only writing these routines do are to the bootmap,
278 * and we've made sure to locate the bootmap properly so that they
279 * won't be writing over anything important.
280 */
281
282 bootmap_pfn = bootmap_start_pfn;
283 max_pfn = 0;
284 for (i = 0; i < npmem_ranges; i++) {
285 unsigned long start_pfn;
286 unsigned long npages;
287
288 start_pfn = pmem_ranges[i].start_pfn;
289 npages = pmem_ranges[i].pages;
290
291 bootmap_size = init_bootmem_node(NODE_DATA(i),
292 bootmap_pfn,
293 start_pfn,
294 (start_pfn + npages) );
295 free_bootmem_node(NODE_DATA(i),
296 (start_pfn << PAGE_SHIFT),
297 (npages << PAGE_SHIFT) );
298 bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
299 if ((start_pfn + npages) > max_pfn)
300 max_pfn = start_pfn + npages;
301 }
302
303 if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
304 printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
305 BUG();
306 }
307
308 /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
309
310#define PDC_CONSOLE_IO_IODC_SIZE 32768
311
312 reserve_bootmem_node(NODE_DATA(0), 0UL,
313 (unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
314 reserve_bootmem_node(NODE_DATA(0),__pa((unsigned long)&_text),
315 (unsigned long)(&_end - &_text));
316 reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
317 ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
318
319#ifndef CONFIG_DISCONTIGMEM
320
321 /* reserve the holes */
322
323 for (i = 0; i < npmem_holes; i++) {
324 reserve_bootmem_node(NODE_DATA(0),
325 (pmem_holes[i].start_pfn << PAGE_SHIFT),
326 (pmem_holes[i].pages << PAGE_SHIFT));
327 }
328#endif
329
330#ifdef CONFIG_BLK_DEV_INITRD
331 if (initrd_start) {
332 printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
333 if (__pa(initrd_start) < mem_max) {
334 unsigned long initrd_reserve;
335
336 if (__pa(initrd_end) > mem_max) {
337 initrd_reserve = mem_max - __pa(initrd_start);
338 } else {
339 initrd_reserve = initrd_end - initrd_start;
340 }
341 initrd_below_start_ok = 1;
342 printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
343
344 reserve_bootmem_node(NODE_DATA(0),__pa(initrd_start), initrd_reserve);
345 }
346 }
347#endif
348
349 data_resource.start = virt_to_phys(&data_start);
350 data_resource.end = virt_to_phys(&_end)-1;
351 code_resource.start = virt_to_phys(&_text);
352 code_resource.end = virt_to_phys(&data_start)-1;
353
354 /* We don't know which region the kernel will be in, so try
355 * all of them.
356 */
357 for (i = 0; i < sysram_resource_count; i++) {
358 struct resource *res = &sysram_resources[i];
359 request_resource(res, &code_resource);
360 request_resource(res, &data_resource);
361 }
362 request_resource(&sysram_resources[0], &pdcdata_resource);
363}
364
365void free_initmem(void)
366{
367 /* FIXME: */
368#if 0
369 printk(KERN_INFO "NOT FREEING INITMEM (%dk)\n",
370 (&__init_end - &__init_begin) >> 10);
371 return;
372#else
373 unsigned long addr;
374
375 printk(KERN_INFO "Freeing unused kernel memory: ");
376
377#if 1
378 /* Attempt to catch anyone trying to execute code here
379 * by filling the page with BRK insns.
380 *
381 * If we disable interrupts for all CPUs, then IPI stops working.
382 * Kinda breaks the global cache flushing.
383 */
384 local_irq_disable();
385
386 memset(&__init_begin, 0x00,
387 (unsigned long)&__init_end - (unsigned long)&__init_begin);
388
389 flush_data_cache();
390 asm volatile("sync" : : );
391 flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
392 asm volatile("sync" : : );
393
394 local_irq_enable();
395#endif
396
397 addr = (unsigned long)(&__init_begin);
398 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
399 ClearPageReserved(virt_to_page(addr));
400 set_page_count(virt_to_page(addr), 1);
401 free_page(addr);
402 num_physpages++;
403 totalram_pages++;
404 }
405
406 /* set up a new LED state on systems shipped with an LED state panel */
407 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
408
409 printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
410#endif
411}
412
413/*
414 * Just an arbitrary offset to serve as a "hole" between mapping areas
415 * (between top of physical memory and a potential pcxl dma mapping
416 * area, and below the vmalloc mapping area).
417 *
418 * The current 32K value just means that there will be a 32K "hole"
419 * between mapping areas. That means that any out-of-bounds memory
420 * accesses will hopefully be caught. The vmalloc() routines leave
421 * a hole of 4kB between each vmalloced area for the same reason.
422 */
423
424 /* Leave room for gateway page expansion */
425#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
426#error KERNEL_MAP_START is in gateway reserved region
427#endif
428#define MAP_START (KERNEL_MAP_START)
429
430#define VM_MAP_OFFSET (32*1024)
431#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
432 & ~(VM_MAP_OFFSET-1)))
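/*
 * (illustrative) SET_MAP_OFFSET() rounds its argument up past the next
 * 32K boundary: SET_MAP_OFFSET(0x10001000) == 0x10008000, and an
 * already-aligned 0x10008000 still advances to 0x10010000, so two
 * consecutive areas are always separated by a hole.
 */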
433
434void *vmalloc_start;
435EXPORT_SYMBOL(vmalloc_start);
436
437#ifdef CONFIG_PA11
438unsigned long pcxl_dma_start;
439#endif
440
441void __init mem_init(void)
442{
443 high_memory = __va((max_pfn << PAGE_SHIFT));
444
445#ifndef CONFIG_DISCONTIGMEM
446 max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
447 totalram_pages += free_all_bootmem();
448#else
449 {
450 int i;
451
452 for (i = 0; i < npmem_ranges; i++)
453 totalram_pages += free_all_bootmem_node(NODE_DATA(i));
454 }
455#endif
456
457 printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));
458
459#ifdef CONFIG_PA11
460 if (hppa_dma_ops == &pcxl_dma_ops) {
461 pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
462 vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
463 } else {
464 pcxl_dma_start = 0;
465 vmalloc_start = SET_MAP_OFFSET(MAP_START);
466 }
467#else
468 vmalloc_start = SET_MAP_OFFSET(MAP_START);
469#endif
470
471}
472
473int do_check_pgt_cache(int low, int high)
474{
475 return 0;
476}
477
478unsigned long *empty_zero_page;
479
480void show_mem(void)
481{
482 int i,free = 0,total = 0,reserved = 0;
483 int shared = 0, cached = 0;
484
485 printk(KERN_INFO "Mem-info:\n");
486 show_free_areas();
487 printk(KERN_INFO "Free swap: %6ldkB\n",
488 nr_swap_pages<<(PAGE_SHIFT-10));
489#ifndef CONFIG_DISCONTIGMEM
490 i = max_mapnr;
491 while (i-- > 0) {
492 total++;
493 if (PageReserved(mem_map+i))
494 reserved++;
495 else if (PageSwapCache(mem_map+i))
496 cached++;
497 else if (!page_count(&mem_map[i]))
498 free++;
499 else
500 shared += page_count(&mem_map[i]) - 1;
501 }
502#else
503 for (i = 0; i < npmem_ranges; i++) {
504 int j;
505
506 for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
507 struct page *p;
508
509 p = node_mem_map(i) + j - node_start_pfn(i);
510
511 total++;
512 if (PageReserved(p))
513 reserved++;
514 else if (PageSwapCache(p))
515 cached++;
516 else if (!page_count(p))
517 free++;
518 else
519 shared += page_count(p) - 1;
520 }
521 }
522#endif
523 printk(KERN_INFO "%d pages of RAM\n", total);
524 printk(KERN_INFO "%d reserved pages\n", reserved);
525 printk(KERN_INFO "%d pages shared\n", shared);
526 printk(KERN_INFO "%d pages swap cached\n", cached);
527
528
529#ifdef CONFIG_DISCONTIGMEM
530 {
531 struct zonelist *zl;
532 int i, j, k;
533
534 for (i = 0; i < npmem_ranges; i++) {
535 for (j = 0; j < MAX_NR_ZONES; j++) {
536 zl = NODE_DATA(i)->node_zonelists + j;
537
538 printk("Zone list for zone %d on node %d: ", j, i);
539 for (k = 0; zl->zones[k] != NULL; k++)
540 printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id, zl->zones[k]->name);
541 printk("\n");
542 }
543 }
544 }
545#endif
546}
547
548
549static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
550{
551 pgd_t *pg_dir;
552 pmd_t *pmd;
553 pte_t *pg_table;
554 unsigned long end_paddr;
555 unsigned long start_pmd;
556 unsigned long start_pte;
557 unsigned long tmp1;
558 unsigned long tmp2;
559 unsigned long address;
560 unsigned long ro_start;
561 unsigned long ro_end;
562 unsigned long fv_addr;
563 unsigned long gw_addr;
564 extern const unsigned long fault_vector_20;
565 extern void * const linux_gateway_page;
566
567 ro_start = __pa((unsigned long)&_text);
568 ro_end = __pa((unsigned long)&data_start);
569 fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
570 gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
571
572 end_paddr = start_paddr + size;
573
574 pg_dir = pgd_offset_k(start_vaddr);
575
576#if PTRS_PER_PMD == 1
577 start_pmd = 0;
578#else
579 start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
580#endif
581 start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
582
583 address = start_paddr;
584 while (address < end_paddr) {
585#if PTRS_PER_PMD == 1
586 pmd = (pmd_t *)__pa(pg_dir);
587#else
588 pmd = (pmd_t *)pgd_address(*pg_dir);
589
590 /*
591 * pmd is physical at this point
592 */
593
594 if (!pmd) {
595 pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE << PMD_ORDER);
596 pmd = (pmd_t *) __pa(pmd);
597 }
598
599 pgd_populate(NULL, pg_dir, __va(pmd));
600#endif
601 pg_dir++;
602
603 /* now change pmd to kernel virtual addresses */
604
605 pmd = (pmd_t *)__va(pmd) + start_pmd;
606 for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {
607
608 /*
609 * pg_table is physical at this point
610 */
611
612 pg_table = (pte_t *)pmd_address(*pmd);
613 if (!pg_table) {
614 pg_table = (pte_t *)
615 alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
616 pg_table = (pte_t *) __pa(pg_table);
617 }
618
619 pmd_populate_kernel(NULL, pmd, __va(pg_table));
620
621 /* now change pg_table to kernel virtual addresses */
622
623 pg_table = (pte_t *) __va(pg_table) + start_pte;
624 for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
625 pte_t pte;
626
627 /*
628 * Map the fault vector writable so we can
629 * write the HPMC checksum.
630 */
631 if (address >= ro_start && address < ro_end
632 && address != fv_addr
633 && address != gw_addr)
634 pte = __mk_pte(address, PAGE_KERNEL_RO);
635 else
636 pte = __mk_pte(address, pgprot);
637
638 if (address >= end_paddr)
639 pte_val(pte) = 0;
640
641 set_pte(pg_table, pte);
642
643 address += PAGE_SIZE;
644 }
645 start_pte = 0;
646
647 if (address >= end_paddr)
648 break;
649 }
650 start_pmd = 0;
651 }
652}
653
654/*
655 * pagetable_init() sets up the page tables
656 *
657 * Note that gateway_init() places the Linux gateway page at page 0.
658 * Since gateway pages cannot be dereferenced this has the desirable
659 * side effect of trapping those pesky NULL-reference errors in the
660 * kernel.
661 */
662static void __init pagetable_init(void)
663{
664 int range;
665
666 /* Map each physical memory range to its kernel vaddr */
667
668 for (range = 0; range < npmem_ranges; range++) {
669 unsigned long start_paddr;
670 unsigned long end_paddr;
671 unsigned long size;
672
673 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
674 end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
675 size = pmem_ranges[range].pages << PAGE_SHIFT;
676
677 map_pages((unsigned long)__va(start_paddr), start_paddr,
678 size, PAGE_KERNEL);
679 }
680
681#ifdef CONFIG_BLK_DEV_INITRD
682 if (initrd_end && initrd_end > mem_limit) {
683 printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
684 map_pages(initrd_start, __pa(initrd_start),
685 initrd_end - initrd_start, PAGE_KERNEL);
686 }
687#endif
688
689 empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
690 memset(empty_zero_page, 0, PAGE_SIZE);
691}
692
693static void __init gateway_init(void)
694{
695 unsigned long linux_gateway_page_addr;
696 /* FIXME: This is 'const' in order to trick the compiler
697 into not treating it as DP-relative data. */
698 extern void * const linux_gateway_page;
699
700 linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;
701
702 /*
703 * Setup Linux Gateway page.
704 *
705 * The Linux gateway page will reside in kernel space (on virtual
706 * page 0), so it doesn't need to be aliased into user space.
707 */
708
709 map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
710 PAGE_SIZE, PAGE_GATEWAY);
711}
712
713#ifdef CONFIG_HPUX
714void
715map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
716{
717 pgd_t *pg_dir;
718 pmd_t *pmd;
719 pte_t *pg_table;
720 unsigned long start_pmd;
721 unsigned long start_pte;
722 unsigned long address;
723 unsigned long hpux_gw_page_addr;
724 /* FIXME: This is 'const' in order to trick the compiler
725 into not treating it as DP-relative data. */
726 extern void * const hpux_gateway_page;
727
728 hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;
729
730 /*
731 * Setup HP-UX Gateway page.
732 *
733 * The HP-UX gateway page resides in the user address space,
734 * so it needs to be aliased into each process.
735 */
736
737 pg_dir = pgd_offset(mm,hpux_gw_page_addr);
738
739#if PTRS_PER_PMD == 1
740 start_pmd = 0;
741#else
742 start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
743#endif
744 start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
745
746 address = __pa(&hpux_gateway_page);
747#if PTRS_PER_PMD == 1
748 pmd = (pmd_t *)__pa(pg_dir);
749#else
750 pmd = (pmd_t *) pgd_address(*pg_dir);
751
752 /*
753 * pmd is physical at this point
754 */
755
756 if (!pmd) {
757 pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
758 pmd = (pmd_t *) __pa(pmd);
759 }
760
761 __pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
762#endif
763 /* now change pmd to kernel virtual addresses */
764
765 pmd = (pmd_t *)__va(pmd) + start_pmd;
766
767 /*
768 * pg_table is physical at this point
769 */
770
771 pg_table = (pte_t *) pmd_address(*pmd);
772 if (!pg_table)
773 pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));
774
775 __pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);
776
777 /* now change pg_table to kernel virtual addresses */
778
779 pg_table = (pte_t *) __va(pg_table) + start_pte;
780 set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
781}
782EXPORT_SYMBOL(map_hpux_gateway_page);
783#endif
784
785extern void flush_tlb_all_local(void);
786
787void __init paging_init(void)
788{
789 int i;
790
791 setup_bootmem();
792 pagetable_init();
793 gateway_init();
794 flush_cache_all_local(); /* start with known state */
795 flush_tlb_all_local();
796
797 for (i = 0; i < npmem_ranges; i++) {
798 unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };
799
800 /* We have an IOMMU, so all memory can go into a single
801 ZONE_DMA zone. */
802 zones_size[ZONE_DMA] = pmem_ranges[i].pages;
803
804#ifdef CONFIG_DISCONTIGMEM
805 /* Need to initialize the pfnnid_map before we can initialize
806 the zone */
807 {
808 int j;
809 for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
810 j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
811 j++) {
812 pfnnid_map[j] = i;
813 }
814 }
815#endif
816
817 free_area_init_node(i, NODE_DATA(i), zones_size,
818 pmem_ranges[i].start_pfn, NULL);
819 }
820}
821
822#ifdef CONFIG_PA20
823
824/*
825 * Currently, all PA20 chips have 18 bit protection id's, which is the
826 * limiting factor (space ids are 32 bits).
827 */
828
829#define NR_SPACE_IDS 262144
830
831#else
832
833/*
834 * Currently we have a one-to-one relationship between space id's and
835 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
836 * support 15 bit protection id's, so that is the limiting factor.
837 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
838 * probably not worth the effort for a special case here.
839 */
840
841#define NR_SPACE_IDS 32768
842
843#endif /* !CONFIG_PA20 */
844
845#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
846#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
847
848static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
849static unsigned long dirty_space_id[SID_ARRAY_SIZE];
850static unsigned long space_id_index;
851static unsigned long free_space_ids = NR_SPACE_IDS - 1;
852static unsigned long dirty_space_ids = 0;
853
854static DEFINE_SPINLOCK(sid_lock);
855
856unsigned long alloc_sid(void)
857{
858 unsigned long index;
859
860 spin_lock(&sid_lock);
861
862 if (free_space_ids == 0) {
863 if (dirty_space_ids != 0) {
864 spin_unlock(&sid_lock);
865 flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
866 spin_lock(&sid_lock);
867 }
868 if (free_space_ids == 0)
869 BUG();
870 }
871
872 free_space_ids--;
873
874 index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
875 space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
876 space_id_index = index;
877
878 spin_unlock(&sid_lock);
879
880 return index << SPACEID_SHIFT;
881}
882
883void free_sid(unsigned long spaceid)
884{
885 unsigned long index = spaceid >> SPACEID_SHIFT;
886 unsigned long *dirty_space_offset;
887
888 dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
889 index &= (BITS_PER_LONG - 1);
890
891 spin_lock(&sid_lock);
892
893 if (*dirty_space_offset & (1L << index))
894 BUG(); /* attempt to free space id twice */
895
896 *dirty_space_offset |= (1L << index);
897 dirty_space_ids++;
898
899 spin_unlock(&sid_lock);
900}
901
902
903#ifdef CONFIG_SMP
904static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
905{
906 int i;
907
908 /* NOTE: sid_lock must be held upon entry */
909
910 *ndirtyptr = dirty_space_ids;
911 if (dirty_space_ids != 0) {
912 for (i = 0; i < SID_ARRAY_SIZE; i++) {
913 dirty_array[i] = dirty_space_id[i];
914 dirty_space_id[i] = 0;
915 }
916 dirty_space_ids = 0;
917 }
918
919 return;
920}
921
922static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
923{
924 int i;
925
926 /* NOTE: sid_lock must be held upon entry */
927
928 if (ndirty != 0) {
929 for (i = 0; i < SID_ARRAY_SIZE; i++) {
930 space_id[i] ^= dirty_array[i];
931 }
932
933 free_space_ids += ndirty;
934 space_id_index = 0;
935 }
936}
937
938#else /* CONFIG_SMP */
939
940static void recycle_sids(void)
941{
942 int i;
943
944 /* NOTE: sid_lock must be held upon entry */
945
946 if (dirty_space_ids != 0) {
947 for (i = 0; i < SID_ARRAY_SIZE; i++) {
948 space_id[i] ^= dirty_space_id[i];
949 dirty_space_id[i] = 0;
950 }
951
952 free_space_ids += dirty_space_ids;
953 dirty_space_ids = 0;
954 space_id_index = 0;
955 }
956}
957#endif
958
959/*
960 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
961 * purged, we can safely reuse the space ids that were released but
962 * not flushed from the tlb.
963 */
964
965#ifdef CONFIG_SMP
966
967static unsigned long recycle_ndirty;
968static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
969static unsigned int recycle_inuse = 0;
970
971void flush_tlb_all(void)
972{
973 int do_recycle;
974
975 do_recycle = 0;
976 spin_lock(&sid_lock);
977 if (dirty_space_ids > RECYCLE_THRESHOLD) {
978 if (recycle_inuse) {
979 BUG(); /* FIXME: Use a semaphore/wait queue here */
980 }
981 get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
982 recycle_inuse++;
983 do_recycle++;
984 }
985 spin_unlock(&sid_lock);
986 on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
987 if (do_recycle) {
988 spin_lock(&sid_lock);
989 recycle_sids(recycle_ndirty,recycle_dirty_array);
990 recycle_inuse = 0;
991 spin_unlock(&sid_lock);
992 }
993}
994#else
995void flush_tlb_all(void)
996{
997 spin_lock(&sid_lock);
998 flush_tlb_all_local();
999 recycle_sids();
1000 spin_unlock(&sid_lock);
1001}
1002#endif
1003
1004#ifdef CONFIG_BLK_DEV_INITRD
1005void free_initrd_mem(unsigned long start, unsigned long end)
1006{
1007#if 0
1008 if (start < end)
1009 printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
1010 for (; start < end; start += PAGE_SIZE) {
1011 ClearPageReserved(virt_to_page(start));
1012 set_page_count(virt_to_page(start), 1);
1013 free_page(start);
1014 num_physpages++;
1015 totalram_pages++;
1016 }
1017#endif
1018}
1019#endif
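The space-id allocator above is a bitmap with deferred recycling: alloc_sid() finds and sets a clear bit, free_sid() only marks the id dirty, and flush_tlb_all() is the point at which dirty ids become allocatable again. A toy user-space model of that life cycle (sizes shrunk, locking and the real TLB flush omitted, all names invented):

#include <stdio.h>

#define NR_IDS 64			/* tiny stand-in for NR_SPACE_IDS */

static unsigned long long used = 1;	/* bit 0 set: id 0 disallowed */
static unsigned long long dirty;	/* freed, but not flushed yet */

static int alloc_id(void)
{
	for (int i = 0; i < NR_IDS; i++)
		if (!(used >> i & 1)) {
			used |= 1ULL << i;
			return i;
		}
	return -1;			/* would have to flush first */
}

static void free_id(int i)
{
	dirty |= 1ULL << i;		/* defer: stale TLB entries may remain */
}

static void flush_all(void)		/* plays the role of flush_tlb_all() */
{
	used ^= dirty;			/* cf. recycle_sids(): xor clears them */
	dirty = 0;
}

int main(void)
{
	int a = alloc_id();
	free_id(a);
	printf("before flush, id %d still marked used: %llu\n", a, used >> a & 1);
	flush_all();
	printf("after flush, id %d marked used: %llu\n", a, used >> a & 1);
	return 0;
}

The xor in flush_all() mirrors recycle_sids(): a freed id keeps its bit set in the live bitmap until a flush, so it cannot be handed out again while stale translations might still reference it.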
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
new file mode 100644
index 000000000000..f2df502cdae3
--- /dev/null
+++ b/arch/parisc/mm/ioremap.c
@@ -0,0 +1,207 @@
1/*
2 * arch/parisc/mm/ioremap.c
3 *
4 * Re-map IO memory to kernel address space so that we can access it.
5 * This is needed for high PCI addresses that aren't mapped in the
6 * 640k-1MB IO memory area on PC's
7 *
8 * (C) Copyright 1995 1996 Linus Torvalds
9 * (C) Copyright 2001 Helge Deller <deller@gmx.de>
10 */
11
12#include <linux/vmalloc.h>
13#include <linux/errno.h>
14#include <linux/module.h>
15#include <asm/io.h>
16#include <asm/pgalloc.h>
17
18static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
19 unsigned long phys_addr, unsigned long flags)
20{
21 unsigned long end;
22
23 address &= ~PMD_MASK;
24 end = address + size;
25 if (end > PMD_SIZE)
26 end = PMD_SIZE;
27 if (address >= end)
28 BUG();
29 do {
30 if (!pte_none(*pte)) {
31 printk(KERN_ERR "remap_area_pte: page already exists\n");
32 BUG();
33 }
34 set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
35 _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
36 address += PAGE_SIZE;
37 phys_addr += PAGE_SIZE;
38 pte++;
39 } while (address && (address < end));
40}
41
42static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
43 unsigned long phys_addr, unsigned long flags)
44{
45 unsigned long end;
46
47 address &= ~PGDIR_MASK;
48 end = address + size;
49 if (end > PGDIR_SIZE)
50 end = PGDIR_SIZE;
51 phys_addr -= address;
52 if (address >= end)
53 BUG();
54 do {
55 pte_t * pte = pte_alloc_kernel(NULL, pmd, address);
56 if (!pte)
57 return -ENOMEM;
58 remap_area_pte(pte, address, end - address, address + phys_addr, flags);
59 address = (address + PMD_SIZE) & PMD_MASK;
60 pmd++;
61 } while (address && (address < end));
62 return 0;
63}
64
65#if (USE_HPPA_IOREMAP)
66static int remap_area_pages(unsigned long address, unsigned long phys_addr,
67 unsigned long size, unsigned long flags)
68{
69 int error;
70 pgd_t * dir;
71 unsigned long end = address + size;
72
73 phys_addr -= address;
74 dir = pgd_offset(&init_mm, address);
75 flush_cache_all();
76 if (address >= end)
77 BUG();
78 spin_lock(&init_mm.page_table_lock);
79 do {
80 pmd_t *pmd;
81 pmd = pmd_alloc(dir, address);
82 error = -ENOMEM;
83 if (!pmd)
84 break;
85 if (remap_area_pmd(pmd, address, end - address,
86 phys_addr + address, flags))
87 break;
88 error = 0;
89 address = (address + PGDIR_SIZE) & PGDIR_MASK;
90 dir++;
91 } while (address && (address < end));
92 spin_unlock(&init_mm.page_table_lock);
93 flush_tlb_all();
94 return error;
95}
96#endif /* USE_HPPA_IOREMAP */
97
98#ifdef CONFIG_DEBUG_IOREMAP
99static unsigned long last = 0;
100
101void gsc_bad_addr(unsigned long addr)
102{
103 if (time_after(jiffies, last + HZ*10)) {
104 printk("gsc_foo() called with bad address 0x%lx\n", addr);
105 dump_stack();
106 last = jiffies;
107 }
108}
109EXPORT_SYMBOL(gsc_bad_addr);
110
111void __raw_bad_addr(const volatile void __iomem *addr)
112{
113 if (time_after(jiffies, last + HZ*10)) {
114 printk("__raw_foo() called with bad address 0x%p\n", addr);
115 dump_stack();
116 last = jiffies;
117 }
118}
119EXPORT_SYMBOL(__raw_bad_addr);
120#endif
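/*
 * (illustrative note) the time_after(jiffies, last + HZ*10) test above
 * rate-limits these reports to one every ten seconds, and time_after()
 * compares with signed arithmetic so it stays correct across jiffies
 * wraparound.
 */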
121
122/*
123 * Generic mapping function (not visible outside):
124 */
125
126/*
127 * Remap an arbitrary physical address space into the kernel virtual
128 * address space. Needed when the kernel wants to access high addresses
129 * directly.
130 *
131 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
132 * have to convert them into an offset in a page-aligned mapping, but the
133 * caller shouldn't need to know that small detail.
134 */
135void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
136{
137#if !(USE_HPPA_IOREMAP)
138
139 unsigned long end = phys_addr + size - 1;
140 /* Support EISA addresses */
141 if ((phys_addr >= 0x00080000 && end < 0x000fffff)
142 || (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
143 phys_addr |= 0xfc000000;
144 }
145
146#ifdef CONFIG_DEBUG_IOREMAP
147 return (void __iomem *)(phys_addr - (0x1UL << NYBBLE_SHIFT));
148#else
149 return (void __iomem *)phys_addr;
150#endif
151
152#else
153 void * addr;
154 struct vm_struct * area;
155 unsigned long offset, last_addr;
156
157 /* Don't allow wraparound or zero size */
158 last_addr = phys_addr + size - 1;
159 if (!size || last_addr < phys_addr)
160 return NULL;
161
162 /*
163 * Don't allow anybody to remap normal RAM that we're using..
164 */
165 if (phys_addr < virt_to_phys(high_memory)) {
166 char *t_addr, *t_end;
167 struct page *page;
168
169 t_addr = __va(phys_addr);
170 t_end = t_addr + (size - 1);
171
172 for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
173 if(!PageReserved(page))
174 return NULL;
175 }
176
177 /*
178 * Mappings have to be page-aligned
179 */
180 offset = phys_addr & ~PAGE_MASK;
181 phys_addr &= PAGE_MASK;
182 size = PAGE_ALIGN(last_addr) - phys_addr;
183
184 /*
185 * Ok, go for it..
186 */
187 area = get_vm_area(size, VM_IOREMAP);
188 if (!area)
189 return NULL;
190 addr = area->addr;
191 if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
192 vfree(addr);
193 return NULL;
194 }
195 return (void __iomem *) (offset + (char *)addr);
196#endif
197}
198
199void iounmap(void __iomem *addr)
200{
201#if !(USE_HPPA_IOREMAP)
202 return;
203#else
204 if (addr > high_memory)
205 return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
206#endif
207}
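In its USE_HPPA_IOREMAP form, __ioremap() accepts non-page-aligned requests by splitting the physical address into a page-aligned base plus an in-page offset, mapping whole pages, and handing back base + offset. A small sketch of just that arithmetic (a 4K page size is assumed for the example):

#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumed for this example */
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys_addr = 0xf1000123UL, size = 0x200UL;
	unsigned long last_addr = phys_addr + size - 1;

	unsigned long offset = phys_addr & ~PAGE_MASK;	/* 0x123 */
	phys_addr &= PAGE_MASK;				/* 0xf1000000 */
	size = PAGE_ALIGN(last_addr) - phys_addr;	/* 0x1000: one page */

	printf("map %#lx for %#lx bytes, return mapping + %#lx\n",
	       phys_addr, size, offset);
	return 0;
}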
diff --git a/arch/parisc/mm/kmap.c b/arch/parisc/mm/kmap.c
new file mode 100644
index 000000000000..1b1acd5e2f6e
--- /dev/null
+++ b/arch/parisc/mm/kmap.c
@@ -0,0 +1,166 @@
1/*
2 * kmap/page table map and unmap support routines
3 *
4 * Copyright 1999,2000 Hewlett-Packard Company
5 * Copyright 2000 John Marvin <jsm at hp.com>
6 * Copyright 2000 Grant Grundler <grundler at parisc-linux.org>
7 * Copyright 2000 Philipp Rumpf <prumpf@tux.org>
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24/*
25** Stolen mostly from arch/parisc/kernel/pci-dma.c
26*/
27
28#include <linux/types.h>
29#include <linux/mm.h>
30#include <linux/string.h>
31#include <linux/pci.h>
32
33#include <linux/slab.h>
34#include <linux/vmalloc.h>
35
36#include <asm/uaccess.h>
37#include <asm/pgalloc.h>
38
39#include <asm/io.h>
40#include <asm/page.h> /* get_order */
41
42#undef flush_cache_all
43#define flush_cache_all flush_all_caches
44
45typedef void (*pte_iterator_t) (pte_t * pte, unsigned long arg);
46
47#if 0
48/* XXX This routine could be used with iterate_page() to replace
49 * unmap_uncached_page() and save a little code space but I didn't
50 * do that since I'm not certain whether this is the right path. -PB
51 */
52static void unmap_cached_pte(pte_t * pte, unsigned long addr, unsigned long arg)
53{
54 pte_t page = *pte;
55 pte_clear(&init_mm, addr, pte);
56 if (!pte_none(page)) {
57 if (pte_present(page)) {
58 unsigned long map_nr = pte_pagenr(page);
59 if (map_nr < max_mapnr)
60 __free_page(mem_map + map_nr);
61 } else {
62 printk(KERN_CRIT
63 "Whee.. Swapped out page in kernel page table\n");
64 }
65 }
66}
67#endif
68
69/* These two routines should probably check a few things... */
70static void set_uncached(pte_t * pte, unsigned long arg)
71{
72 pte_val(*pte) |= _PAGE_NO_CACHE;
73}
74
75static void set_cached(pte_t * pte, unsigned long arg)
76{
77 pte_val(*pte) &= ~_PAGE_NO_CACHE;
78}
79
80static inline void iterate_pte(pmd_t * pmd, unsigned long address,
81 unsigned long size, pte_iterator_t op,
82 unsigned long arg)
83{
84 pte_t *pte;
85 unsigned long end;
86
87 if (pmd_none(*pmd))
88 return;
89 if (pmd_bad(*pmd)) {
90 pmd_ERROR(*pmd);
91 pmd_clear(pmd);
92 return;
93 }
94 pte = pte_offset(pmd, address);
95 address &= ~PMD_MASK;
96 end = address + size;
97 if (end > PMD_SIZE)
98 end = PMD_SIZE;
99 do {
100 op(pte, arg);
101 address += PAGE_SIZE;
102 pte++;
103 } while (address < end);
104}
105
106static inline void iterate_pmd(pgd_t * dir, unsigned long address,
107 unsigned long size, pte_iterator_t op,
108 unsigned long arg)
109{
110 pmd_t *pmd;
111 unsigned long end;
112
113 if (pgd_none(*dir))
114 return;
115 if (pgd_bad(*dir)) {
116 pgd_ERROR(*dir);
117 pgd_clear(dir);
118 return;
119 }
120 pmd = pmd_offset(dir, address);
121 address &= ~PGDIR_MASK;
122 end = address + size;
123 if (end > PGDIR_SIZE)
124 end = PGDIR_SIZE;
125 do {
126 iterate_pte(pmd, address, end - address, op, arg);
127 address = (address + PMD_SIZE) & PMD_MASK;
128 pmd++;
129 } while (address < end);
130}
131
132static void iterate_pages(unsigned long address, unsigned long size,
133 pte_iterator_t op, unsigned long arg)
134{
135 pgd_t *dir;
136 unsigned long end = address + size;
137
138 dir = pgd_offset_k(address);
139 flush_cache_all();
140 do {
141 iterate_pmd(dir, address, end - address, op, arg);
142 address = (address + PGDIR_SIZE) & PGDIR_MASK;
143 dir++;
144 } while (address && (address < end));
145 flush_tlb_all();
146}
147
148void
149kernel_set_cachemode(unsigned long vaddr, unsigned long size, int what)
150{
151 switch (what) {
152 case IOMAP_FULL_CACHING:
153 iterate_pages(vaddr, size, set_cached, 0);
154 flush_tlb_range(NULL, vaddr, vaddr + size);
155 break;
156 case IOMAP_NOCACHE_SER:
157 iterate_pages(vaddr, size, set_uncached, 0);
158 flush_tlb_range(NULL, vaddr, vaddr + size);
159 break;
160 default:
161 printk(KERN_CRIT
162 "kernel_set_cachemode mode %d not understood\n",
163 what);
164 break;
165 }
166}
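kernel_set_cachemode() above rests on a small callback-walker pattern: iterate_pages(), iterate_pmd() and iterate_pte() descend the page-table levels and apply a pte_iterator_t to every PTE in a virtual range. Stripped of the page tables, the pattern is just "apply an operation with an argument across a range", as in this sketch (all names invented; the flag value is arbitrary rather than the real _PAGE_NO_CACHE):

#include <stdio.h>

/* Same shape as pte_iterator_t: an operation plus an opaque argument. */
typedef void (*op_t)(unsigned long *slot, unsigned long arg);

static void set_flag(unsigned long *slot, unsigned long arg)
{
	*slot |= arg;			/* cf. set_uncached() */
}

static void clear_flag(unsigned long *slot, unsigned long arg)
{
	*slot &= ~arg;			/* cf. set_cached() */
}

static void iterate(unsigned long *tbl, int n, op_t op, unsigned long arg)
{
	for (int i = 0; i < n; i++)
		op(&tbl[i], arg);	/* cf. iterate_pte()'s inner loop */
}

int main(void)
{
	unsigned long ptes[4] = { 0 };

	iterate(ptes, 4, set_flag, 0x400);	/* mark all uncacheable */
	iterate(ptes, 2, clear_flag, 0x400);	/* make the first two cached */
	printf("%lx %lx %lx %lx\n", ptes[0], ptes[1], ptes[2], ptes[3]);
	return 0;
}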