Diffstat (limited to 'arch/avr32/mm')
 -rw-r--r--  arch/avr32/mm/Makefile       |    6
 -rw-r--r--  arch/avr32/mm/cache.c        |  150
 -rw-r--r--  arch/avr32/mm/clear_page.S   |   25
 -rw-r--r--  arch/avr32/mm/copy_page.S    |   28
 -rw-r--r--  arch/avr32/mm/dma-coherent.c |  139
 -rw-r--r--  arch/avr32/mm/fault.c        |  315
 -rw-r--r--  arch/avr32/mm/init.c         |  480
 -rw-r--r--  arch/avr32/mm/ioremap.c      |  197
 -rw-r--r--  arch/avr32/mm/tlb.c          |  378
 9 files changed, 1718 insertions(+), 0 deletions(-)
diff --git a/arch/avr32/mm/Makefile b/arch/avr32/mm/Makefile
new file mode 100644
index 000000000000..0066491f90d4
--- /dev/null
+++ b/arch/avr32/mm/Makefile
@@ -0,0 +1,6 @@
#
# Makefile for the Linux/AVR32 kernel.
#

obj-y	+= init.o clear_page.o copy_page.o dma-coherent.o
obj-y	+= ioremap.o cache.o fault.o tlb.o
diff --git a/arch/avr32/mm/cache.c b/arch/avr32/mm/cache.c
new file mode 100644
index 000000000000..450515b245a0
--- /dev/null
+++ b/arch/avr32/mm/cache.c
@@ -0,0 +1,150 @@
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/highmem.h>
#include <linux/unistd.h>

#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

/*
 * If you attempt to flush anything more than this, you need superuser
 * privileges.  The value is completely arbitrary.
 */
#define CACHEFLUSH_MAX_LEN	1024

void invalidate_dcache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz;

	linesz = boot_cpu_data.dcache.linesz;

	//printk("invalidate dcache: %p + %u\n", start, size);

	/* You asked for it, you got it */
	begin = (unsigned long)start & ~(linesz - 1);
	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

	for (v = begin; v < end; v += linesz)
		invalidate_dcache_line((void *)v);
}

void clean_dcache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz;

	linesz = boot_cpu_data.dcache.linesz;
	begin = (unsigned long)start & ~(linesz - 1);
	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

	for (v = begin; v < end; v += linesz)
		clean_dcache_line((void *)v);
	flush_write_buffer();
}

void flush_dcache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz;

	linesz = boot_cpu_data.dcache.linesz;
	begin = (unsigned long)start & ~(linesz - 1);
	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

	for (v = begin; v < end; v += linesz)
		flush_dcache_line((void *)v);
	flush_write_buffer();
}

void invalidate_icache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz;

	linesz = boot_cpu_data.icache.linesz;
	begin = (unsigned long)start & ~(linesz - 1);
	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

	for (v = begin; v < end; v += linesz)
		invalidate_icache_line((void *)v);
}

static inline void __flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long v, linesz;

	linesz = boot_cpu_data.dcache.linesz;
	for (v = start; v < end; v += linesz) {
		clean_dcache_line((void *)v);
		invalidate_icache_line((void *)v);
	}

	flush_write_buffer();
}

/*
 * This one is called after a module has been loaded.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long linesz;

	linesz = boot_cpu_data.dcache.linesz;
	__flush_icache_range(start & ~(linesz - 1),
			     (end + linesz - 1) & ~(linesz - 1));
}

/*
 * This one is called from do_no_page(), do_swap_page() and install_page().
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	if (vma->vm_flags & VM_EXEC) {
		void *v = kmap(page);
		__flush_icache_range((unsigned long)v, (unsigned long)v + PAGE_SIZE);
		kunmap(page);
	}
}

/*
 * This one is used by copy_to_user_page()
 */
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	if (vma->vm_flags & VM_EXEC)
		flush_icache_range(addr, addr + len);
}

asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len)
{
	int ret;

	if (len > CACHEFLUSH_MAX_LEN) {
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	}

	ret = -EFAULT;
	if (!access_ok(VERIFY_WRITE, addr, len))
		goto out;

	switch (operation) {
	case CACHE_IFLUSH:
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + len);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

out:
	return ret;
}
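
A minimal userspace sketch of how the sys_cacheflush() syscall above might be
used, e.g. by a JIT after writing instructions into a buffer. The
__NR_cacheflush number and the CACHE_IFLUSH value are assumptions based on the
AVR32 userspace headers; neither is shown in this patch.

/* Hypothetical usage sketch -- not part of the patch. */
#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef CACHE_IFLUSH
# define CACHE_IFLUSH	0	/* assumed value, see <asm/cachectl.h> */
#endif

static int flush_generated_code(void *buf, size_t len)
{
	/* Matches sys_cacheflush(operation, addr, len) above; requests
	 * longer than CACHEFLUSH_MAX_LEN (1024) need CAP_SYS_ADMIN. */
	return syscall(__NR_cacheflush, CACHE_IFLUSH, buf, len);
}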
diff --git a/arch/avr32/mm/clear_page.S b/arch/avr32/mm/clear_page.S
new file mode 100644
index 000000000000..5d70dca00699
--- /dev/null
+++ b/arch/avr32/mm/clear_page.S
@@ -0,0 +1,25 @@
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/page.h>

/*
 * clear_page
 * r12: P1 address (to)
 */
	.text
	.global	clear_page
clear_page:
	sub	r9, r12, -PAGE_SIZE
	mov	r10, 0
	mov	r11, 0
0:	st.d	r12++, r10
	cp	r12, r9
	brne	0b
	mov	pc, lr
diff --git a/arch/avr32/mm/copy_page.S b/arch/avr32/mm/copy_page.S
new file mode 100644
index 000000000000..c2b3752946b8
--- /dev/null
+++ b/arch/avr32/mm/copy_page.S
@@ -0,0 +1,28 @@
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/page.h>

/*
 * copy_page
 *
 * r12		to (P1 address)
 * r11		from (P1 address)
 * r8-r10	scratch
 */
	.text
	.global	copy_page
copy_page:
	sub	r10, r11, -(1 << PAGE_SHIFT)
	/* pref	r11[0] */
1:	/* pref	r11[8] */
	ld.d	r8, r11++
	st.d	r12++, r8
	cp	r11, r10
	brlo	1b
	mov	pc, lr
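
For reference, the two assembly routines above are roughly equivalent to the
following C loops (an explanatory sketch, not part of the patch). The
"sub rX, rY, -IMM" idiom simply adds IMM; AVR32's sub apparently accepts a
wider immediate than add, so additions are written as subtracting a negative.
The real routines move 8 bytes per iteration via ld.d/st.d.

/* Explanatory sketch only; assumes 4 KiB pages (PAGE_SIZE = 1 << PAGE_SHIFT). */
#include <stdint.h>

void clear_page_c(void *to)
{
	uint64_t *p = to;
	uint64_t *end = (uint64_t *)((char *)to + 4096);

	while (p < end)			/* 0: st.d r12++, r10        */
		*p++ = 0;		/*    cp r12, r9; brne 0b    */
}

void copy_page_c(void *to, const void *from)
{
	uint64_t *d = to;
	const uint64_t *s = from;
	const uint64_t *end = (const uint64_t *)((const char *)from + 4096);

	while (s < end)			/* 1: ld.d r8, r11++         */
		*d++ = *s++;		/*    st.d r12++, r8         */
					/*    cp r11, r10; brlo 1b   */
}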
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
new file mode 100644
index 000000000000..44ab8a7bdae2
--- /dev/null
+++ b/arch/avr32/mm/dma-coherent.c
@@ -0,0 +1,139 @@
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>

void dma_cache_sync(void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dma_cache_inv(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dma_cache_wback(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dma_cache_wback_inv(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}

static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);
	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);

#if 0
void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_alloc(dev, size, handle, gfp);

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(page_to_phys(page), size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);

void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = bus_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);
#endif
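
A hypothetical driver-side sketch of the coherent DMA API implemented above;
"mydev" and the descriptor layout are invented for illustration and are not
part of this patch.

/* Hypothetical usage sketch -- not part of the patch. */
#include <linux/dma-mapping.h>

#define MYDEV_RING_LEN	16		/* invented ring size */

struct mydev_desc {
	u32	buf_addr;
	u32	buf_len;
};

static int mydev_alloc_ring(struct device *dev, struct mydev_desc **ring,
			    dma_addr_t *dma)
{
	/* On AVR32 this returns a P2 (uncached) alias of the pages, so no
	 * further cache maintenance is needed; the device uses *dma. */
	*ring = dma_alloc_coherent(dev, MYDEV_RING_LEN * sizeof(**ring),
				   dma, GFP_KERNEL);
	return *ring ? 0 : -ENOMEM;
}

static void mydev_free_ring(struct device *dev, struct mydev_desc *ring,
			    dma_addr_t dma)
{
	dma_free_coherent(dev, MYDEV_RING_LEN * sizeof(*ring), ring, dma);
}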
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
new file mode 100644
index 000000000000..678557260a35
--- /dev/null
+++ b/arch/avr32/mm/fault.c
@@ -0,0 +1,315 @@
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/arch/sh/mm/fault.c:
 *   Copyright (C) 1999 Niibe Yutaka
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>

#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>

#ifdef DEBUG
static void dump_code(unsigned long pc)
{
	char *p = (char *)pc;
	char val;
	int i;

	printk(KERN_DEBUG "Code:");
	for (i = 0; i < 16; i++) {
		if (__get_user(val, p + i))
			break;
		printk(" %02x", val);
	}
	printk("\n");
}
#endif

#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}

int unregister_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}

static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
				    int trap, int sig)
{
	struct die_args args = {
		.regs = regs,
		.trapnr = trap,
	};
	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
				    int trap, int sig)
{
	return NOTIFY_DONE;
}
#endif

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * ecr is the Exception Cause Register. Possible values are:
 *   5:  Page not found (instruction access)
 *   6:  Protection fault (instruction access)
 *   12: Page not found (read access)
 *   13: Page not found (write access)
 *   14: Protection fault (read access)
 *   15: Protection fault (write access)
 */
asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	unsigned long address;
	unsigned long page;
	int writeaccess = 0;

	if (notify_page_fault(DIE_PAGE_FAULT, regs,
			      ecr, SIGSEGV) == NOTIFY_STOP)
		return;

	address = sysreg_read(TLBEAR);

	tsk = current;
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user context, we must
	 * not take the fault...
	 */
	if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
		goto no_context;

	local_irq_enable();

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so we
	 * can handle it...
	 */
good_area:
	//pr_debug("good area: vm_flags = 0x%lx\n", vma->vm_flags);
	switch (ecr) {
	case ECR_PROTECTION_X:
	case ECR_TLB_MISS_X:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case ECR_PROTECTION_R:
	case ECR_TLB_MISS_R:
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
		break;
	case ECR_PROTECTION_W:
	case ECR_TLB_MISS_W:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		writeaccess = 1;
		break;
	default:
		panic("Unhandled case %lu in do_page_fault!", ecr);
	}

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
survive:
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory
	 * map. Fix it, but check if it's kernel or user first...
	 */
bad_area:
	pr_debug("Bad area [%s:%u]: addr %08lx, ecr %lu\n",
		 tsk->comm, tsk->pid, address, ecr);

	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		/* Hmm...we have to pass address and ecr somehow... */
		/* tsk->thread.address = address;
		   tsk->thread.error_code = ecr; */
#ifdef DEBUG
		show_regs(regs);
		dump_code(regs->pc);

		page = sysreg_read(PTBR);
		printk("ptbr = %08lx", page);
		if (page) {
			page = ((unsigned long *)page)[address >> 22];
			printk(" pgd = %08lx", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
				printk(" pte = %08lx\n", page);
			}
		}
#endif
		pr_debug("Sending SIGSEGV to PID %d...\n",
			 tsk->pid);
		force_sig(SIGSEGV, tsk);
		return;
	}

no_context:
	pr_debug("No context\n");

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		pr_debug("Found fixup at %08lx\n", fixup->fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have
	 * to terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT
		       "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);

	page = sysreg_read(PTBR);
	printk(KERN_ALERT "ptbr = %08lx", page);
	if (page) {
		page = ((unsigned long *)page)[address >> 22];
		printk(" pgd = %08lx", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
			printk(" pte = %08lx\n", page);
		}
	}
	die("\nOops", regs, ecr);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us
	 * that made us unable to handle the page fault gracefully.
	 */
out_of_memory:
	printk("Out of memory\n");
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: Killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel or
	 * user mode.
	 */
	/* address, error_code, trap_no, ... */
#ifdef DEBUG
	show_regs(regs);
	dump_code(regs->pc);
#endif
	pr_debug("Sending SIGBUS to PID %d...\n", tsk->pid);
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

asmlinkage void do_bus_error(unsigned long addr, int write_access,
			     struct pt_regs *regs)
{
	printk(KERN_ALERT
	       "Bus error at physical address 0x%08lx (%s access)\n",
	       addr, write_access ? "write" : "read");
	printk(KERN_INFO "DTLB dump:\n");
	dump_dtlb();
	die("Bus Error", regs, write_access);
	do_exit(SIGKILL);
}

/*
 * This functionality is currently not possible to implement because
 * we're using segmentation to ensure a fixed mapping of the kernel
 * virtual address space.
 *
 * It would be possible to implement this, but it would require us to
 * disable segmentation at startup and load the kernel mappings into
 * the TLB like any other pages. There will be lots of trickery to
 * avoid recursive invocation of the TLB miss handler, though...
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{

}
EXPORT_SYMBOL(kernel_map_pages);
#endif
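
The no_context path above recovers from faults on user pointers via the
exception table: every instruction that may legitimately fault (e.g. inside
__get_user()) records a fixup address, conceptually as below. This is a
generic-kernel sketch for illustration; the actual AVR32 entries are emitted
by the uaccess assembly, which is not part of this file.

/* Illustrative sketch: search_exception_tables(regs->pc) looks up the
 * faulting instruction and do_page_fault() resumes at entry->fixup,
 * which typically loads -EFAULT and branches back. */
struct exception_table_entry {
	unsigned long insn;	/* address of the instruction allowed to fault */
	unsigned long fixup;	/* address to resume execution at */
};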
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
new file mode 100644
index 000000000000..3e6c41039808
--- /dev/null
+++ b/arch/avr32/mm/init.c
@@ -0,0 +1,480 @@
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/nodemask.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/setup.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

pgd_t swapper_pg_dir[PTRS_PER_PGD];

struct page *empty_zero_page;

/*
 * Cache of MMU context last used.
 */
unsigned long mmu_context_cache = NO_CONTEXT;

#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)

void show_mem(void)
{
	int total = 0, reserved = 0, cached = 0;
	int slab = 0, free = 0, shared = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_online_pgdat(pgdat) {
		struct page *page, *end;

		page = pgdat->node_mem_map;
		end = page + pgdat->node_spanned_pages;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void __init print_memory_map(const char *what,
				    struct tag_mem_range *mem)
{
	printk("%s:\n", what);
	for (; mem; mem = mem->next) {
		printk("  %08lx - %08lx\n",
		       (unsigned long)mem->addr,
		       (unsigned long)(mem->addr + mem->size));
	}
}

#define MAX_LOWMEM	HIGHMEM_START
#define MAX_LOWMEM_PFN	PFN_DOWN(MAX_LOWMEM)

/*
 * Sort a list of memory regions in-place by ascending address.
 *
 * We're using bubble sort because we only have singly linked lists
 * with few elements.
 */
static void __init sort_mem_list(struct tag_mem_range **pmem)
{
	int done;
	struct tag_mem_range **a, **b;

	if (!*pmem)
		return;

	do {
		done = 1;
		a = pmem, b = &(*pmem)->next;
		while (*b) {
			if ((*a)->addr > (*b)->addr) {
				struct tag_mem_range *tmp;
				tmp = (*b)->next;
				(*b)->next = *a;
				*a = *b;
				*b = tmp;
				done = 0;
			}
			a = &(*a)->next;
			b = &(*a)->next;
		}
	} while (!done);
}

/*
 * Find a free memory region large enough for storing the
 * bootmem bitmap.
 */
static unsigned long __init
find_bootmap_pfn(const struct tag_mem_range *mem)
{
	unsigned long bootmap_pages, bootmap_len;
	unsigned long node_pages = PFN_UP(mem->size);
	unsigned long bootmap_addr = mem->addr;
	struct tag_mem_range *reserved = mem_reserved;
	struct tag_mem_range *ramdisk = mem_ramdisk;
	unsigned long kern_start = virt_to_phys(_stext);
	unsigned long kern_end = virt_to_phys(_end);

	bootmap_pages = bootmem_bootmap_pages(node_pages);
	bootmap_len = bootmap_pages << PAGE_SHIFT;

	/*
	 * Find a large enough region without reserved pages for
	 * storing the bootmem bitmap. We can take advantage of the
	 * fact that all lists have been sorted.
	 *
	 * We have to check explicitly reserved regions as well as the
	 * kernel image and any RAMDISK images...
	 *
	 * Oh, and we have to make sure we don't overwrite the taglist
	 * since we're going to use it until the bootmem allocator is
	 * fully up and running.
	 */
	while (1) {
		if ((bootmap_addr < kern_end) &&
		    ((bootmap_addr + bootmap_len) > kern_start))
			bootmap_addr = kern_end;

		while (reserved &&
		       (bootmap_addr >= (reserved->addr + reserved->size)))
			reserved = reserved->next;

		if (reserved &&
		    ((bootmap_addr + bootmap_len) >= reserved->addr)) {
			bootmap_addr = reserved->addr + reserved->size;
			continue;
		}

		while (ramdisk &&
		       (bootmap_addr >= (ramdisk->addr + ramdisk->size)))
			ramdisk = ramdisk->next;

		if (!ramdisk ||
		    ((bootmap_addr + bootmap_len) < ramdisk->addr))
			break;

		bootmap_addr = ramdisk->addr + ramdisk->size;
	}

	if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size))
		return ~0UL;

	return PFN_UP(bootmap_addr);
}

void __init setup_bootmem(void)
{
	unsigned bootmap_size;
	unsigned long first_pfn, bootmap_pfn, pages;
	unsigned long max_pfn, max_low_pfn;
	unsigned long kern_start = virt_to_phys(_stext);
	unsigned long kern_end = virt_to_phys(_end);
	unsigned node = 0;
	struct tag_mem_range *bank, *res;

	sort_mem_list(&mem_phys);
	sort_mem_list(&mem_reserved);

	print_memory_map("Physical memory", mem_phys);
	print_memory_map("Reserved memory", mem_reserved);

	nodes_clear(node_online_map);

	if (mem_ramdisk) {
#ifdef CONFIG_BLK_DEV_INITRD
		initrd_start = __va(mem_ramdisk->addr);
		initrd_end = initrd_start + mem_ramdisk->size;

		print_memory_map("RAMDISK images", mem_ramdisk);
		if (mem_ramdisk->next)
			printk(KERN_WARNING
			       "Warning: Only the first RAMDISK image "
			       "will be used\n");
		sort_mem_list(&mem_ramdisk);
#else
		printk(KERN_WARNING "RAM disk image present, but "
		       "no initrd support in kernel!\n");
#endif
	}

	if (mem_phys->next)
		printk(KERN_WARNING "Only using first memory bank\n");

	for (bank = mem_phys; bank; bank = NULL) {
		first_pfn = PFN_UP(bank->addr);
		max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size);
		bootmap_pfn = find_bootmap_pfn(bank);
		if (bootmap_pfn > max_pfn)
			panic("No space for bootmem bitmap!\n");

		if (max_low_pfn > MAX_LOWMEM_PFN) {
			max_low_pfn = MAX_LOWMEM_PFN;
#ifndef CONFIG_HIGHMEM
			/*
			 * Lowmem is memory that can be addressed
			 * directly through P1/P2
			 */
			printk(KERN_WARNING
			       "Node %u: Only %ld MiB of memory will be used.\n",
			       node, MAX_LOWMEM >> 20);
			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else
#error HIGHMEM is not supported by AVR32 yet
#endif
		}

		/* Initialize the boot-time allocator with low memory only. */
		bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
						 first_pfn, max_low_pfn);

		printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n",
		       node, NODE_DATA(node)->bdata,
		       NODE_DATA(node)->bdata->node_bootmem_map);

		/*
		 * Register fully available RAM pages with the bootmem
		 * allocator.
		 */
		pages = max_low_pfn - first_pfn;
		free_bootmem_node(NODE_DATA(node), PFN_PHYS(first_pfn),
				  PFN_PHYS(pages));

		/*
		 * Reserve space for the kernel image (if present in
		 * this node)...
		 */
		if ((kern_start >= PFN_PHYS(first_pfn)) &&
		    (kern_start < PFN_PHYS(max_pfn))) {
			printk("Node %u: Kernel image %08lx - %08lx\n",
			       node, kern_start, kern_end);
			reserve_bootmem_node(NODE_DATA(node), kern_start,
					     kern_end - kern_start);
		}

		/* ...the bootmem bitmap... */
		reserve_bootmem_node(NODE_DATA(node),
				     PFN_PHYS(bootmap_pfn),
				     bootmap_size);

		/* ...any RAMDISK images... */
		for (res = mem_ramdisk; res; res = res->next) {
			if (res->addr > PFN_PHYS(max_pfn))
				break;

			if (res->addr >= PFN_PHYS(first_pfn)) {
				printk("Node %u: RAMDISK %08lx - %08lx\n",
				       node,
				       (unsigned long)res->addr,
				       (unsigned long)(res->addr + res->size));
				reserve_bootmem_node(NODE_DATA(node),
						     res->addr, res->size);
			}
		}

		/* ...and any other reserved regions. */
		for (res = mem_reserved; res; res = res->next) {
			if (res->addr > PFN_PHYS(max_pfn))
				break;

			if (res->addr >= PFN_PHYS(first_pfn)) {
				printk("Node %u: Reserved %08lx - %08lx\n",
				       node,
				       (unsigned long)res->addr,
				       (unsigned long)(res->addr + res->size));
				reserve_bootmem_node(NODE_DATA(node),
						     res->addr, res->size);
			}
		}

		node_set_online(node);
	}
}

/*
 * paging_init() sets up the page tables
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	extern unsigned long _evba;
	void *zero_page;
	int nid;

	/*
	 * Make sure we can handle exceptions before enabling
	 * paging. Not that we should ever _get_ any exceptions this
	 * early, but you never know...
	 */
	printk("Exception vectors start at %p\n", &_evba);
	sysreg_write(EVBA, (unsigned long)&_evba);

	/*
	 * Since we are ready to handle exceptions now, we should let
	 * the CPU generate them...
	 */
	__asm__ __volatile__ ("csrf %0" : : "i"(SR_EM_BIT));

	/*
	 * Allocate the zero page. The allocator will panic if it
	 * can't satisfy the request, so no need to check.
	 */
	zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0),
						 PAGE_SIZE);

	{
		pgd_t *pg_dir;
		int i;

		pg_dir = swapper_pg_dir;
		sysreg_write(PTBR, (unsigned long)pg_dir);

		for (i = 0; i < PTRS_PER_PGD; i++)
			pgd_val(pg_dir[i]) = 0;

		enable_mmu();
		printk("CPU: Paging enabled\n");
	}

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_boot_start;
		start_pfn >>= PAGE_SHIFT;
		low = pgdat->bdata->node_low_pfn;

		memset(zones_size, 0, sizeof(zones_size));
		zones_size[ZONE_NORMAL] = low - start_pfn;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);

		free_area_init_node(nid, pgdat, zones_size, start_pfn, NULL);

		printk("Node %u: mem_map starts at %p\n",
		       pgdat->node_id, pgdat->node_mem_map);
	}

	mem_map = NODE_DATA(0)->node_mem_map;

	memset(zero_page, 0, PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int nid, i;

	reservedpages = 0;
	high_memory = NULL;

	/* this will put all low memory onto the freelists */
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages != 0)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		for (i = 0; i < node_pages; i++)
			if (PageReserved(pgdat->node_mem_map + i))
				reservedpages++;

		node_high_memory = (void *)((pgdat->node_start_pfn
					     + pgdat->node_spanned_pages)
					    << PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	max_mapnr = MAP_NR(high_memory);

	codesize = (unsigned long)_etext - (unsigned long)_text;
	datasize = (unsigned long)_edata - (unsigned long)_data;
	initsize = (unsigned long)__init_end - (unsigned long)__init_begin;

	printk("Memory: %luk/%luk available (%dk kernel code, "
	       "%dk reserved, %dk data, %dk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
	       totalram_pages << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10);
}

static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
	unsigned int size = (end - addr) >> 10;

	for (; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(addr);
		totalram_pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n",
		       s, size, end - (size << 10), end);
}

void free_initmem(void)
{
	free_area((unsigned long)__init_begin, (unsigned long)__init_end,
		  "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_area(start, end, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
diff --git a/arch/avr32/mm/ioremap.c b/arch/avr32/mm/ioremap.c
new file mode 100644
index 000000000000..536021877df6
--- /dev/null
+++ b/arch/avr32/mm/ioremap.c
@@ -0,0 +1,197 @@
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/addrspace.h>

static inline int remap_area_pte(pte_t *pte, unsigned long address,
				 unsigned long end, unsigned long phys_addr,
				 pgprot_t prot)
{
	unsigned long pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	do {
		WARN_ON(!pte_none(*pte));

		set_pte(pte, pfn_pte(pfn, prot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));

	return 0;
}

static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				 unsigned long end, unsigned long phys_addr,
				 pgprot_t prot)
{
	unsigned long next;

	phys_addr -= address;

	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;

		next = (address + PMD_SIZE) & PMD_MASK;
		if (remap_area_pte(pte, address, next,
				   address + phys_addr, prot))
			return -ENOMEM;

		address = next;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int remap_area_pud(pud_t *pud, unsigned long address,
			  unsigned long end, unsigned long phys_addr,
			  pgprot_t prot)
{
	unsigned long next;

	phys_addr -= address;

	do {
		pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		next = (address + PUD_SIZE) & PUD_MASK;
		if (remap_area_pmd(pmd, address, next,
				   phys_addr + address, prot))
			return -ENOMEM;

		address = next;
		pud++;
	} while (address && address < end);

	return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    size_t size, pgprot_t prot)
{
	unsigned long end = address + size;
	unsigned long next;
	pgd_t *pgd;
	int err = 0;

	phys_addr -= address;

	pgd = pgd_offset_k(address);
	flush_cache_all();
	BUG_ON(address >= end);

	spin_lock(&init_mm.page_table_lock);
	do {
		pud_t *pud = pud_alloc(&init_mm, pgd, address);

		err = -ENOMEM;
		if (!pud)
			break;

		next = (address + PGDIR_SIZE) & PGDIR_MASK;
		if (next < address || next > end)
			next = end;
		err = remap_area_pud(pud, address, next,
				     phys_addr + address, prot);
		if (err)
			break;

		address = next;
		pgd++;
	} while (address && (address < end));

	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return err;
}

/*
 * Re-map an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access physical
 * memory directly.
 */
void __iomem *__ioremap(unsigned long phys_addr, size_t size,
			unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/*
	 * Check if we can simply use the P4 segment. This area is
	 * uncacheable, so if caching/buffering is requested, we can't
	 * use it.
	 */
	if ((phys_addr >= P4SEG) && (flags == 0))
		return (void __iomem *)phys_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * XXX: When mapping regular RAM, we'd better make damn sure
	 * it's never used for anything else. But this is really the
	 * caller's responsibility...
	 */
	if (PHYSADDR(P2SEGADDR(phys_addr)) == phys_addr)
		return (void __iomem *)P2SEGADDR(phys_addr);

	/* Mappings have to be page-aligned */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
			| _PAGE_ACCESSED | _PAGE_TYPE_SMALL | flags);

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, prot)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(void __iomem *addr)
{
	struct vm_struct *p;

	if ((unsigned long)addr >= P4SEG)
		return;

	p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (unlikely(!p)) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
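
A hypothetical sketch of mapping a memory-mapped peripheral with the
__ioremap() above; MYDEV_BASE and MYDEV_SIZE are invented values, not from
this patch.

/* Hypothetical usage sketch -- not part of the patch. */
#include <linux/errno.h>
#include <asm/io.h>

#define MYDEV_BASE	0xfff02000UL	/* invented peripheral address */
#define MYDEV_SIZE	0x400

static void __iomem *mydev_regs;

static int mydev_map(void)
{
	/* flags == 0 asks for an uncached mapping; if the physical address
	 * can be reached through the P2 segment, no page tables are used. */
	mydev_regs = __ioremap(MYDEV_BASE, MYDEV_SIZE, 0);
	return mydev_regs ? 0 : -ENOMEM;
}

static void mydev_unmap(void)
{
	__iounmap(mydev_regs);
}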
diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c
new file mode 100644
index 000000000000..5d0523bbe298
--- /dev/null
+++ b/arch/avr32/mm/tlb.c
@@ -0,0 +1,378 @@
/*
 * AVR32 TLB operations
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>

#include <asm/mmu_context.h>

#define _TLBEHI_I	0x100

void show_dtlb_entry(unsigned int index)
{
	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;

	local_irq_save(flags);
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);
	mmucr = mmucr_save & 0x13;
	mmucr |= index << 14;
	sysreg_write(MMUCR, mmucr);

	asm volatile("tlbr" : : : "memory");
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

	printk("%2u: %c %c %02x   %05x %05x %o  %o  %c %c %c %c\n",
	       index,
	       (tlbehi & 0x200)?'1':'0',
	       (tlbelo & 0x100)?'1':'0',
	       (tlbehi & 0xff),
	       (tlbehi >> 12), (tlbelo >> 12),
	       (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
	       (tlbelo & 0x200)?'1':'0',
	       (tlbelo & 0x080)?'1':'0',
	       (tlbelo & 0x001)?'1':'0',
	       (tlbelo & 0x002)?'1':'0');

	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);
}

void dump_dtlb(void)
{
	unsigned int i;

	printk("ID  V G ASID VPN   PFN   AP SZ C B W D\n");
	for (i = 0; i < 32; i++)
		show_dtlb_entry(i);
}

static unsigned long last_mmucr;

static inline void set_replacement_pointer(unsigned shift)
{
	unsigned long mmucr, mmucr_save;

	mmucr = mmucr_save = sysreg_read(MMUCR);

	/* Does this mapping already exist? */
	__asm__ __volatile__(
		"	tlbs\n"
		"	mfsr	%0, %1"
		: "=r"(mmucr)
		: "i"(SYSREG_MMUCR));

	if (mmucr & SYSREG_BIT(MMUCR_N)) {
		/* Not found -- pick a not-recently-accessed entry */
		unsigned long rp;
		unsigned long tlbar = sysreg_read(TLBARLO);

		rp = 32 - fls(tlbar);
		if (rp == 32) {
			rp = 0;
			sysreg_write(TLBARLO, -1L);
		}

		mmucr &= 0x13;
		mmucr |= (rp << shift);

		sysreg_write(MMUCR, mmucr);
	}

	last_mmucr = mmucr;
}

static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid)
{
	unsigned long vpn;

	vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid;
	sysreg_write(TLBEHI, vpn);
	cpu_sync_pipeline();

	set_replacement_pointer(14);

	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);

	/* Let's go */
	asm volatile("nop\n\ttlbw" : : : "memory");
	cpu_sync_pipeline();
}

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;

	/* ptrace may call this routine */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);
	update_dtlb(address, pte, get_asid());
	local_irq_restore(flags);
}

void __flush_tlb_page(unsigned long asid, unsigned long page)
{
	unsigned long mmucr, tlbehi;

	page |= asid;
	sysreg_write(TLBEHI, page);
	cpu_sync_pipeline();
	asm volatile("tlbs");
	mmucr = sysreg_read(MMUCR);

	if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
		unsigned long tlbarlo;
		unsigned long entry;

		/* Clear the "valid" bit */
		tlbehi = sysreg_read(TLBEHI);
		tlbehi &= ~_TLBEHI_VALID;
		sysreg_write(TLBEHI, tlbehi);
		cpu_sync_pipeline();

		/* mark the entry as "not accessed" */
		entry = (mmucr >> 14) & 0x3f;
		tlbarlo = sysreg_read(TLBARLO);
		tlbarlo |= (0x80000000 >> entry);
		sysreg_write(TLBARLO, tlbarlo);

		/* update the entry with valid bit clear */
		asm volatile("tlbw");
		cpu_sync_pipeline();
	}
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags, asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}

		__flush_tlb_page(asid, page);

		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
			unsigned long saved_asid = MMU_NO_ASID;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}

			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}

/*
 * TODO: If this is only called for addresses > TASK_SIZE, we can probably
 * skip the ASID stuff and just use the Global bit...
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
		flush_tlb_all();
	} else {
		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
		unsigned long saved_asid = get_asid();

		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;
		set_asid(asid);
		while (start < end) {
			__flush_tlb_page(asid, start);
			start += PAGE_SIZE;
		}
		set_asid(saved_asid);
	}
	local_irq_restore(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/* Invalidate all TLB entries of this process by getting a new ASID */
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

void flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	sysreg_write(MMUCR, sysreg_read(MMUCR) | SYSREG_BIT(MMUCR_I));
	local_irq_restore(flags);
}

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>

static void *tlb_start(struct seq_file *tlb, loff_t *pos)
{
	static unsigned long tlb_index;

	if (*pos >= 32)
		return NULL;

	tlb_index = 0;
	return &tlb_index;
}

static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
{
	unsigned long *index = v;

	if (*index >= 31)
		return NULL;

	++*pos;
	++*index;
	return index;
}

static void tlb_stop(struct seq_file *tlb, void *v)
{

}

static int tlb_show(struct seq_file *tlb, void *v)
{
	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;
	unsigned long *index = v;

	if (*index == 0)
		seq_puts(tlb, "ID  V G ASID VPN   PFN   AP SZ C B W D\n");

	BUG_ON(*index >= 32);

	local_irq_save(flags);
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);
	mmucr = mmucr_save & 0x13;
	mmucr |= *index << 14;
	sysreg_write(MMUCR, mmucr);

	asm volatile("tlbr" : : : "memory");
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);

	seq_printf(tlb, "%2lu: %c %c %02x   %05x %05x %o  %o  %c %c %c %c\n",
		   *index,
		   (tlbehi & 0x200)?'1':'0',
		   (tlbelo & 0x100)?'1':'0',
		   (tlbehi & 0xff),
		   (tlbehi >> 12), (tlbelo >> 12),
		   (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
		   (tlbelo & 0x200)?'1':'0',
		   (tlbelo & 0x080)?'1':'0',
		   (tlbelo & 0x001)?'1':'0',
		   (tlbelo & 0x002)?'1':'0');

	return 0;
}

static struct seq_operations tlb_ops = {
	.start		= tlb_start,
	.next		= tlb_next,
	.stop		= tlb_stop,
	.show		= tlb_show,
};

static int tlb_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &tlb_ops);
}

static struct file_operations proc_tlb_operations = {
	.open		= tlb_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proctlb_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("tlb", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_tlb_operations;
	return 0;
}
late_initcall(proctlb_init);
#endif /* CONFIG_PROC_FS */
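
With CONFIG_PROC_FS enabled, the seq_file interface above exposes the DTLB as
/proc/tlb; a trivial userspace reader (illustration only, not part of the
patch):

/* Dump the DTLB table exported by proctlb_init() above. */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/tlb", "r");

	if (!f) {
		perror("/proc/tlb");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}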