Diffstat (limited to 'arch/powerpc/mm/mem64.c')
-rw-r--r--  arch/powerpc/mm/mem64.c  259
1 file changed, 259 insertions, 0 deletions
diff --git a/arch/powerpc/mm/mem64.c b/arch/powerpc/mm/mem64.c
new file mode 100644
index 000000000000..ef765a84433f
--- /dev/null
+++ b/arch/powerpc/mm/mem64.c
@@ -0,0 +1,259 @@
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/imalloc.h>

/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long pfn)
{
	int i;
	unsigned long paddr = (pfn << PAGE_SHIFT);

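	/* Scan the LMB (logical memory block) map of physical memory;
	 * an address that falls inside any region is RAM. */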
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(page_is_ram);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);

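	/* Non-RAM physical addresses (e.g. MMIO) must be mapped
	 * guarded and non-cacheable. */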
	if (!page_is_ram(addr >> PAGE_SHIFT))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));
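	/* Walk every page of every node and classify it. */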
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%ld pages of RAM\n", total);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
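	/* Nothing to track if the hardware keeps the i-cache coherent. */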
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

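	/* Convert the user address offset into the kernel virtual
	 * address of the same bytes within the page. */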
	maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
		      pte_t pte)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	int local = 0;
	cpumask_t tmp;
	unsigned long flags;

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)
			    && !test_bit(PG_arch_1, &page->flags)) {
				__flush_dcache_icache(page_address(page));
				set_bit(PG_arch_1, &page->flags);
			}
		}
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte))
		return;

	pgdir = vma->vm_mm->pgd;
	if (pgdir == NULL)
		return;

	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

	vsid = get_vsid(vma->vm_mm->context.id, ea);

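	/* If this mm is active only on the current CPU, the hash
	 * insertion can use the cheaper CPU-local invalidation. */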
	local_irq_save(flags);
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

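	/* Preload the HPTE; 0x300 is the data storage interrupt
	 * vector, i.e. treat this as a data access. */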
	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
		    0x300, local);
	local_irq_restore(flags);
}