author    Michal Simek <monstr@monstr.eu>  2009-05-26 10:30:15 -0400
committer Michal Simek <monstr@monstr.eu>  2009-05-26 10:45:16 -0400
commit    15902bf63c8332946e5a1f48a72e3ae22874b11b (patch)
tree      5a8aa78716a7176ea9df52f87ca0c0176f4ce955 /arch/microblaze/include/asm
parent    fc34d1eb1ca09d3450508e2cf9cf511364c2c460 (diff)
microblaze_mmu_v2: Page table - ioremap - pgtable.c/h, section update
Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch/microblaze/include/asm')
 -rw-r--r--  arch/microblaze/include/asm/pgtable.h  | 536
 -rw-r--r--  arch/microblaze/include/asm/sections.h |   3
 2 files changed, 539 insertions, 0 deletions
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 254fd4ba733b..4c57a586a989 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -14,6 +16,8 @@
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
+#ifndef CONFIG_MMU
+
 #define pgd_present(pgd) (1) /* pages are always present on non MMU */
 #define pgd_none(pgd) (0)
 #define pgd_bad(pgd) (0)
@@ -47,6 +51,538 @@ static inline int pte_file(pte_t pte) { return 0; }
 
 #define arch_enter_lazy_cpu_mode() do {} while (0)
 
+#else /* CONFIG_MMU */
+
+#include <asm-generic/4level-fixup.h>
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/processor.h> /* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+
+#define FIRST_USER_ADDRESS 0
+
+extern unsigned long va_to_phys(unsigned long address);
+extern pte_t *va_to_pte(unsigned long address);
+extern unsigned long ioremap_bot, ioremap_base;
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+
+static inline int pte_special(pte_t pte) { return 0; }
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+/* Start and end of the vmalloc area. */
+/* Make sure to map the vmalloc area above the pinned kernel memory area
+   of 32Mb. */
+#define VMALLOC_START (CONFIG_KERNEL_START + \
+				max(32 * 1024 * 1024UL, memory_size))
+#define VMALLOC_END ioremap_bot
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
+ * table containing PTEs, together with a set of 16 segment registers, to
+ * define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hashtable.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+/*
+ * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
+ * instruction and data sides share a unified, 64-entry, semi-associative
+ * TLB which is maintained totally under software control. In addition, the
+ * instruction side has a hardware-managed, 2,4, or 8-entry, fully-associative
+ * TLB which serves as a first level to the shared TLB. These two TLBs are
+ * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions).
+ */
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
+ *
+ */
+
+/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
+#define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT)
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT PMD_SHIFT
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: our page-table tree is two-level, so
+ * we don't really have any PMD directory.
+ */
+#define PTRS_PER_PTE (1 << PTE_SHIFT)
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_PGD_NR 0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define pte_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
+		__FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
+		__FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
+		__FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Bits in a linux-style PTE. These match the bits in the
+ * (hardware-defined) PTE as closely as possible.
+ */
+
+/* There are several potential gotchas here. The hardware TLBLO
+ * field looks like this:
+ *
+ * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * RPN..................... 0 0 EX WR ZSEL....... W I M G
+ *
+ * Where possible we make the Linux PTE bits match up with this
+ *
+ * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
+ *   support down to 1k pages), this is done in the TLBMiss exception
+ *   handler.
+ * - We use only zones 0 (for kernel pages) and 1 (for user pages)
+ *   of the 16 available. Bits 24-26 of the TLB are cleared in the TLB
+ *   miss handler. Bit 27 is PAGE_USER, thus selecting the correct
+ *   zone.
+ * - PRESENT *must* be in the bottom two bits because swap cache
+ *   entries use the top 30 bits. Because 4xx doesn't support SMP
+ *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
+ *   is cleared in the TLB miss handler before the TLB entry is loaded.
+ * - All other bits of the PTE are loaded into TLBLO without
+ *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
+ *   software PTE bits. We actually use bits 21, 24, 25, and
+ *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
+ *   PRESENT.
+ */
+
+/* Definitions for MicroBlaze. */
+#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
+#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
+#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
+#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
+#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_RW 0x040 /* software: Writes permitted */
+#define _PAGE_DIRTY 0x080 /* software: dirty page */
+#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
+#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
+#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
+#define _PMD_PRESENT PAGE_MASK
+
+/*
+ * Some bits are unused...
+ */
+#ifndef _PAGE_HASHPTE
+#define _PAGE_HASHPTE 0
+#endif
+#ifndef _PTE_NONE_MASK
+#define _PTE_NONE_MASK 0
+#endif
+#ifndef _PAGE_SHARED
+#define _PAGE_SHARED 0
+#endif
+#ifndef _PAGE_HWWRITE
+#define _PAGE_HWWRITE 0
+#endif
+#ifndef _PAGE_HWEXEC
+#define _PAGE_HWEXEC 0
+#endif
+#ifndef _PAGE_EXEC
+#define _PAGE_EXEC 0
+#endif
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+/*
+ * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
+ * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
+ * to have it in the Linux PTE, and in fact the bit could be reused for
+ * another purpose. -- paulus.
+ */
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
+
+#define _PAGE_KERNEL \
+	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)
+
+#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
+
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X \
+	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED)
+#define PAGE_KERNEL_CI __pgprot(_PAGE_IO)
+
+/*
+ * We consider execute permission the same as read.
+ * Also, write permissions imply read permissions.
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY_X
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY_X
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY_X
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED_X
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED_X
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#endif /* __ASSEMBLY__ */
+
+#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, ptep) \
+	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0)
+#define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0)
+#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
+
+#define pte_page(x) (mem_map + (unsigned long) \
+	((pte_val(x) - memory_start) >> PAGE_SHIFT))
+#define PFN_SHIFT_OFFSET (PAGE_SHIFT)
+
+#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
+
+#define pfn_pte(pfn, prot) \
+	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
+
+#ifndef __ASSEMBLY__
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+#define pgd_clear(xp) do { } while (0)
+#define pgd_page(pgd) \
+	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+/* FIXME */
+static inline int pte_file(pte_t pte) { return 0; }
+
+static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
+static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
+
+static inline pte_t pte_rdprotect(pte_t pte) \
+	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) \
+	{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_exprotect(pte_t pte) \
+	{ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
+static inline pte_t pte_mkclean(pte_t pte) \
+	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte) \
+	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+
+static inline pte_t pte_mkread(pte_t pte) \
+	{ pte_val(pte) |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte) \
+	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) \
+	{ pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) \
+	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) \
+	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
+{
+	pte_t pte;
+	pte_val(pte) = physpage | pgprot_val(pgprot);
+	return pte;
+}
+
+#define mk_pte(page, pgprot) \
+({ \
+	pte_t pte; \
+	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
+			pgprot_val(pgprot); \
+	pte; \
+})
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	return pte;
+}
+
+/*
+ * Atomic PTE updates.
+ *
+ * pte_update clears and sets bit atomically, and returns
+ * the old pte value.
+ * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
+ * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
+ */
+static inline unsigned long pte_update(pte_t *p, unsigned long clr,
+				unsigned long set)
+{
+	unsigned long old, tmp, msr;
+
+	__asm__ __volatile__("\
+	msrclr %2, 0x2\n\
+	nop\n\
+	lw %0, %4, r0\n\
+	andn %1, %0, %5\n\
+	or %1, %1, %6\n\
+	sw %1, %4, r0\n\
+	mts rmsr, %2\n\
+	nop"
+	: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
+	: "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
+	: "cc");
+
+	return old;
+}
+
+/*
+ * set_pte stores a linux PTE into the linux page table.
+ */
+static inline void set_pte(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte)
+{
+	*ptep = pte;
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte)
+{
+	*ptep = pte;
+}
+
+static inline int ptep_test_and_clear_young(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
+}
+
+static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	return (pte_update(ptep, \
+		(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
+}
+
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+}
+
+/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
+}*/
+
+static inline void ptep_mkdirty(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	pte_update(ptep, 0, _PAGE_DIRTY);
+}
+
+/*#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/
+
+/* Convert pmd entry to page */
+/* our pmd entry is an effective address of pte table*/
+/* returns effective address of the pmd entry*/
+#define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+
+/* returns struct *page of the pmd entry*/
+#define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the second-level page table.. */
+static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
+{
+	return (pmd_t *) dir;
+}
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address) \
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, addr) \
+	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir, addr) \
+	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
+#define pte_offset_map_nested(dir, addr) \
+	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+
+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+
+/* Encode and decode a nonlinear file mapping entry */
+#define PTE_FILE_MAX_BITS 29
+#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) })
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry. flush_hash_page is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
+ * (if used). -- paulus
+ */
+#define __swp_type(entry) ((entry).val & 0x3f)
+#define __swp_offset(entry) ((entry).val >> 6)
+#define __swp_entry(type, offset) \
+	((swp_entry_t) { (type) | ((offset) << 6) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 })
+
+
+/* CONFIG_APUS */
+/* For virtual address to physical address conversion */
+extern void cache_clear(__u32 addr, int length);
+extern void cache_push(__u32 addr, int length);
+extern int mm_end_of_chunk(unsigned long addr, int len);
+extern unsigned long iopa(unsigned long addr);
+/* extern unsigned long mm_ptov(unsigned long addr) \
+	__attribute__ ((const)); TBD */
+
+/* Values for nocacheflag and cmode */
+/* These are not used by the APUS kernel_map, but prevents
+ * compilation errors.
+ */
+#define IOMAP_FULL_CACHING 0
+#define IOMAP_NOCACHE_SER 1
+#define IOMAP_NOCACHE_NONSER 2
+#define IOMAP_NO_COPYBACK 3
+
+/*
+ * Map some physical address range into the kernel address space.
+ */
+extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
+		int nocacheflag, unsigned long *memavailp);
+
+/*
+ * Set cache mode of (kernel space) address range.
+ */
+extern void kernel_set_cachemode(unsigned long address, unsigned long size,
+		unsigned int cmode);
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_page_range remap_page_range
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+void do_page_fault(struct pt_regs *regs, unsigned long address,
+		unsigned long error_code);
+
+void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
+		unsigned int size, int flags);
+
+void __init adjust_total_lowmem(void);
+void mapin_ram(void);
+int map_page(unsigned long va, phys_addr_t pa, int flags);
+
+extern int mem_init_done;
+extern unsigned long ioremap_base;
+extern unsigned long ioremap_bot;
+
+asmlinkage void __init mmu_init(void);
+
+void __init *early_get_page(void);
+
+void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
+void consistent_free(void *vaddr);
+void consistent_sync(void *vaddr, size_t size, int direction);
+void consistent_sync_page(struct page *page, unsigned long offset,
+		size_t size, int direction);
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* CONFIG_MMU */
+
 #ifndef __ASSEMBLY__
 #include <asm-generic/pgtable.h>
 
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h
index 8434a43e5421..4487e150b455 100644
--- a/arch/microblaze/include/asm/sections.h
+++ b/arch/microblaze/include/asm/sections.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -14,6 +16,7 @@
 # ifndef __ASSEMBLY__
 extern char _ssbss[], _esbss[];
 extern unsigned long __ivt_start[], __ivt_end[];
+extern char _etext[], _stext[];
 
 # ifdef CONFIG_MTD_UCLINUX
 extern char *_ebss;
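
For orientation, a minimal sketch (not part of this commit) of how the two-level walk added in pgtable.h resolves a kernel virtual address to its PTE. The helper name lookup_kernel_pte is hypothetical; it simply chains the pgd_offset_k/pmd_offset/pte_offset_kernel definitions above and is roughly what the declared va_to_pte() helper is expected to do in pgtable.c, assuming normal kernel context (init_mm and a populated page table).

```c
/* Illustrative sketch only, kernel context assumed -- not part of the patch. */
static pte_t *lookup_kernel_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);    /* top-level entry for this address */
	pmd_t *pmd = pmd_offset(pgd, vaddr); /* PMD is folded: same entry */

	if (pmd_none(*pmd))                  /* no PTE page mapped here */
		return NULL;

	return pte_offset_kernel(pmd, vaddr); /* entry inside the PTE page */
}
```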
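The swap-entry macros pack a 6-bit type below the offset, and the PTE conversion shifts the whole value left by 2 so the stored PTE never sets the two low bits (where _PAGE_PRESENT lives, per the comment above). A standalone sketch of that arithmetic on plain integers, to show the round trip; the SWP_* names are illustrative stand-ins for the kernel macros:

```c
#include <assert.h>
#include <stdio.h>

/* Plain-integer mirror of the __swp_* packing defined in the patch. */
#define SWP_TYPE(val)        ((val) & 0x3f)
#define SWP_OFFSET(val)      ((val) >> 6)
#define SWP_ENTRY(type, off) ((unsigned long)(type) | ((unsigned long)(off) << 6))
#define SWP_TO_PTE(val)      ((val) << 2)  /* leaves PTE bits 0-1 clear */
#define PTE_TO_SWP(pteval)   ((pteval) >> 2)

int main(void)
{
	unsigned long entry = SWP_ENTRY(5, 12345);
	unsigned long pteval = SWP_TO_PTE(entry);

	assert(SWP_TYPE(PTE_TO_SWP(pteval)) == 5);      /* type survives */
	assert(SWP_OFFSET(PTE_TO_SWP(pteval)) == 12345); /* offset survives */
	assert((pteval & 0x3) == 0); /* _PAGE_PRESENT (0x002) is never set */

	printf("swap entry round-trips; low PTE bits stay clear\n");
	return 0;
}
```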