author     Michal Simek <monstr@monstr.eu>  2009-05-26 10:30:17 -0400
committer  Michal Simek <monstr@monstr.eu>  2009-05-26 10:45:18 -0400
commit     1f84e1ea0e87ad659cd6f6a6285d50c73a8d1a24 (patch)
tree       989477a96a5b0fb71d93d669484de4039467ccc0 /arch
parent     dc95be1f7188f0718ac922b6b6b72406c294d250 (diff)
microblaze_mmu_v2: pgalloc.h and page.h
Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch')
-rw-r--r--  arch/microblaze/include/asm/page.h     166
-rw-r--r--  arch/microblaze/include/asm/pgalloc.h  191
2 files changed, 314 insertions, 43 deletions
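The page.h diff below mainly splits the header into !CONFIG_MMU and CONFIG_MMU paths. With an MMU, __pa()/__va() stop being identity casts and become a constant-offset translation between CONFIG_KERNEL_START (the kernel's virtual base) and CONFIG_KERNEL_BASE_ADDR (its physical load address). A minimal user-space sketch of that arithmetic, assuming illustrative values for the two constants (they are not the values any real board uses):

/* Illustrative only: mimics the CONFIG_MMU __pa()/__va() arithmetic from the
 * patch, with made-up stand-ins for CONFIG_KERNEL_START / CONFIG_KERNEL_BASE_ADDR. */
#include <stdio.h>

#define KERNEL_START     0xc0000000UL   /* stands in for CONFIG_KERNEL_START */
#define KERNEL_BASE_ADDR 0x10000000UL   /* stands in for CONFIG_KERNEL_BASE_ADDR */

#define __virt_to_phys(addr) ((addr) + KERNEL_BASE_ADDR - KERNEL_START)
#define __phys_to_virt(addr) ((addr) + KERNEL_START - KERNEL_BASE_ADDR)

int main(void)
{
        unsigned long vaddr = KERNEL_START + 0x2000;    /* some kernel virtual address */
        unsigned long paddr = __virt_to_phys(vaddr);

        printf("virt 0x%08lx -> phys 0x%08lx\n", vaddr, paddr);
        printf("round trip: 0x%08lx\n", __phys_to_virt(paddr));  /* equals vaddr again */
        return 0;
}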
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 7238dcfcc517..210e584974f7 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -1,6 +1,8 @@
 /*
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * VM ops
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  * Changes for MMU support:
  * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
@@ -15,14 +17,15 @@
 
 #include <linux/pfn.h>
 #include <asm/setup.h>
+#include <linux/const.h>
+
+#ifdef __KERNEL__
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT (12)
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
 
-#ifdef __KERNEL__
-
 #ifndef __ASSEMBLY__
 
 #define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
@@ -35,6 +38,7 @@
 /* align addr on a size boundary - adjust address up if needed */
 #define _ALIGN(addr, size) _ALIGN_UP(addr, size)
 
+#ifndef CONFIG_MMU
 /*
  * PAGE_OFFSET -- the first address of the first page of memory. When not
  * using MMU this corresponds to the first free page in physical memory (aligned
@@ -43,15 +47,44 @@
 extern unsigned int __page_offset;
 #define PAGE_OFFSET __page_offset
 
-#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
-#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
-#define free_user_page(page, addr) free_page(addr)
+#else /* CONFIG_MMU */
 
-#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
+/*
+ * PAGE_OFFSET -- the first address of the first page of memory. With MMU
+ * it is set to the kernel start address (aligned on a page boundary).
+ *
+ * CONFIG_KERNEL_START is defined in arch/microblaze/config.in and used
+ * in arch/microblaze/Makefile.
+ */
+#define PAGE_OFFSET CONFIG_KERNEL_START
 
+/*
+ * MAP_NR -- given an address, calculate the index of the page struct which
+ * points to the address's page.
+ */
+#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
 
-#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
-#define copy_user_page(vto, vfrom, vaddr, topg) \
+/*
+ * The basic type of a PTE - 32 bit physical addressing.
+ */
+typedef unsigned long pte_basic_t;
+#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
+#define PTE_FMT "%.8lx"
+
+#endif /* CONFIG_MMU */
+
+# ifndef CONFIG_MMU
+# define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+# define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+# define free_user_page(page, addr) free_page(addr)
+# else /* CONFIG_MMU */
+extern void copy_page(void *to, void *from);
+# endif /* CONFIG_MMU */
+
+# define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
+
+# define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
+# define copy_user_page(vto, vfrom, vaddr, topg) \
         memcpy((vto), (vfrom), PAGE_SIZE)
 
 /*
@@ -60,21 +93,32 @@ extern unsigned int __page_offset;
 typedef struct page *pgtable_t;
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
+/* FIXME this can depend on linux kernel version */
+# ifdef CONFIG_MMU
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+# else /* CONFIG_MMU */
 typedef struct { unsigned long ste[64]; } pmd_t;
 typedef struct { pmd_t pue[1]; } pud_t;
 typedef struct { pud_t pge[1]; } pgd_t;
+# endif /* CONFIG_MMU */
 
+# define pte_val(x) ((x).pte)
+# define pgprot_val(x) ((x).pgprot)
 
-#define pte_val(x) ((x).pte)
-#define pgprot_val(x) ((x).pgprot)
-#define pmd_val(x) ((x).ste[0])
-#define pud_val(x) ((x).pue[0])
-#define pgd_val(x) ((x).pge[0])
+# ifdef CONFIG_MMU
+# define pmd_val(x) ((x).pmd)
+# define pgd_val(x) ((x).pgd)
+# else /* CONFIG_MMU */
+# define pmd_val(x) ((x).ste[0])
+# define pud_val(x) ((x).pue[0])
+# define pgd_val(x) ((x).pge[0])
+# endif /* CONFIG_MMU */
 
-#define __pte(x) ((pte_t) { (x) })
-#define __pmd(x) ((pmd_t) { (x) })
-#define __pgd(x) ((pgd_t) { (x) })
-#define __pgprot(x) ((pgprot_t) { (x) })
+# define __pte(x) ((pte_t) { (x) })
+# define __pmd(x) ((pmd_t) { (x) })
+# define __pgd(x) ((pgd_t) { (x) })
+# define __pgprot(x) ((pgprot_t) { (x) })
 
 /**
  * Conversions for virtual address, physical address, pfn, and struct
@@ -94,44 +138,80 @@ extern unsigned long max_low_pfn;
 extern unsigned long min_low_pfn;
 extern unsigned long max_pfn;
 
-#define __pa(vaddr) ((unsigned long) (vaddr))
-#define __va(paddr) ((void *) (paddr))
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+extern unsigned long memory_size;
 
-#define phys_to_pfn(phys) (PFN_DOWN(phys))
-#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
+extern int page_is_ram(unsigned long pfn);
 
-#define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
-#define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
+# define phys_to_pfn(phys) (PFN_DOWN(phys))
+# define pfn_to_phys(pfn) (PFN_PHYS(pfn))
 
-#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
-#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
+# define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
+# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
 
-#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
-#define page_to_bus(page) (page_to_phys(page))
-#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
+# ifdef CONFIG_MMU
+# define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr))
+# else /* CONFIG_MMU */
+# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
+# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
+# define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
+# define page_to_bus(page) (page_to_phys(page))
+# define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
+# endif /* CONFIG_MMU */
 
-extern unsigned int memory_start;
-extern unsigned int memory_end;
-extern unsigned int memory_size;
+# ifndef CONFIG_MMU
+# define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) <= max_mapnr)
+# define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+# else /* CONFIG_MMU */
+# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
+# define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET))
+# define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+# endif /* CONFIG_MMU */
 
-#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_mapnr)
+# endif /* __ASSEMBLY__ */
 
-#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
 
-#else
-#define tophys(rd, rs) (addik rd, rs, 0)
-#define tovirt(rd, rs) (addik rd, rs, 0)
-#endif /* __ASSEMBLY__ */
 
-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+# ifndef CONFIG_MMU
+# define __pa(vaddr) ((unsigned long) (vaddr))
+# define __va(paddr) ((void *) (paddr))
+# else /* CONFIG_MMU */
+# define __pa(x) __virt_to_phys((unsigned long)(x))
+# define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+# endif /* CONFIG_MMU */
+
 
 /* Convert between virtual and physical address for MMU. */
 /* Handle MicroBlaze processor with virtual memory. */
+#ifndef CONFIG_MMU
 #define __virt_to_phys(addr) addr
 #define __phys_to_virt(addr) addr
+#define tophys(rd, rs) addik rd, rs, 0
+#define tovirt(rd, rs) addik rd, rs, 0
+#else
+#define __virt_to_phys(addr) \
+        ((addr) + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
+#define __phys_to_virt(addr) \
+        ((addr) + CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
+#define tophys(rd, rs) \
+        addik rd, rs, (CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
+#define tovirt(rd, rs) \
+        addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
+#endif /* CONFIG_MMU */
 
 #define TOPHYS(addr) __virt_to_phys(addr)
 
+#ifdef CONFIG_MMU
+#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
+#define WANT_PAGE_VIRTUAL 1 /* page alloc 2 relies on this */
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#endif /* CONFIG_MMU */
+
 #endif /* __KERNEL__ */
 
 #include <asm-generic/memory_model.h>
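In the CONFIG_MMU case above, ARCH_PFN_OFFSET is derived from memory_start rather than PAGE_OFFSET, virt_to_page() goes through MAP_NR(), and pfn_valid() checks against max_mapnr plus that offset. A small sketch of how those macros compose, assuming example values for memory_start, PAGE_OFFSET and max_mapnr (in the kernel they come from the platform description and mm initialisation):

/* Illustrative only: example numbers, not any real board configuration. */
#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_OFFSET 0xc0000000UL                     /* CONFIG_KERNEL_START in the patch */
static unsigned long memory_start = 0x10000000UL;    /* physical RAM base */
static unsigned long max_mapnr    = 0x8000;          /* 0x8000 pages = 128 MB of RAM */

#define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
#define MAP_NR(addr)    (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
#define pfn_valid(pfn)  ((pfn) < (max_mapnr + ARCH_PFN_OFFSET))

int main(void)
{
        unsigned long kaddr = PAGE_OFFSET + 0x5000;            /* a kernel virtual address */
        unsigned long pfn = ARCH_PFN_OFFSET + MAP_NR(kaddr);   /* its page frame number */

        printf("kaddr 0x%08lx -> mem_map index %lu, pfn 0x%lx, valid=%d\n",
               kaddr, MAP_NR(kaddr), pfn, pfn_valid(pfn) ? 1 : 0);
        return 0;
}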
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 2a4b35484010..59a757e46ba5 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,195 @@
 #ifndef _ASM_MICROBLAZE_PGALLOC_H
 #define _ASM_MICROBLAZE_PGALLOC_H
 
+#ifdef CONFIG_MMU
+
+#include <linux/kernel.h> /* For min/max macros */
+#include <linux/highmem.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+
+#define PGDIR_ORDER 0
+
+/*
+ * This is handled very differently on MicroBlaze since out page tables
+ * are all 0's and I want to be able to use these zero'd pages elsewhere
+ * as well - it gives us quite a speedup.
+ * -- Cort
+ */
+extern struct pgtable_cache_struct {
+        unsigned long *pgd_cache;
+        unsigned long *pte_cache;
+        unsigned long pgtable_cache_sz;
+} quicklists;
+
+#define pgd_quicklist (quicklists.pgd_cache)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (quicklists.pte_cache)
+#define pgtable_cache_size (quicklists.pgtable_cache_sz)
+
+extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
+extern atomic_t zero_sz; /* # currently pre-zero'd pages */
+extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
+extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
+extern atomic_t zerototal; /* # pages zero'd over time */
+
+#define zero_quicklist (zero_cache)
+#define zero_cache_sz (zero_sz)
+#define zero_cache_calls (zeropage_calls)
+#define zero_cache_hits (zeropage_hits)
+#define zero_cache_total (zerototal)
+
+/*
+ * return a pre-zero'd page from the list,
+ * return NULL if none available -- Cort
+ */
+extern unsigned long get_zero_page_fast(void);
+
+extern void __bad_pte(pmd_t *pmd);
+
+extern inline pgd_t *get_pgd_slow(void)
+{
+        pgd_t *ret;
+
+        ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
+        if (ret != NULL)
+                clear_page(ret);
+        return ret;
+}
+
+extern inline pgd_t *get_pgd_fast(void)
+{
+        unsigned long *ret;
+
+        ret = pgd_quicklist;
+        if (ret != NULL) {
+                pgd_quicklist = (unsigned long *)(*ret);
+                ret[0] = 0;
+                pgtable_cache_size--;
+        } else
+                ret = (unsigned long *)get_pgd_slow();
+        return (pgd_t *)ret;
+}
+
+extern inline void free_pgd_fast(pgd_t *pgd)
+{
+        *(unsigned long **)pgd = pgd_quicklist;
+        pgd_quicklist = (unsigned long *) pgd;
+        pgtable_cache_size++;
+}
+
+extern inline void free_pgd_slow(pgd_t *pgd)
+{
+        free_page((unsigned long)pgd);
+}
+
+#define pgd_free(mm, pgd) free_pgd_fast(pgd)
+#define pgd_alloc(mm) get_pgd_fast()
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/*
+ * We don't have any real pmd's, and this code never triggers because
+ * the pgd will always be present..
+ */
+#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
+/* FIXME two definition - look below */
+#define pmd_free(mm, x) do { } while (0)
+#define pgd_populate(mm, pmd, pte) BUG()
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+                unsigned long address)
+{
+        pte_t *pte;
+        extern int mem_init_done;
+        extern void *early_get_page(void);
+        if (mem_init_done) {
+                pte = (pte_t *)__get_free_page(GFP_KERNEL |
+                                __GFP_REPEAT | __GFP_ZERO);
+        } else {
+                pte = (pte_t *)early_get_page();
+                if (pte)
+                        clear_page(pte);
+        }
+        return pte;
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+                unsigned long address)
+{
+        struct page *ptepage;
+
+#ifdef CONFIG_HIGHPTE
+        int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+#else
+        int flags = GFP_KERNEL | __GFP_REPEAT;
+#endif
+
+        ptepage = alloc_pages(flags, 0);
+        if (ptepage)
+                clear_highpage(ptepage);
+        return ptepage;
+}
+
+static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
+                unsigned long address)
+{
+        unsigned long *ret;
+
+        ret = pte_quicklist;
+        if (ret != NULL) {
+                pte_quicklist = (unsigned long *)(*ret);
+                ret[0] = 0;
+                pgtable_cache_size--;
+        }
+        return (pte_t *)ret;
+}
+
+extern inline void pte_free_fast(pte_t *pte)
+{
+        *(unsigned long **)pte = pte_quicklist;
+        pte_quicklist = (unsigned long *) pte;
+        pgtable_cache_size++;
+}
+
+extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+        free_page((unsigned long)pte);
+}
+
+extern inline void pte_free_slow(struct page *ptepage)
+{
+        __free_page(ptepage);
+}
+
+extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
+{
+        __free_page(ptepage);
+}
+
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+
+#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte))
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+                (pmd_val(*(pmd)) = (unsigned long) (pte))
+
+/*
+ * We don't have any real pmd's, and this code never triggers because
+ * the pgd will always be present..
+ */
+#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
+/*#define pmd_free(mm, x) do { } while (0)*/
+#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define pgd_populate(mm, pmd, pte) BUG()
+
+extern int do_check_pgt_cache(int, int);
+
+#endif /* CONFIG_MMU */
+
 #define check_pgt_cache() do {} while (0)
 
 #endif /* _ASM_MICROBLAZE_PGALLOC_H */
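The get_pgd_fast()/free_pgd_fast() pair added above implements a simple quicklist: a freed page-directory page is pushed onto a LIFO list by storing the old list head in its first word, and allocation pops the head and clears that link word before handing the page back out. A stand-alone sketch of the same pattern, using malloc'd 4 KB blocks in place of __get_free_pages()/clear_page():

/* Illustrative only: the linked-through-the-first-word quicklist idea,
 * backed by malloc() instead of the kernel page allocator. */
#include <stdio.h>
#include <stdlib.h>

#define PGD_SIZE 4096

static unsigned long *pgd_quicklist;      /* head of the free list */
static unsigned long pgtable_cache_size;  /* number of cached pages */

static unsigned long *get_pgd_fast(void)
{
        unsigned long *ret = pgd_quicklist;

        if (ret != NULL) {
                pgd_quicklist = (unsigned long *)(*ret); /* pop: follow link in word 0 */
                ret[0] = 0;                              /* page must read as all-zero again */
                pgtable_cache_size--;
        } else {
                ret = calloc(1, PGD_SIZE);               /* slow path: fresh zeroed page */
        }
        return ret;
}

static void free_pgd_fast(unsigned long *pgd)
{
        *(unsigned long **)pgd = pgd_quicklist;          /* push: link stored in word 0 */
        pgd_quicklist = pgd;
        pgtable_cache_size++;
}

int main(void)
{
        unsigned long *a = get_pgd_fast();   /* slow path, list empty */
        free_pgd_fast(a);                    /* page now cached */
        unsigned long *b = get_pgd_fast();   /* fast path: same page, link word cleared */

        printf("reused=%d cached=%lu\n", a == b, pgtable_cache_size);
        free(b);
        return 0;
}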