author     Sam Ravnborg <sam@ravnborg.org>          2008-07-18 00:55:51 -0400
committer  David S. Miller <davem@davemloft.net>    2008-07-18 00:55:51 -0400
commit     f5e706ad886b6a5eb59637830110b09ccebf01c5
tree       ea043a0a28e16a2ac6395c35d737f52698a165b7  /include/asm-sparc64/pgtable.h
parent     5e3609f60c09f0f15f71f80c6d7933b2c7be71a6
sparc: join the remaining header files
With this commit all sparc64 header files are moved to asm-sparc.
The remaining files (71 files) were too different to be trivially
merged, so each of them is split into a _32.h and a _64.h file,
both of which are included from a header without a bit-size suffix.
The following script was used:
cd include
FILES=`wc -l asm-sparc64/*h | grep -v '^ 1' | cut -b 20-`
for FILE in ${FILES}; do
        echo $FILE:
        BASE=`echo $FILE | cut -d '.' -f 1`
        FN32=${BASE}_32.h
        FN64=${BASE}_64.h
        GUARD=___ASM_SPARC_`echo $BASE | tr '-' '_' | tr [:lower:] [:upper:]`_H
        git mv asm-sparc/$FILE asm-sparc/$FN32
        git mv asm-sparc64/$FILE asm-sparc/$FN64
        echo git mv done
        printf "#ifndef %s\n" $GUARD > asm-sparc/$FILE
        printf "#define %s\n" $GUARD >> asm-sparc/$FILE
        printf "#if defined(__sparc__) && defined(__arch64__)\n" >> asm-sparc/$FILE
        printf "#include <asm-sparc/%s>\n" $FN64 >> asm-sparc/$FILE
        printf "#else\n" >> asm-sparc/$FILE
        printf "#include <asm-sparc/%s>\n" $FN32 >> asm-sparc/$FILE
        printf "#endif\n" >> asm-sparc/$FILE
        printf "#endif\n" >> asm-sparc/$FILE
        git add asm-sparc/$FILE
        echo new file done
        printf "#include <asm-sparc/%s>\n" $FILE > asm-sparc64/$FILE
        git add asm-sparc64/$FILE
        echo sparc64 file done
done
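For pgtable.h, for example, the script leaves the following dispatch
header behind as asm-sparc/pgtable.h (reproduced here from the printf
lines above, for illustration):

#ifndef ___ASM_SPARC_PGTABLE_H
#define ___ASM_SPARC_PGTABLE_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/pgtable_64.h>
#else
#include <asm-sparc/pgtable_32.h>
#endif
#endif

while asm-sparc64/pgtable.h is reduced to a single
#include <asm-sparc/pgtable.h> line, as the diff below shows.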
The guard contains three '_' to avoid conflicts with existing guards.
In addition, the two Kbuild files are emptied to avoid breaking the
headers_* targets.
We will reintroduce the exported header files when the necessary
kbuild changes are merged.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/asm-sparc64/pgtable.h')
-rw-r--r--   include/asm-sparc64/pgtable.h   782
1 file changed, 1 insertion, 781 deletions
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index b87017747b5d..9decbd99aeff 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -1,781 +1 @@
-/*
- * pgtable.h: SpitFire page table operations.
- *
- * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#ifndef _SPARC64_PGTABLE_H
-#define _SPARC64_PGTABLE_H
-
-/* This file contains the functions and defines necessary to modify and use
- * the SpitFire page tables.
- */
-
-#include <asm-generic/pgtable-nopud.h>
-
-#include <linux/compiler.h>
-#include <linux/const.h>
-#include <asm/types.h>
-#include <asm/spitfire.h>
-#include <asm/asi.h>
-#include <asm/system.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-
-/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
- * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
- * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
- * The vmalloc area spans 0x100000000 to 0x200000000.
- * Since modules need to be in the lowest 32-bits of the address space,
- * we place them right before the OBP area from 0x10000000 to 0xf0000000.
- * There is a single static kernel PMD which maps from 0x0 to address
- * 0x400000000.
- */
-#define TLBTEMP_BASE _AC(0x0000000006000000,UL)
-#define TSBMAP_BASE _AC(0x0000000008000000,UL)
-#define MODULES_VADDR _AC(0x0000000010000000,UL)
-#define MODULES_LEN _AC(0x00000000e0000000,UL)
-#define MODULES_END _AC(0x00000000f0000000,UL)
-#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
-#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
-#define VMALLOC_START _AC(0x0000000100000000,UL)
-#define VMALLOC_END _AC(0x0000000200000000,UL)
-#define VMEMMAP_BASE _AC(0x0000000200000000,UL)
-
-#define vmemmap ((struct page *)VMEMMAP_BASE)
-
-/* XXX All of this needs to be rethought so we can take advantage
- * XXX cheetah's full 64-bit virtual address space, ie. no more hole
- * XXX in the middle like on spitfire. -DaveM
- */
-/*
- * Given a virtual address, the lowest PAGE_SHIFT bits determine offset
- * into the page; the next higher PAGE_SHIFT-3 bits determine the pte#
- * in the proper pagetable (the -3 is from the 8 byte ptes, and each page
- * table is a single page long). The next higher PMD_BITS determine pmd#
- * in the proper pmdtable (where we must have PMD_BITS <= (PAGE_SHIFT-2)
- * since the pmd entries are 4 bytes, and each pmd page is a single page
- * long). Finally, the higher few bits determine pgde#.
- */
-
-/* PMD_SHIFT determines the size of the area a second-level page
- * table can map
- */
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
-#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PMD_BITS (PAGE_SHIFT - 2)
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
-#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PGDIR_BITS (PAGE_SHIFT - 2)
-
-#ifndef __ASSEMBLY__
-
-#include <linux/sched.h>
-
-/* Entries per page directory level. */
-#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
-#define PTRS_PER_PMD (1UL << PMD_BITS)
-#define PTRS_PER_PGD (1UL << PGDIR_BITS)
-
-/* Kernel has a separate 44bit address space. */
-#define FIRST_USER_ADDRESS 0
-
-#define pte_ERROR(e) __builtin_trap()
-#define pmd_ERROR(e) __builtin_trap()
-#define pgd_ERROR(e) __builtin_trap()
-
-#endif /* !(__ASSEMBLY__) */
-
-/* PTE bits which are the same in SUN4U and SUN4V format. */
-#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
-#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
-
-/* SUN4U pte bits... */
-#define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */
-#define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */
-#define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */
-#define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */
-#define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */
-#define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */
-#define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
-#define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */
-#define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
-#define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
-#define _PAGE_SZALL_4U _AC(0x6001000000000000,UL) /* All pgsz bits */
-#define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
-#define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */
-#define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */
-#define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */
-#define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */
-#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */
-#define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */
-#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
-#define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */
-#define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */
-#define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */
-#define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */
-#define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
-#define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
-#define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */
-#define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */
-#define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */
-
-/* SUN4V pte bits... */
-#define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */
-#define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
-#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */
-#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */
-#define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */
-#define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */
-#define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */
-#define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */
-#define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */
-#define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
-#define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
-#define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */
-#define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */
-#define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */
-#define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */
-#define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */
-#define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */
-#define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */
-#define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */
-#define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */
-#define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */
-#define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */
-#define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */
-#define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */
-#define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */
-#define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */
-#define _PAGE_SZALL_4V _AC(0x0000000000000007,UL) /* All pgsz bits */
-
-#if PAGE_SHIFT == 13
-#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
-#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
-#elif PAGE_SHIFT == 16
-#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U
-#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V
-#elif PAGE_SHIFT == 19
-#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U
-#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V
-#elif PAGE_SHIFT == 22
-#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U
-#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V
-#else
-#error Wrong PAGE_SHIFT specified
-#endif
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
-#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
-#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U
-#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U
-#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V
-#endif
-
-/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
-#define __P000 __pgprot(0)
-#define __P001 __pgprot(0)
-#define __P010 __pgprot(0)
-#define __P011 __pgprot(0)
-#define __P100 __pgprot(0)
-#define __P101 __pgprot(0)
-#define __P110 __pgprot(0)
-#define __P111 __pgprot(0)
-
-#define __S000 __pgprot(0)
-#define __S001 __pgprot(0)
-#define __S010 __pgprot(0)
-#define __S011 __pgprot(0)
-#define __S100 __pgprot(0)
-#define __S101 __pgprot(0)
-#define __S110 __pgprot(0)
-#define __S111 __pgprot(0)
-
-#ifndef __ASSEMBLY__
-
-extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
-
-extern unsigned long pte_sz_bits(unsigned long size);
-
-extern pgprot_t PAGE_KERNEL;
-extern pgprot_t PAGE_KERNEL_LOCKED;
-extern pgprot_t PAGE_COPY;
-extern pgprot_t PAGE_SHARED;
-
-/* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */
-extern unsigned long _PAGE_IE;
-extern unsigned long _PAGE_E;
-extern unsigned long _PAGE_CACHE;
-
-extern unsigned long pg_iobits;
-extern unsigned long _PAGE_ALL_SZ_BITS;
-extern unsigned long _PAGE_SZBITS;
-
-extern struct page *mem_map_zero;
-#define ZERO_PAGE(vaddr) (mem_map_zero)
-
-/* PFNs are real physical page numbers. However, mem_map only begins to record
- * per-page information starting at pfn_base. This is to handle systems where
- * the first physical page in the machine is at some huge physical address,
- * such as 4GB. This is common on a partitioned E10000, for example.
- */
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
-{
-        unsigned long paddr = pfn << PAGE_SHIFT;
-        unsigned long sz_bits;
-
-        sz_bits = 0UL;
-        if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
-                __asm__ __volatile__(
-                "\n661: sethi %%uhi(%1), %0\n"
-                " sllx %0, 32, %0\n"
-                " .section .sun4v_2insn_patch, \"ax\"\n"
-                " .word 661b\n"
-                " mov %2, %0\n"
-                " nop\n"
-                " .previous\n"
-                : "=r" (sz_bits)
-                : "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V));
-        }
-        return __pte(paddr | sz_bits | pgprot_val(prot));
-}
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
-/* This one can be done with two shifts. */
-static inline unsigned long pte_pfn(pte_t pte)
-{
-        unsigned long ret;
-
-        __asm__ __volatile__(
-        "\n661: sllx %1, %2, %0\n"
-        " srlx %0, %3, %0\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sllx %1, %4, %0\n"
-        " srlx %0, %5, %0\n"
-        " .previous\n"
-        : "=r" (ret)
-        : "r" (pte_val(pte)),
-        "i" (21), "i" (21 + PAGE_SHIFT),
-        "i" (8), "i" (8 + PAGE_SHIFT));
-
-        return ret;
-}
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
-{
-        unsigned long mask, tmp;
-
-        /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
-         * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
-         *
-         * Even if we use negation tricks the result is still a 6
-         * instruction sequence, so don't try to play fancy and just
-         * do the most straightforward implementation.
-         *
-         * Note: We encode this into 3 sun4v 2-insn patch sequences.
-         */
-
-        __asm__ __volatile__(
-        "\n661: sethi %%uhi(%2), %1\n"
-        " sethi %%hi(%2), %0\n"
-        "\n662: or %1, %%ulo(%2), %1\n"
-        " or %0, %%lo(%2), %0\n"
-        "\n663: sllx %1, 32, %1\n"
-        " or %0, %1, %0\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sethi %%uhi(%3), %1\n"
-        " sethi %%hi(%3), %0\n"
-        " .word 662b\n"
-        " or %1, %%ulo(%3), %1\n"
-        " or %0, %%lo(%3), %0\n"
-        " .word 663b\n"
-        " sllx %1, 32, %1\n"
-        " or %0, %1, %0\n"
-        " .previous\n"
-        : "=r" (mask), "=r" (tmp)
-        : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
-        _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
-        _PAGE_SZBITS_4U),
-        "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
-        _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
-        _PAGE_SZBITS_4V));
-
-        return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
-}
-
-static inline pte_t pgoff_to_pte(unsigned long off)
-{
-        off <<= PAGE_SHIFT;
-
-        __asm__ __volatile__(
-        "\n661: or %0, %2, %0\n"
-        " .section .sun4v_1insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " or %0, %3, %0\n"
-        " .previous\n"
-        : "=r" (off)
-        : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
-
-        return __pte(off);
-}
-
-static inline pgprot_t pgprot_noncached(pgprot_t prot)
-{
-        unsigned long val = pgprot_val(prot);
-
-        __asm__ __volatile__(
-        "\n661: andn %0, %2, %0\n"
-        " or %0, %3, %0\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " andn %0, %4, %0\n"
-        " or %0, %5, %0\n"
-        " .previous\n"
-        : "=r" (val)
-        : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
-        "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
-
-        return __pgprot(val);
-}
-/* Various pieces of code check for platform support by ifdef testing
- * on "pgprot_noncached". That's broken and should be fixed, but for
- * now...
- */
-#define pgprot_noncached pgprot_noncached
-
-#ifdef CONFIG_HUGETLB_PAGE
-static inline pte_t pte_mkhuge(pte_t pte)
-{
-        unsigned long mask;
-
-        __asm__ __volatile__(
-        "\n661: sethi %%uhi(%1), %0\n"
-        " sllx %0, 32, %0\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " mov %2, %0\n"
-        " nop\n"
-        " .previous\n"
-        : "=r" (mask)
-        : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
-
-        return __pte(pte_val(pte) | mask);
-}
-#endif
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
-        unsigned long val = pte_val(pte), tmp;
-
-        __asm__ __volatile__(
-        "\n661: or %0, %3, %0\n"
-        " nop\n"
-        "\n662: nop\n"
-        " nop\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sethi %%uhi(%4), %1\n"
-        " sllx %1, 32, %1\n"
-        " .word 662b\n"
-        " or %1, %%lo(%4), %1\n"
-        " or %0, %1, %0\n"
-        " .previous\n"
-        : "=r" (val), "=r" (tmp)
-        : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
-        "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
-
-        return __pte(val);
-}
-
-static inline pte_t pte_mkclean(pte_t pte)
-{
-        unsigned long val = pte_val(pte), tmp;
-
-        __asm__ __volatile__(
-        "\n661: andn %0, %3, %0\n"
-        " nop\n"
-        "\n662: nop\n"
-        " nop\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sethi %%uhi(%4), %1\n"
-        " sllx %1, 32, %1\n"
-        " .word 662b\n"
-        " or %1, %%lo(%4), %1\n"
-        " andn %0, %1, %0\n"
-        " .previous\n"
-        : "=r" (val), "=r" (tmp)
-        : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
-        "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
-
-        return __pte(val);
-}
-
-static inline pte_t pte_mkwrite(pte_t pte)
-{
-        unsigned long val = pte_val(pte), mask;
-
-        __asm__ __volatile__(
-        "\n661: mov %1, %0\n"
-        " nop\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sethi %%uhi(%2), %0\n"
-        " sllx %0, 32, %0\n"
-        " .previous\n"
-        : "=r" (mask)
-        : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
-
-        return __pte(val | mask);
-}
-
-static inline pte_t pte_wrprotect(pte_t pte)
-{
-        unsigned long val = pte_val(pte), tmp;
-
-        __asm__ __volatile__(
-        "\n661: andn %0, %3, %0\n"
-        " nop\n"
-        "\n662: nop\n"
-        " nop\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sethi %%uhi(%4), %1\n"
-        " sllx %1, 32, %1\n"
-        " .word 662b\n"
-        " or %1, %%lo(%4), %1\n"
-        " andn %0, %1, %0\n"
-        " .previous\n"
-        : "=r" (val), "=r" (tmp)
-        : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
-        "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
-
-        return __pte(val);
-}
-
-static inline pte_t pte_mkold(pte_t pte)
-{
-        unsigned long mask;
-
-        __asm__ __volatile__(
-        "\n661: mov %1, %0\n"
-        " nop\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sethi %%uhi(%2), %0\n"
-        " sllx %0, 32, %0\n"
-        " .previous\n"
-        : "=r" (mask)
-        : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
-
-        mask |= _PAGE_R;
-
-        return __pte(pte_val(pte) & ~mask);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
-        unsigned long mask;
-
-        __asm__ __volatile__(
-        "\n661: mov %1, %0\n"
-        " nop\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sethi %%uhi(%2), %0\n"
-        " sllx %0, 32, %0\n"
-        " .previous\n"
-        : "=r" (mask)
-        : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
-
-        mask |= _PAGE_R;
-
-        return __pte(pte_val(pte) | mask);
-}
-
-static inline pte_t pte_mkspecial(pte_t pte)
-{
-        return pte;
-}
-
-static inline unsigned long pte_young(pte_t pte)
-{
-        unsigned long mask;
-
-        __asm__ __volatile__(
-        "\n661: mov %1, %0\n"
-        " nop\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sethi %%uhi(%2), %0\n"
-        " sllx %0, 32, %0\n"
-        " .previous\n"
-        : "=r" (mask)
-        : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
-
-        return (pte_val(pte) & mask);
-}
-
-static inline unsigned long pte_dirty(pte_t pte)
-{
-        unsigned long mask;
-
-        __asm__ __volatile__(
-        "\n661: mov %1, %0\n"
-        " nop\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sethi %%uhi(%2), %0\n"
-        " sllx %0, 32, %0\n"
-        " .previous\n"
-        : "=r" (mask)
-        : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
-
-        return (pte_val(pte) & mask);
-}
-
-static inline unsigned long pte_write(pte_t pte)
-{
-        unsigned long mask;
-
-        __asm__ __volatile__(
-        "\n661: mov %1, %0\n"
-        " nop\n"
-        " .section .sun4v_2insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " sethi %%uhi(%2), %0\n"
-        " sllx %0, 32, %0\n"
-        " .previous\n"
-        : "=r" (mask)
-        : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
-
-        return (pte_val(pte) & mask);
-}
-
-static inline unsigned long pte_exec(pte_t pte)
-{
-        unsigned long mask;
-
-        __asm__ __volatile__(
-        "\n661: sethi %%hi(%1), %0\n"
-        " .section .sun4v_1insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " mov %2, %0\n"
-        " .previous\n"
-        : "=r" (mask)
-        : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
-
-        return (pte_val(pte) & mask);
-}
-
-static inline unsigned long pte_file(pte_t pte)
-{
-        unsigned long val = pte_val(pte);
-
-        __asm__ __volatile__(
-        "\n661: and %0, %2, %0\n"
-        " .section .sun4v_1insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " and %0, %3, %0\n"
-        " .previous\n"
-        : "=r" (val)
-        : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
-
-        return val;
-}
-
-static inline unsigned long pte_present(pte_t pte)
-{
-        unsigned long val = pte_val(pte);
-
-        __asm__ __volatile__(
-        "\n661: and %0, %2, %0\n"
-        " .section .sun4v_1insn_patch, \"ax\"\n"
-        " .word 661b\n"
-        " and %0, %3, %0\n"
-        " .previous\n"
-        : "=r" (val)
-        : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
-
-        return val;
-}
-
-static inline int pte_special(pte_t pte)
-{
-        return 0;
-}
-
-#define pmd_set(pmdp, ptep) \
-        (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
-#define pud_set(pudp, pmdp) \
-        (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
-#define __pmd_page(pmd) \
-        ((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL)))
-#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
-#define pud_page_vaddr(pud) \
-        ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
-#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
-#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (0)
-#define pmd_present(pmd) (pmd_val(pmd) != 0U)
-#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0U)
-#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (0)
-#define pud_present(pud) (pud_val(pud) != 0U)
-#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)
-
-/* Same in both SUN4V and SUN4U. */
-#define pte_none(pte) (!pte_val(pte))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pudp, address) \
-        ((pmd_t *) pud_page_vaddr(*(pudp)) + \
-        (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(dir, address) \
-        ((pte_t *) __pmd_page(*(dir)) + \
-        ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_offset_kernel pte_index
-#define pte_offset_map pte_index
-#define pte_offset_map_nested pte_index
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-/* Actual page table PTE updates. */
-extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
-{
-        pte_t orig = *ptep;
-
-        *ptep = pte;
-
-        /* It is more efficient to let flush_tlb_kernel_range()
-         * handle init_mm tlb flushes.
-         *
-         * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
-         * and SUN4V pte layout, so this inline test is fine.
-         */
-        if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
-                tlb_batch_add(mm, addr, ptep, orig);
-}
-
-#define pte_clear(mm,addr,ptep) \
-        set_pte_at((mm), (addr), (ptep), __pte(0UL))
-
-#ifdef DCACHE_ALIASING_POSSIBLE
-#define __HAVE_ARCH_MOVE_PTE
-#define move_pte(pte, prot, old_addr, new_addr) \
-({ \
-        pte_t newpte = (pte); \
-        if (tlb_type != hypervisor && pte_present(pte)) { \
-                unsigned long this_pfn = pte_pfn(pte); \
-                \
-                if (pfn_valid(this_pfn) && \
-                    (((old_addr) ^ (new_addr)) & (1 << 13))) \
-                        flush_dcache_page_all(current->mm, \
-                                pfn_to_page(this_pfn)); \
-        } \
-        newpte; \
-})
-#endif
-
-extern pgd_t swapper_pg_dir[2048];
-extern pmd_t swapper_low_pmd_dir[2048];
-
-extern void paging_init(void);
-extern unsigned long find_ecache_flush_span(unsigned long size);
-
-/* These do nothing with the way I have things setup. */
-#define mmu_lockarea(vaddr, len) (vaddr)
-#define mmu_unlockarea(vaddr, len) do { } while(0)
-
-struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-/* Encode and de-code a swap entry */
-#define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL)
-#define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL))
-#define __swp_entry(type, offset) \
-        ( (swp_entry_t) \
-        { \
-                (((long)(type) << PAGE_SHIFT) | \
-                ((long)(offset) << (PAGE_SHIFT + 8UL))) \
-        } )
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-/* File offset in PTE support. */
-extern unsigned long pte_file(pte_t);
-#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
-extern pte_t pgoff_to_pte(unsigned long);
-#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
-
-extern unsigned long *sparc64_valid_addr_bitmap;
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr) \
-        (test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
-
-extern int page_in_phys_avail(unsigned long paddr);
-
-extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-                              unsigned long pfn,
-                              unsigned long size, pgprot_t prot);
-
-/*
- * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
- * its high 4 bits. These macros/functions put it there or get it from there.
- */
-#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
-#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
-#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
-
-#include <asm-generic/pgtable.h>
-
-/* We provide our own get_unmapped_area to cope with VA holes and
- * SHM area cache aliasing for userland.
- */
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
-/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
- * the largest alignment possible such that larget PTEs can be used.
- */
-extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
-                                          unsigned long, unsigned long,
-                                          unsigned long);
-#define HAVE_ARCH_FB_UNMAPPED_AREA
-
-extern void pgtable_cache_init(void);
-extern void sun4v_register_fault_status(void);
-extern void sun4v_ktsb_register(void);
-extern void __init cheetah_ecache_flush_init(void);
-extern void sun4v_patch_tlb_handlers(void);
-
-extern unsigned long cmdline_memory_size;
-
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* !(_SPARC64_PGTABLE_H) */
+#include <asm-sparc/pgtable.h>