author     Paul Mackerras <paulus@samba.org>   2005-11-19 04:17:32 -0500
committer  Paul Mackerras <paulus@samba.org>   2005-11-19 04:17:32 -0500
commit     047ea7846565917c4a666635fa1fa4b5c587cd55 (patch)
tree       409c8f6ddd1f145fb364a8d6f813febd0c94d06b /include/asm-ppc64
parent     800fc3eeb0eed3bf98d621c0da24d68cabcf6526 (diff)
powerpc: Trivially merge several headers from asm-ppc64 to asm-powerpc
For these, I have just done the lame-o merge where the file ends up
looking like:

	#ifndef CONFIG_PPC64
	#include <asm-ppc/foo.h>
	#else
	... contents from asm-ppc64/foo.h
	#endif

so nothing has changed, really, except that we reduce include/asm-ppc64
a bit more.

Signed-off-by: Paul Mackerras <paulus@samba.org>
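For illustration, a wrapper produced by this pattern has roughly the shape sketched below. This is a minimal sketch only: the header-guard name is assumed for the example, and the elided body stands in for whatever each merged header actually carries over from asm-ppc64.

	/* include/asm-powerpc/io.h -- sketch of the wrapper shape described above */
	#ifndef _ASM_POWERPC_IO_H		/* guard name assumed for this example */
	#define _ASM_POWERPC_IO_H

	#ifndef CONFIG_PPC64
	/* 32-bit builds keep including the unchanged ppc header */
	#include <asm-ppc/io.h>
	#else
	/* ... contents carried over from asm-ppc64/io.h ... */
	#endif	/* CONFIG_PPC64 */

	#endif	/* _ASM_POWERPC_IO_H */

The point of the wrapper is that nothing changes for either configuration yet: 32-bit kernels still pull in the asm-ppc header, 64-bit kernels get the former asm-ppc64 contents, and the duplicate copy under include/asm-ppc64 can be deleted.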
Diffstat (limited to 'include/asm-ppc64')
-rw-r--r--  include/asm-ppc64/io.h            | 458
-rw-r--r--  include/asm-ppc64/mmu.h           | 395
-rw-r--r--  include/asm-ppc64/mmu_context.h   |  85
-rw-r--r--  include/asm-ppc64/mmzone.h        |  50
-rw-r--r--  include/asm-ppc64/pci-bridge.h    | 151
-rw-r--r--  include/asm-ppc64/pgalloc.h       | 151
-rw-r--r--  include/asm-ppc64/pgtable-4k.h    |  91
-rw-r--r--  include/asm-ppc64/pgtable-64k.h   |  90
-rw-r--r--  include/asm-ppc64/pgtable.h       | 519
9 files changed, 0 insertions, 1990 deletions
diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h
deleted file mode 100644
index 77fc07c3c6bd..000000000000
--- a/include/asm-ppc64/io.h
+++ /dev/null
@@ -1,458 +0,0 @@
1#ifndef _PPC64_IO_H
2#define _PPC64_IO_H
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <linux/config.h>
12#include <linux/compiler.h>
13#include <asm/page.h>
14#include <asm/byteorder.h>
15#ifdef CONFIG_PPC_ISERIES
16#include <asm/iseries/iseries_io.h>
17#endif
18#include <asm/synch.h>
19#include <asm/delay.h>
20
21#include <asm-generic/iomap.h>
22
23#define __ide_mm_insw(p, a, c) _insw_ns((volatile u16 __iomem *)(p), (a), (c))
24#define __ide_mm_insl(p, a, c) _insl_ns((volatile u32 __iomem *)(p), (a), (c))
25#define __ide_mm_outsw(p, a, c) _outsw_ns((volatile u16 __iomem *)(p), (a), (c))
26#define __ide_mm_outsl(p, a, c) _outsl_ns((volatile u32 __iomem *)(p), (a), (c))
27
28
29#define SIO_CONFIG_RA 0x398
30#define SIO_CONFIG_RD 0x399
31
32#define SLOW_DOWN_IO
33
34extern unsigned long isa_io_base;
35extern unsigned long pci_io_base;
36extern unsigned long io_page_mask;
37
38#define MAX_ISA_PORT 0x10000
39
40#define _IO_IS_VALID(port) ((port) >= MAX_ISA_PORT || (1 << (port>>PAGE_SHIFT)) \
41 & io_page_mask)
42
43#ifdef CONFIG_PPC_ISERIES
44/* __raw_* accessors aren't supported on iSeries */
45#define __raw_readb(addr) { BUG(); 0; }
46#define __raw_readw(addr) { BUG(); 0; }
47#define __raw_readl(addr) { BUG(); 0; }
48#define __raw_readq(addr) { BUG(); 0; }
49#define __raw_writeb(v, addr) { BUG(); 0; }
50#define __raw_writew(v, addr) { BUG(); 0; }
51#define __raw_writel(v, addr) { BUG(); 0; }
52#define __raw_writeq(v, addr) { BUG(); 0; }
53#define readb(addr) iSeries_Read_Byte(addr)
54#define readw(addr) iSeries_Read_Word(addr)
55#define readl(addr) iSeries_Read_Long(addr)
56#define writeb(data, addr) iSeries_Write_Byte((data),(addr))
57#define writew(data, addr) iSeries_Write_Word((data),(addr))
58#define writel(data, addr) iSeries_Write_Long((data),(addr))
59#define memset_io(a,b,c) iSeries_memset_io((a),(b),(c))
60#define memcpy_fromio(a,b,c) iSeries_memcpy_fromio((a), (b), (c))
61#define memcpy_toio(a,b,c) iSeries_memcpy_toio((a), (b), (c))
62
63#define inb(addr) readb(((void __iomem *)(long)(addr)))
64#define inw(addr) readw(((void __iomem *)(long)(addr)))
65#define inl(addr) readl(((void __iomem *)(long)(addr)))
66#define outb(data,addr) writeb(data,((void __iomem *)(long)(addr)))
67#define outw(data,addr) writew(data,((void __iomem *)(long)(addr)))
68#define outl(data,addr) writel(data,((void __iomem *)(long)(addr)))
69/*
70 * The *_ns versions below don't do byte-swapping.
71 * Neither do the standard versions now, these are just here
72 * for older code.
73 */
74#define insw_ns(port, buf, ns) _insw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
75#define insl_ns(port, buf, nl) _insl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
76#else
77
78static inline unsigned char __raw_readb(const volatile void __iomem *addr)
79{
80 return *(volatile unsigned char __force *)addr;
81}
82static inline unsigned short __raw_readw(const volatile void __iomem *addr)
83{
84 return *(volatile unsigned short __force *)addr;
85}
86static inline unsigned int __raw_readl(const volatile void __iomem *addr)
87{
88 return *(volatile unsigned int __force *)addr;
89}
90static inline unsigned long __raw_readq(const volatile void __iomem *addr)
91{
92 return *(volatile unsigned long __force *)addr;
93}
94static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
95{
96 *(volatile unsigned char __force *)addr = v;
97}
98static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
99{
100 *(volatile unsigned short __force *)addr = v;
101}
102static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
103{
104 *(volatile unsigned int __force *)addr = v;
105}
106static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
107{
108 *(volatile unsigned long __force *)addr = v;
109}
110#define readb(addr) eeh_readb(addr)
111#define readw(addr) eeh_readw(addr)
112#define readl(addr) eeh_readl(addr)
113#define readq(addr) eeh_readq(addr)
114#define writeb(data, addr) eeh_writeb((data), (addr))
115#define writew(data, addr) eeh_writew((data), (addr))
116#define writel(data, addr) eeh_writel((data), (addr))
117#define writeq(data, addr) eeh_writeq((data), (addr))
118#define memset_io(a,b,c) eeh_memset_io((a),(b),(c))
119#define memcpy_fromio(a,b,c) eeh_memcpy_fromio((a),(b),(c))
120#define memcpy_toio(a,b,c) eeh_memcpy_toio((a),(b),(c))
121#define inb(port) eeh_inb((unsigned long)port)
122#define outb(val, port) eeh_outb(val, (unsigned long)port)
123#define inw(port) eeh_inw((unsigned long)port)
124#define outw(val, port) eeh_outw(val, (unsigned long)port)
125#define inl(port) eeh_inl((unsigned long)port)
126#define outl(val, port) eeh_outl(val, (unsigned long)port)
127
128/*
129 * The insw/outsw/insl/outsl macros don't do byte-swapping.
130 * They are only used in practice for transferring buffers which
131 * are arrays of bytes, and byte-swapping is not appropriate in
132 * that case. - paulus */
133#define insb(port, buf, ns) eeh_insb((port), (buf), (ns))
134#define insw(port, buf, ns) eeh_insw_ns((port), (buf), (ns))
135#define insl(port, buf, nl) eeh_insl_ns((port), (buf), (nl))
136#define insw_ns(port, buf, ns) eeh_insw_ns((port), (buf), (ns))
137#define insl_ns(port, buf, nl) eeh_insl_ns((port), (buf), (nl))
138
139#define outsb(port, buf, ns) _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
140#define outsw(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
141#define outsl(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
142
143#endif
144
145#define readb_relaxed(addr) readb(addr)
146#define readw_relaxed(addr) readw(addr)
147#define readl_relaxed(addr) readl(addr)
148#define readq_relaxed(addr) readq(addr)
149
150extern void _insb(volatile u8 __iomem *port, void *buf, int ns);
151extern void _outsb(volatile u8 __iomem *port, const void *buf, int ns);
152extern void _insw(volatile u16 __iomem *port, void *buf, int ns);
153extern void _outsw(volatile u16 __iomem *port, const void *buf, int ns);
154extern void _insl(volatile u32 __iomem *port, void *buf, int nl);
155extern void _outsl(volatile u32 __iomem *port, const void *buf, int nl);
156extern void _insw_ns(volatile u16 __iomem *port, void *buf, int ns);
157extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, int ns);
158extern void _insl_ns(volatile u32 __iomem *port, void *buf, int nl);
159extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl);
160
161#define mmiowb()
162
163/*
164 * output pause versions need a delay at least for the
165 * w83c105 ide controller in a p610.
166 */
167#define inb_p(port) inb(port)
168#define outb_p(val, port) (udelay(1), outb((val), (port)))
169#define inw_p(port) inw(port)
170#define outw_p(val, port) (udelay(1), outw((val), (port)))
171#define inl_p(port) inl(port)
172#define outl_p(val, port) (udelay(1), outl((val), (port)))
173
174/*
175 * The *_ns versions below don't do byte-swapping.
176 * Neither do the standard versions now, these are just here
177 * for older code.
178 */
179#define outsw_ns(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
180#define outsl_ns(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
181
182
183#define IO_SPACE_LIMIT ~(0UL)
184
185
186#ifdef __KERNEL__
187extern int __ioremap_explicit(unsigned long p_addr, unsigned long v_addr,
188 unsigned long size, unsigned long flags);
189extern void __iomem *__ioremap(unsigned long address, unsigned long size,
190 unsigned long flags);
191
192/**
193 * ioremap - map bus memory into CPU space
194 * @address: bus address of the memory
195 * @size: size of the resource to map
196 *
197 * ioremap performs a platform specific sequence of operations to
198 * make bus memory CPU accessible via the readb/readw/readl/writeb/
199 * writew/writel functions and the other mmio helpers. The returned
200 * address is not guaranteed to be usable directly as a virtual
201 * address.
202 */
203extern void __iomem *ioremap(unsigned long address, unsigned long size);
204
205#define ioremap_nocache(addr, size) ioremap((addr), (size))
206extern int iounmap_explicit(volatile void __iomem *addr, unsigned long size);
207extern void iounmap(volatile void __iomem *addr);
208extern void __iomem * reserve_phb_iospace(unsigned long size);
209
210/**
211 * virt_to_phys - map virtual addresses to physical
212 * @address: address to remap
213 *
214 * The returned physical address is the physical (CPU) mapping for
215 * the memory address given. It is only valid to use this function on
216 * addresses directly mapped or allocated via kmalloc.
217 *
218 * This function does not give bus mappings for DMA transfers. In
219 * almost all conceivable cases a device driver should not be using
220 * this function
221 */
222static inline unsigned long virt_to_phys(volatile void * address)
223{
224 return __pa((unsigned long)address);
225}
226
227/**
228 * phys_to_virt - map physical address to virtual
229 * @address: address to remap
230 *
231 * The returned virtual address is a current CPU mapping for
232 * the memory address given. It is only valid to use this function on
233 * addresses that have a kernel mapping
234 *
235 * This function does not handle bus mappings for DMA transfers. In
236 * almost all conceivable cases a device driver should not be using
237 * this function
238 */
239static inline void * phys_to_virt(unsigned long address)
240{
241 return (void *)__va(address);
242}
243
244/*
245 * Change "struct page" to physical address.
246 */
247#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
248
249/* We do NOT want virtual merging, it would put too much pressure on
250 * our iommu allocator. Instead, we want drivers to be smart enough
251 * to coalesce sglists that happen to have been mapped in a contiguous
252 * way by the iommu
253 */
254#define BIO_VMERGE_BOUNDARY 0
255
256#endif /* __KERNEL__ */
257
258static inline void iosync(void)
259{
260 __asm__ __volatile__ ("sync" : : : "memory");
261}
262
263/* Enforce in-order execution of data I/O.
264 * No distinction between read/write on PPC; use eieio for all three.
265 */
266#define iobarrier_rw() eieio()
267#define iobarrier_r() eieio()
268#define iobarrier_w() eieio()
269
270/*
271 * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
272 * These routines do not perform EEH-related I/O address translation,
273 * and should not be used directly by device drivers. Use inb/readb
274 * instead.
275 */
276static inline int in_8(const volatile unsigned char __iomem *addr)
277{
278 int ret;
279
280 __asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
281 : "=r" (ret) : "m" (*addr));
282 return ret;
283}
284
285static inline void out_8(volatile unsigned char __iomem *addr, int val)
286{
287 __asm__ __volatile__("stb%U0%X0 %1,%0; sync"
288 : "=m" (*addr) : "r" (val));
289}
290
291static inline int in_le16(const volatile unsigned short __iomem *addr)
292{
293 int ret;
294
295 __asm__ __volatile__("lhbrx %0,0,%1; twi 0,%0,0; isync"
296 : "=r" (ret) : "r" (addr), "m" (*addr));
297 return ret;
298}
299
300static inline int in_be16(const volatile unsigned short __iomem *addr)
301{
302 int ret;
303
304 __asm__ __volatile__("lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
305 : "=r" (ret) : "m" (*addr));
306 return ret;
307}
308
309static inline void out_le16(volatile unsigned short __iomem *addr, int val)
310{
311 __asm__ __volatile__("sthbrx %1,0,%2; sync"
312 : "=m" (*addr) : "r" (val), "r" (addr));
313}
314
315static inline void out_be16(volatile unsigned short __iomem *addr, int val)
316{
317 __asm__ __volatile__("sth%U0%X0 %1,%0; sync"
318 : "=m" (*addr) : "r" (val));
319}
320
321static inline unsigned in_le32(const volatile unsigned __iomem *addr)
322{
323 unsigned ret;
324
325 __asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync"
326 : "=r" (ret) : "r" (addr), "m" (*addr));
327 return ret;
328}
329
330static inline unsigned in_be32(const volatile unsigned __iomem *addr)
331{
332 unsigned ret;
333
334 __asm__ __volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
335 : "=r" (ret) : "m" (*addr));
336 return ret;
337}
338
339static inline void out_le32(volatile unsigned __iomem *addr, int val)
340{
341 __asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr)
342 : "r" (val), "r" (addr));
343}
344
345static inline void out_be32(volatile unsigned __iomem *addr, int val)
346{
347 __asm__ __volatile__("stw%U0%X0 %1,%0; sync"
348 : "=m" (*addr) : "r" (val));
349}
350
351static inline unsigned long in_le64(const volatile unsigned long __iomem *addr)
352{
353 unsigned long tmp, ret;
354
355 __asm__ __volatile__(
356 "ld %1,0(%2)\n"
357 "twi 0,%1,0\n"
358 "isync\n"
359 "rldimi %0,%1,5*8,1*8\n"
360 "rldimi %0,%1,3*8,2*8\n"
361 "rldimi %0,%1,1*8,3*8\n"
362 "rldimi %0,%1,7*8,4*8\n"
363 "rldicl %1,%1,32,0\n"
364 "rlwimi %0,%1,8,8,31\n"
365 "rlwimi %0,%1,24,16,23\n"
366 : "=r" (ret) , "=r" (tmp) : "b" (addr) , "m" (*addr));
367 return ret;
368}
369
370static inline unsigned long in_be64(const volatile unsigned long __iomem *addr)
371{
372 unsigned long ret;
373
374 __asm__ __volatile__("ld%U1%X1 %0,%1; twi 0,%0,0; isync"
375 : "=r" (ret) : "m" (*addr));
376 return ret;
377}
378
379static inline void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
380{
381 unsigned long tmp;
382
383 __asm__ __volatile__(
384 "rldimi %0,%1,5*8,1*8\n"
385 "rldimi %0,%1,3*8,2*8\n"
386 "rldimi %0,%1,1*8,3*8\n"
387 "rldimi %0,%1,7*8,4*8\n"
388 "rldicl %1,%1,32,0\n"
389 "rlwimi %0,%1,8,8,31\n"
390 "rlwimi %0,%1,24,16,23\n"
391 "std %0,0(%3)\n"
392 "sync"
393 : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr));
394}
395
396static inline void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
397{
398 __asm__ __volatile__("std%U0%X0 %1,%0; sync" : "=m" (*addr) : "r" (val));
399}
400
401#ifndef CONFIG_PPC_ISERIES
402#include <asm/eeh.h>
403#endif
404
405#ifdef __KERNEL__
406
407/**
408 * check_signature - find BIOS signatures
409 * @io_addr: mmio address to check
410 * @signature: signature block
411 * @length: length of signature
412 *
413 * Perform a signature comparison with the mmio address io_addr. This
414 * address should have been obtained by ioremap.
415 * Returns 1 on a match.
416 */
417static inline int check_signature(const volatile void __iomem * io_addr,
418 const unsigned char *signature, int length)
419{
420 int retval = 0;
421#ifndef CONFIG_PPC_ISERIES
422 do {
423 if (readb(io_addr) != *signature)
424 goto out;
425 io_addr++;
426 signature++;
427 length--;
428 } while (length);
429 retval = 1;
430out:
431#endif
432 return retval;
433}
434
435/* Nothing to do */
436
437#define dma_cache_inv(_start,_size) do { } while (0)
438#define dma_cache_wback(_start,_size) do { } while (0)
439#define dma_cache_wback_inv(_start,_size) do { } while (0)
440
441/* Check of existence of legacy devices */
442extern int check_legacy_ioport(unsigned long base_port);
443
444
445/*
446 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
447 * access
448 */
449#define xlate_dev_mem_ptr(p) __va(p)
450
451/*
452 * Convert a virtual cached pointer to an uncached pointer
453 */
454#define xlate_dev_kmem_ptr(p) p
455
456#endif /* __KERNEL__ */
457
458#endif /* _PPC64_IO_H */
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
deleted file mode 100644
index 1a7e0afa2dc6..000000000000
--- a/include/asm-ppc64/mmu.h
+++ /dev/null
@@ -1,395 +0,0 @@
1/*
2 * PowerPC memory management structures
3 *
4 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
5 * PPC64 rework.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef _PPC64_MMU_H_
14#define _PPC64_MMU_H_
15
16#include <linux/config.h>
17#include <asm/asm-compat.h>
18#include <asm/page.h>
19
20/*
21 * Segment table
22 */
23
24#define STE_ESID_V 0x80
25#define STE_ESID_KS 0x20
26#define STE_ESID_KP 0x10
27#define STE_ESID_N 0x08
28
29#define STE_VSID_SHIFT 12
30
31/* Location of cpu0's segment table */
32#define STAB0_PAGE 0x6
33#define STAB0_PHYS_ADDR (STAB0_PAGE<<12)
34
35#ifndef __ASSEMBLY__
36extern char initial_stab[];
37#endif /* ! __ASSEMBLY */
38
39/*
40 * SLB
41 */
42
43#define SLB_NUM_BOLTED 3
44#define SLB_CACHE_ENTRIES 8
45
46/* Bits in the SLB ESID word */
47#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */
48
49/* Bits in the SLB VSID word */
50#define SLB_VSID_SHIFT 12
51#define SLB_VSID_B ASM_CONST(0xc000000000000000)
52#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
53#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
54#define SLB_VSID_KS ASM_CONST(0x0000000000000800)
55#define SLB_VSID_KP ASM_CONST(0x0000000000000400)
56#define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */
57#define SLB_VSID_L ASM_CONST(0x0000000000000100)
58#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */
59#define SLB_VSID_LP ASM_CONST(0x0000000000000030)
60#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
61#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
62#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
63#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
64#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)
65
66#define SLB_VSID_KERNEL (SLB_VSID_KP)
67#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
68
69#define SLBIE_C (0x08000000)
70
71/*
72 * Hash table
73 */
74
75#define HPTES_PER_GROUP 8
76
77#define HPTE_V_AVPN_SHIFT 7
78#define HPTE_V_AVPN ASM_CONST(0xffffffffffffff80)
79#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
80#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & HPTE_V_AVPN))
81#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
82#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
83#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
84#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)
85#define HPTE_V_VALID ASM_CONST(0x0000000000000001)
86
87#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
88#define HPTE_R_TS ASM_CONST(0x4000000000000000)
89#define HPTE_R_RPN_SHIFT 12
90#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)
91#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
92#define HPTE_R_PP ASM_CONST(0x0000000000000003)
93#define HPTE_R_N ASM_CONST(0x0000000000000004)
94
95/* Values for PP (assumes Ks=0, Kp=1) */
96/* pp0 will always be 0 for linux */
97#define PP_RWXX 0 /* Supervisor read/write, User none */
98#define PP_RWRX 1 /* Supervisor read/write, User read */
99#define PP_RWRW 2 /* Supervisor read/write, User read/write */
100#define PP_RXRX 3 /* Supervisor read, User read */
101
102#ifndef __ASSEMBLY__
103
104typedef struct {
105 unsigned long v;
106 unsigned long r;
107} hpte_t;
108
109extern hpte_t *htab_address;
110extern unsigned long htab_hash_mask;
111
112/*
113 * Page size definition
114 *
115 * shift : is the "PAGE_SHIFT" value for that page size
116 * sllp : is a bit mask with the value of SLB L || LP to be or'ed
117 * directly to a slbmte "vsid" value
118 * penc : is the HPTE encoding mask for the "LP" field:
119 *
120 */
121struct mmu_psize_def
122{
123 unsigned int shift; /* number of bits */
124 unsigned int penc; /* HPTE encoding */
125 unsigned int tlbiel; /* tlbiel supported for that page size */
126 unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
127 unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
128};
129
130#endif /* __ASSEMBLY__ */
131
132/*
133 * The kernel use the constants below to index in the page sizes array.
134 * The use of fixed constants for this purpose is better for performances
135 * of the low level hash refill handlers.
136 *
137 * A non supported page size has a "shift" field set to 0
138 *
139 * Any new page size being implemented can get a new entry in here. Whether
140 * the kernel will use it or not is a different matter though. The actual page
141 * size used by hugetlbfs is not defined here and may be made variable
142 */
143
144#define MMU_PAGE_4K 0 /* 4K */
145#define MMU_PAGE_64K 1 /* 64K */
146#define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */
147#define MMU_PAGE_1M 3 /* 1M */
148#define MMU_PAGE_16M 4 /* 16M */
149#define MMU_PAGE_16G 5 /* 16G */
150#define MMU_PAGE_COUNT 6
151
152#ifndef __ASSEMBLY__
153
154/*
155 * The current system page sizes
156 */
157extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
158extern int mmu_linear_psize;
159extern int mmu_virtual_psize;
160
161#ifdef CONFIG_HUGETLB_PAGE
162/*
163 * The page size index of the huge pages for use by hugetlbfs
164 */
165extern int mmu_huge_psize;
166
167#endif /* CONFIG_HUGETLB_PAGE */
168
169/*
170 * This function sets the AVPN and L fields of the HPTE appropriately
171 * for the page size
172 */
173static inline unsigned long hpte_encode_v(unsigned long va, int psize)
174{
175 unsigned long v =
176 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
177 v <<= HPTE_V_AVPN_SHIFT;
178 if (psize != MMU_PAGE_4K)
179 v |= HPTE_V_LARGE;
180 return v;
181}
182
183/*
184 * This function sets the ARPN, and LP fields of the HPTE appropriately
185 * for the page size. We assume the pa is already "clean" that is properly
186 * aligned for the requested page size
187 */
188static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
189{
190 unsigned long r;
191
192 /* A 4K page needs no special encoding */
193 if (psize == MMU_PAGE_4K)
194 return pa & HPTE_R_RPN;
195 else {
196 unsigned int penc = mmu_psize_defs[psize].penc;
197 unsigned int shift = mmu_psize_defs[psize].shift;
198 return (pa & ~((1ul << shift) - 1)) | (penc << 12);
199 }
200 return r;
201}
202
203/*
204 * This hashes a virtual address for a 256Mb segment only for now
205 */
206
207static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
208{
209 return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
210}
211
212extern int __hash_page_4K(unsigned long ea, unsigned long access,
213 unsigned long vsid, pte_t *ptep, unsigned long trap,
214 unsigned int local);
215extern int __hash_page_64K(unsigned long ea, unsigned long access,
216 unsigned long vsid, pte_t *ptep, unsigned long trap,
217 unsigned int local);
218struct mm_struct;
219extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
220 unsigned long ea, unsigned long vsid, int local);
221
222extern void htab_finish_init(void);
223extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
224 unsigned long pstart, unsigned long mode,
225 int psize);
226
227extern void htab_initialize(void);
228extern void htab_initialize_secondary(void);
229extern void hpte_init_native(void);
230extern void hpte_init_lpar(void);
231extern void hpte_init_iSeries(void);
232extern void mm_init_ppc64(void);
233
234extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
235 unsigned long va, unsigned long prpn,
236 unsigned long rflags,
237 unsigned long vflags, int psize);
238
239extern long native_hpte_insert(unsigned long hpte_group,
240 unsigned long va, unsigned long prpn,
241 unsigned long rflags,
242 unsigned long vflags, int psize);
243
244extern long iSeries_hpte_insert(unsigned long hpte_group,
245 unsigned long va, unsigned long prpn,
246 unsigned long rflags,
247 unsigned long vflags, int psize);
248
249extern void stabs_alloc(void);
250extern void slb_initialize(void);
251extern void stab_initialize(unsigned long stab);
252
253#endif /* __ASSEMBLY__ */
254
255/*
256 * VSID allocation
257 *
258 * We first generate a 36-bit "proto-VSID". For kernel addresses this
259 * is equal to the ESID, for user addresses it is:
260 * (context << 15) | (esid & 0x7fff)
261 *
262 * The two forms are distinguishable because the top bit is 0 for user
263 * addresses, whereas the top two bits are 1 for kernel addresses.
264 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
265 * now.
266 *
267 * The proto-VSIDs are then scrambled into real VSIDs with the
268 * multiplicative hash:
269 *
270 * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
271 * where VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
272 * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
273 *
274 * This scramble is only well defined for proto-VSIDs below
275 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
276 * reserved. VSID_MULTIPLIER is prime, so in particular it is
277 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
278 * Because the modulus is 2^n-1 we can compute it efficiently without
279 * a divide or extra multiply (see below).
280 *
281 * This scheme has several advantages over older methods:
282 *
283 * - We have VSIDs allocated for every kernel address
284 * (i.e. everything above 0xC000000000000000), except the very top
285 * segment, which simplifies several things.
286 *
287 * - We allow for 15 significant bits of ESID and 20 bits of
288 * context for user addresses. i.e. 8T (43 bits) of address space for
289 * up to 1M contexts (although the page table structure and context
290 * allocation will need changes to take advantage of this).
291 *
292 * - The scramble function gives robust scattering in the hash
293 * table (at least based on some initial results). The previous
294 * method was more susceptible to pathological cases giving excessive
295 * hash collisions.
296 */
297/*
298 * WARNING - If you change these you must make sure the asm
299 * implementations in slb_allocate (slb_low.S), do_stab_bolted
300 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
301 *
302 * You'll also need to change the precomputed VSID values in head.S
303 * which are used by the iSeries firmware.
304 */
305
306#define VSID_MULTIPLIER ASM_CONST(200730139) /* 28-bit prime */
307#define VSID_BITS 36
308#define VSID_MODULUS ((1UL<<VSID_BITS)-1)
309
310#define CONTEXT_BITS 19
311#define USER_ESID_BITS 16
312
313#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
314
315/*
316 * This macro generates asm code to compute the VSID scramble
317 * function. Used in slb_allocate() and do_stab_bolted. The function
318 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
319 *
320 * rt = register continaing the proto-VSID and into which the
321 * VSID will be stored
322 * rx = scratch register (clobbered)
323 *
324 * - rt and rx must be different registers
325 * - The answer will end up in the low 36 bits of rt. The higher
326 * bits may contain other garbage, so you may need to mask the
327 * result.
328 */
329#define ASM_VSID_SCRAMBLE(rt, rx) \
330 lis rx,VSID_MULTIPLIER@h; \
331 ori rx,rx,VSID_MULTIPLIER@l; \
332 mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \
333 \
334 srdi rx,rt,VSID_BITS; \
335 clrldi rt,rt,(64-VSID_BITS); \
336 add rt,rt,rx; /* add high and low bits */ \
337 /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
338 * 2^36-1+2^28-1. That in particular means that if r3 >= \
339 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
340 * the bit clear, r3 already has the answer we want, if it \
341 * doesn't, the answer is the low 36 bits of r3+1. So in all \
342 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
343 addi rx,rt,1; \
344 srdi rx,rx,VSID_BITS; /* extract 2^36 bit */ \
345 add rt,rt,rx
346
347
348#ifndef __ASSEMBLY__
349
350typedef unsigned long mm_context_id_t;
351
352typedef struct {
353 mm_context_id_t id;
354#ifdef CONFIG_HUGETLB_PAGE
355 u16 low_htlb_areas, high_htlb_areas;
356#endif
357} mm_context_t;
358
359
360static inline unsigned long vsid_scramble(unsigned long protovsid)
361{
362#if 0
363 /* The code below is equivalent to this function for arguments
364 * < 2^VSID_BITS, which is all this should ever be called
365 * with. However gcc is not clever enough to compute the
366 * modulus (2^n-1) without a second multiply. */
367 return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
368#else /* 1 */
369 unsigned long x;
370
371 x = protovsid * VSID_MULTIPLIER;
372 x = (x >> VSID_BITS) + (x & VSID_MODULUS);
373 return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
374#endif /* 1 */
375}
376
377/* This is only valid for addresses >= KERNELBASE */
378static inline unsigned long get_kernel_vsid(unsigned long ea)
379{
380 return vsid_scramble(ea >> SID_SHIFT);
381}
382
383/* This is only valid for user addresses (which are below 2^41) */
384static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
385{
386 return vsid_scramble((context << USER_ESID_BITS)
387 | (ea >> SID_SHIFT));
388}
389
390#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
391#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
392
393#endif /* __ASSEMBLY */
394
395#endif /* _PPC64_MMU_H_ */
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
deleted file mode 100644
index 4f512e9fa6b8..000000000000
--- a/include/asm-ppc64/mmu_context.h
+++ /dev/null
@@ -1,85 +0,0 @@
1#ifndef __PPC64_MMU_CONTEXT_H
2#define __PPC64_MMU_CONTEXT_H
3
4#include <linux/config.h>
5#include <linux/kernel.h>
6#include <linux/mm.h>
7#include <asm/mmu.h>
8#include <asm/cputable.h>
9
10/*
11 * Copyright (C) 2001 PPC 64 Team, IBM Corp
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19/*
20 * Getting into a kernel thread, there is no valid user segment, mark
21 * paca->pgdir NULL so that SLB miss on user addresses will fault
22 */
23static inline void enter_lazy_tlb(struct mm_struct *mm,
24 struct task_struct *tsk)
25{
26#ifdef CONFIG_PPC_64K_PAGES
27 get_paca()->pgdir = NULL;
28#endif /* CONFIG_PPC_64K_PAGES */
29}
30
31#define NO_CONTEXT 0
32#define MAX_CONTEXT (0x100000-1)
33
34extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
35extern void destroy_context(struct mm_struct *mm);
36
37extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
38extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
39
40/*
41 * switch_mm is the entry point called from the architecture independent
42 * code in kernel/sched.c
43 */
44static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
45 struct task_struct *tsk)
46{
47 if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
48 cpu_set(smp_processor_id(), next->cpu_vm_mask);
49
50 /* No need to flush userspace segments if the mm doesnt change */
51#ifdef CONFIG_PPC_64K_PAGES
52 if (prev == next && get_paca()->pgdir == next->pgd)
53 return;
54#else
55 if (prev == next)
56 return;
57#endif /* CONFIG_PPC_64K_PAGES */
58
59#ifdef CONFIG_ALTIVEC
60 if (cpu_has_feature(CPU_FTR_ALTIVEC))
61 asm volatile ("dssall");
62#endif /* CONFIG_ALTIVEC */
63
64 if (cpu_has_feature(CPU_FTR_SLB))
65 switch_slb(tsk, next);
66 else
67 switch_stab(tsk, next);
68}
69
70#define deactivate_mm(tsk,mm) do { } while (0)
71
72/*
73 * After we have set current->mm to a new value, this activates
74 * the context for the new mm so we see the new mappings.
75 */
76static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
77{
78 unsigned long flags;
79
80 local_irq_save(flags);
81 switch_mm(prev, next, current);
82 local_irq_restore(flags);
83}
84
85#endif /* __PPC64_MMU_CONTEXT_H */
diff --git a/include/asm-ppc64/mmzone.h b/include/asm-ppc64/mmzone.h
deleted file mode 100644
index 54958d6cae04..000000000000
--- a/include/asm-ppc64/mmzone.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
3 *
4 * PowerPC64 port:
5 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
6 */
7#ifndef _ASM_MMZONE_H_
8#define _ASM_MMZONE_H_
9
10#include <linux/config.h>
11
12/*
13 * generic non-linear memory support:
14 *
15 * 1) we will not split memory into more chunks than will fit into the
16 * flags field of the struct page
17 */
18
19#ifdef CONFIG_NEED_MULTIPLE_NODES
20
21extern struct pglist_data *node_data[];
22/*
23 * Return a pointer to the node data for node n.
24 */
25#define NODE_DATA(nid) (node_data[nid])
26
27/*
28 * Following are specific to this numa platform.
29 */
30
31extern int numa_cpu_lookup_table[];
32extern cpumask_t numa_cpumask_lookup_table[];
33#ifdef CONFIG_MEMORY_HOTPLUG
34extern unsigned long max_pfn;
35#endif
36
37/*
38 * Following are macros that each numa implmentation must define.
39 */
40
41#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
42#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
43
44#endif /* CONFIG_NEED_MULTIPLE_NODES */
45
46#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
47extern int __init early_pfn_to_nid(unsigned long pfn);
48#endif
49
50#endif /* _ASM_MMZONE_H_ */
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-ppc64/pci-bridge.h
deleted file mode 100644
index cf04327a597a..000000000000
--- a/include/asm-ppc64/pci-bridge.h
+++ /dev/null
@@ -1,151 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _ASM_PCI_BRIDGE_H
3#define _ASM_PCI_BRIDGE_H
4
5#include <linux/config.h>
6#include <linux/pci.h>
7#include <linux/list.h>
8
9/*
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16/*
17 * Structure of a PCI controller (host bridge)
18 */
19struct pci_controller {
20 struct pci_bus *bus;
21 char is_dynamic;
22 void *arch_data;
23 struct list_head list_node;
24
25 int first_busno;
26 int last_busno;
27
28 void __iomem *io_base_virt;
29 unsigned long io_base_phys;
30
31 /* Some machines have a non 1:1 mapping of
32 * the PCI memory space in the CPU bus space
33 */
34 unsigned long pci_mem_offset;
35 unsigned long pci_io_size;
36
37 struct pci_ops *ops;
38 volatile unsigned int __iomem *cfg_addr;
39 volatile void __iomem *cfg_data;
40
41 /* Currently, we limit ourselves to 1 IO range and 3 mem
42 * ranges since the common pci_bus structure can't handle more
43 */
44 struct resource io_resource;
45 struct resource mem_resources[3];
46 int global_number;
47 int local_number;
48 unsigned long buid;
49 unsigned long dma_window_base_cur;
50 unsigned long dma_window_size;
51};
52
53/*
54 * PCI stuff, for nodes representing PCI devices, pointed to
55 * by device_node->data.
56 */
57struct pci_controller;
58struct iommu_table;
59
60struct pci_dn {
61 int busno; /* for pci devices */
62 int bussubno; /* for pci devices */
63 int devfn; /* for pci devices */
64
65#ifdef CONFIG_PPC_PSERIES
66 int eeh_mode; /* See eeh.h for possible EEH_MODEs */
67 int eeh_config_addr;
68 int eeh_check_count; /* # times driver ignored error */
69 int eeh_freeze_count; /* # times this device froze up. */
70 int eeh_is_bridge; /* device is pci-to-pci bridge */
71#endif
72 int pci_ext_config_space; /* for pci devices */
73 struct pci_controller *phb; /* for pci devices */
74 struct iommu_table *iommu_table; /* for phb's or bridges */
75 struct pci_dev *pcidev; /* back-pointer to the pci device */
76 struct device_node *node; /* back-pointer to the device_node */
77#ifdef CONFIG_PPC_ISERIES
78 struct list_head Device_List;
79 int Irq; /* Assigned IRQ */
80 int Flags; /* Possible flags(disable/bist)*/
81 u8 LogicalSlot; /* Hv Slot Index for Tces */
82#endif
83 u32 config_space[16]; /* saved PCI config space */
84};
85
86/* Get the pointer to a device_node's pci_dn */
87#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
88
89struct device_node *fetch_dev_dn(struct pci_dev *dev);
90
91/* Get a device_node from a pci_dev. This code must be fast except
92 * in the case where the sysdata is incorrect and needs to be fixed
93 * up (this will only happen once).
94 * In this case the sysdata will have been inherited from a PCI host
95 * bridge or a PCI-PCI bridge further up the tree, so it will point
96 * to a valid struct pci_dn, just not the one we want.
97 */
98static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
99{
100 struct device_node *dn = dev->sysdata;
101 struct pci_dn *pdn = dn->data;
102
103 if (pdn && pdn->devfn == dev->devfn && pdn->busno == dev->bus->number)
104 return dn; /* fast path. sysdata is good */
105 return fetch_dev_dn(dev);
106}
107
108static inline int pci_device_from_OF_node(struct device_node *np,
109 u8 *bus, u8 *devfn)
110{
111 if (!PCI_DN(np))
112 return -ENODEV;
113 *bus = PCI_DN(np)->busno;
114 *devfn = PCI_DN(np)->devfn;
115 return 0;
116}
117
118static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
119{
120 if (bus->self)
121 return pci_device_to_OF_node(bus->self);
122 else
123 return bus->sysdata; /* Must be root bus (PHB) */
124}
125
126extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
127 struct device_node *dev, int primary);
128
129extern int pcibios_remove_root_bus(struct pci_controller *phb);
130
131extern void phbs_remap_io(void);
132
133static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
134{
135 struct device_node *busdn = bus->sysdata;
136
137 BUG_ON(busdn == NULL);
138 return PCI_DN(busdn)->phb;
139}
140
141extern struct pci_controller *
142pcibios_alloc_controller(struct device_node *dev);
143extern void pcibios_free_controller(struct pci_controller *phb);
144
145/* Return values for ppc_md.pci_probe_mode function */
146#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
147#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
148#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
149
150#endif
151#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
deleted file mode 100644
index dcf3622d1946..000000000000
--- a/include/asm-ppc64/pgalloc.h
+++ /dev/null
@@ -1,151 +0,0 @@
1#ifndef _PPC64_PGALLOC_H
2#define _PPC64_PGALLOC_H
3
4#include <linux/mm.h>
5#include <linux/slab.h>
6#include <linux/cpumask.h>
7#include <linux/percpu.h>
8
9extern kmem_cache_t *pgtable_cache[];
10
11#ifdef CONFIG_PPC_64K_PAGES
12#define PTE_CACHE_NUM 0
13#define PMD_CACHE_NUM 1
14#define PGD_CACHE_NUM 2
15#else
16#define PTE_CACHE_NUM 0
17#define PMD_CACHE_NUM 1
18#define PUD_CACHE_NUM 1
19#define PGD_CACHE_NUM 0
20#endif
21
22/*
23 * This program is free software; you can redistribute it and/or
24 * modify it under the terms of the GNU General Public License
25 * as published by the Free Software Foundation; either version
26 * 2 of the License, or (at your option) any later version.
27 */
28
29static inline pgd_t *pgd_alloc(struct mm_struct *mm)
30{
31 return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
32}
33
34static inline void pgd_free(pgd_t *pgd)
35{
36 kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
37}
38
39#ifndef CONFIG_PPC_64K_PAGES
40
41#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
42
43static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
44{
45 return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
46 GFP_KERNEL|__GFP_REPEAT);
47}
48
49static inline void pud_free(pud_t *pud)
50{
51 kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
52}
53
54static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
55{
56 pud_set(pud, (unsigned long)pmd);
57}
58
59#define pmd_populate(mm, pmd, pte_page) \
60 pmd_populate_kernel(mm, pmd, page_address(pte_page))
61#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
62
63
64#else /* CONFIG_PPC_64K_PAGES */
65
66#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
67
68static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
69 pte_t *pte)
70{
71 pmd_set(pmd, (unsigned long)pte);
72}
73
74#define pmd_populate(mm, pmd, pte_page) \
75 pmd_populate_kernel(mm, pmd, page_address(pte_page))
76
77#endif /* CONFIG_PPC_64K_PAGES */
78
79static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
80{
81 return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
82 GFP_KERNEL|__GFP_REPEAT);
83}
84
85static inline void pmd_free(pmd_t *pmd)
86{
87 kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
88}
89
90static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
91 unsigned long address)
92{
93 return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
94 GFP_KERNEL|__GFP_REPEAT);
95}
96
97static inline struct page *pte_alloc_one(struct mm_struct *mm,
98 unsigned long address)
99{
100 return virt_to_page(pte_alloc_one_kernel(mm, address));
101}
102
103static inline void pte_free_kernel(pte_t *pte)
104{
105 kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
106}
107
108static inline void pte_free(struct page *ptepage)
109{
110 pte_free_kernel(page_address(ptepage));
111}
112
113#define PGF_CACHENUM_MASK 0xf
114
115typedef struct pgtable_free {
116 unsigned long val;
117} pgtable_free_t;
118
119static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
120 unsigned long mask)
121{
122 BUG_ON(cachenum > PGF_CACHENUM_MASK);
123
124 return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
125}
126
127static inline void pgtable_free(pgtable_free_t pgf)
128{
129 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
130 int cachenum = pgf.val & PGF_CACHENUM_MASK;
131
132 kmem_cache_free(pgtable_cache[cachenum], p);
133}
134
135extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
136
137#define __pte_free_tlb(tlb, ptepage) \
138 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
139 PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
140#define __pmd_free_tlb(tlb, pmd) \
141 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
142 PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
143#ifndef CONFIG_PPC_64K_PAGES
144#define __pud_free_tlb(tlb, pmd) \
145 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
146 PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
147#endif /* CONFIG_PPC_64K_PAGES */
148
149#define check_pgt_cache() do { } while (0)
150
151#endif /* _PPC64_PGALLOC_H */
diff --git a/include/asm-ppc64/pgtable-4k.h b/include/asm-ppc64/pgtable-4k.h
deleted file mode 100644
index e9590c06ad92..000000000000
--- a/include/asm-ppc64/pgtable-4k.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * Entries per page directory level. The PTE level must use a 64b record
3 * for each page table entry. The PMD and PGD level use a 32b record for
4 * each entry by assuming that each entry is page aligned.
5 */
6#define PTE_INDEX_SIZE 9
7#define PMD_INDEX_SIZE 7
8#define PUD_INDEX_SIZE 7
9#define PGD_INDEX_SIZE 9
10
11#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
12#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
13#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
14#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
15
16#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
17#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
18#define PTRS_PER_PUD (1 << PMD_INDEX_SIZE)
19#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
20
21/* PMD_SHIFT determines what a second-level page table entry can map */
22#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
23#define PMD_SIZE (1UL << PMD_SHIFT)
24#define PMD_MASK (~(PMD_SIZE-1))
25
26/* With 4k base page size, hugepage PTEs go at the PMD level */
27#define MIN_HUGEPTE_SHIFT PMD_SHIFT
28
29/* PUD_SHIFT determines what a third-level page table entry can map */
30#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
31#define PUD_SIZE (1UL << PUD_SHIFT)
32#define PUD_MASK (~(PUD_SIZE-1))
33
34/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
35#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)
36#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
37#define PGDIR_MASK (~(PGDIR_SIZE-1))
38
39/* PTE bits */
40#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */
41#define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */
42#define _PAGE_F_SECOND _PAGE_SECONDARY
43#define _PAGE_F_GIX _PAGE_GROUP_IX
44
45/* PTE flags to conserve for HPTE identification */
46#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
47 _PAGE_SECONDARY | _PAGE_GROUP_IX)
48
49/* PAGE_MASK gives the right answer below, but only by accident */
50/* It should be preserving the high 48 bits and then specifically */
51/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
52#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
53 _PAGE_HPTEFLAGS)
54
55/* Bits to mask out from a PMD to get to the PTE page */
56#define PMD_MASKED_BITS 0
57/* Bits to mask out from a PUD to get to the PMD page */
58#define PUD_MASKED_BITS 0
59/* Bits to mask out from a PGD to get to the PUD page */
60#define PGD_MASKED_BITS 0
61
62/* shift to put page number into pte */
63#define PTE_RPN_SHIFT (17)
64
65#define __real_pte(e,p) ((real_pte_t)(e))
66#define __rpte_to_pte(r) (r)
67#define __rpte_to_hidx(r,index) (pte_val((r)) >> 12)
68
69#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
70 do { \
71 index = 0; \
72 shift = mmu_psize_defs[psize].shift; \
73
74#define pte_iterate_hashed_end() } while(0)
75
76/*
77 * 4-level page tables related bits
78 */
79
80#define pgd_none(pgd) (!pgd_val(pgd))
81#define pgd_bad(pgd) (pgd_val(pgd) == 0)
82#define pgd_present(pgd) (pgd_val(pgd) != 0)
83#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0)
84#define pgd_page(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS)
85
86#define pud_offset(pgdp, addr) \
87 (((pud_t *) pgd_page(*(pgdp))) + \
88 (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
89
90#define pud_ERROR(e) \
91 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pud_val(e))
diff --git a/include/asm-ppc64/pgtable-64k.h b/include/asm-ppc64/pgtable-64k.h
deleted file mode 100644
index 154f1840ece4..000000000000
--- a/include/asm-ppc64/pgtable-64k.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#include <asm-generic/pgtable-nopud.h>
2
3
4#define PTE_INDEX_SIZE 12
5#define PMD_INDEX_SIZE 12
6#define PUD_INDEX_SIZE 0
7#define PGD_INDEX_SIZE 4
8
9#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
10#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
11#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
12
13#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
14#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
15#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
16
17/* With 4k base page size, hugepage PTEs go at the PMD level */
18#define MIN_HUGEPTE_SHIFT PAGE_SHIFT
19
20/* PMD_SHIFT determines what a second-level page table entry can map */
21#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
22#define PMD_SIZE (1UL << PMD_SHIFT)
23#define PMD_MASK (~(PMD_SIZE-1))
24
25/* PGDIR_SHIFT determines what a third-level page table entry can map */
26#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
27#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
28#define PGDIR_MASK (~(PGDIR_SIZE-1))
29
30/* Additional PTE bits (don't change without checking asm in hash_low.S) */
31#define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */
32#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */
33#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */
34#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */
35#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */
36
37/* PTE flags to conserve for HPTE identification */
38#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_HPTE_SUB |\
39 _PAGE_COMBO)
40
41/* Shift to put page number into pte.
42 *
43 * That gives us a max RPN of 32 bits, which means a max of 48 bits
44 * of addressable physical space.
45 * We could get 3 more bits here by setting PTE_RPN_SHIFT to 29 but
46 * 32 makes PTEs more readable for debugging for now :)
47 */
48#define PTE_RPN_SHIFT (32)
49#define PTE_RPN_MAX (1UL << (64 - PTE_RPN_SHIFT))
50#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
51
52/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
53 * pgprot changes
54 */
55#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
56 _PAGE_ACCESSED)
57
58/* Bits to mask out from a PMD to get to the PTE page */
59#define PMD_MASKED_BITS 0x1ff
60/* Bits to mask out from a PGD/PUD to get to the PMD page */
61#define PUD_MASKED_BITS 0x1ff
62
63#ifndef __ASSEMBLY__
64
65/* Manipulate "rpte" values */
66#define __real_pte(e,p) ((real_pte_t) { \
67 (e), pte_val(*((p) + PTRS_PER_PTE)) })
68#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
69 (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
70#define __rpte_to_pte(r) ((r).pte)
71#define __rpte_sub_valid(rpte, index) \
72 (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
73
74
 75/* Trick: we set __end to va + 64k, which happens to work for
76 * a 16M page as well as we want only one iteration
77 */
78#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
79 do { \
80 unsigned long __end = va + PAGE_SIZE; \
81 unsigned __split = (psize == MMU_PAGE_4K || \
82 psize == MMU_PAGE_64K_AP); \
83 shift = mmu_psize_defs[psize].shift; \
84 for (index = 0; va < __end; index++, va += (1 << shift)) { \
85 if (!__split || __rpte_sub_valid(rpte, index)) do { \
86
87#define pte_iterate_hashed_end() } while(0); } } while(0)
88
89
90#endif /* __ASSEMBLY__ */
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
deleted file mode 100644
index dee36c83be19..000000000000
--- a/include/asm-ppc64/pgtable.h
+++ /dev/null
@@ -1,519 +0,0 @@
1#ifndef _PPC64_PGTABLE_H
2#define _PPC64_PGTABLE_H
3
4/*
5 * This file contains the functions and defines necessary to modify and use
6 * the ppc64 hashed page table.
7 */
8
9#ifndef __ASSEMBLY__
10#include <linux/config.h>
11#include <linux/stddef.h>
12#include <asm/processor.h> /* For TASK_SIZE */
13#include <asm/mmu.h>
14#include <asm/page.h>
15#include <asm/tlbflush.h>
16struct mm_struct;
17#endif /* __ASSEMBLY__ */
18
19#ifdef CONFIG_PPC_64K_PAGES
20#include <asm/pgtable-64k.h>
21#else
22#include <asm/pgtable-4k.h>
23#endif
24
25#define FIRST_USER_ADDRESS 0
26
27/*
28 * Size of EA range mapped by our pagetables.
29 */
30#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
31 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
32#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
33
34#if TASK_SIZE_USER64 > PGTABLE_RANGE
35#error TASK_SIZE_USER64 exceeds pagetable range
36#endif
37
38#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
39#error TASK_SIZE_USER64 exceeds user VSID range
40#endif
41
42/*
43 * Define the address range of the vmalloc VM area.
44 */
45#define VMALLOC_START (0xD000000000000000ul)
46#define VMALLOC_SIZE (0x80000000000UL)
47#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
48
49/*
50 * Define the address range of the imalloc VM area.
51 */
52#define PHBS_IO_BASE VMALLOC_END
53#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
54#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
55
56/*
57 * Common bits in a linux-style PTE. These match the bits in the
58 * (hardware-defined) PowerPC PTE as closely as possible. Additional
59 * bits may be defined in pgtable-*.h
60 */
61#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
62#define _PAGE_USER 0x0002 /* matches one of the PP bits */
63#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
64#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
65#define _PAGE_GUARDED 0x0008
66#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
67#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
68#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
69#define _PAGE_DIRTY 0x0080 /* C: page changed */
70#define _PAGE_ACCESSED 0x0100 /* R: page referenced */
71#define _PAGE_RW 0x0200 /* software: user write access allowed */
72#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */
73#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
74
75#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
76
77#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
78
79/* __pgprot defined in asm-ppc64/page.h */
80#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
81
82#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
83#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
84#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
85#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
86#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
87#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
88#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
89#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
90 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
91#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
92
93#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
94#define HAVE_PAGE_AGP
95
96/* PTEIDX nibble */
97#define _PTEIDX_SECONDARY 0x8
98#define _PTEIDX_GROUP_IX 0x7
99
100
101/*
102 * POWER4 and newer have per page execute protection, older chips can only
103 * do this on a segment (256MB) basis.
104 *
105 * Also, write permissions imply read permissions.
106 * This is the closest we can get..
107 *
108 * Note due to the way vm flags are laid out, the bits are XWR
109 */
110#define __P000 PAGE_NONE
111#define __P001 PAGE_READONLY
112#define __P010 PAGE_COPY
113#define __P011 PAGE_COPY
114#define __P100 PAGE_READONLY_X
115#define __P101 PAGE_READONLY_X
116#define __P110 PAGE_COPY_X
117#define __P111 PAGE_COPY_X
118
119#define __S000 PAGE_NONE
120#define __S001 PAGE_READONLY
121#define __S010 PAGE_SHARED
122#define __S011 PAGE_SHARED
123#define __S100 PAGE_READONLY_X
124#define __S101 PAGE_READONLY_X
125#define __S110 PAGE_SHARED_X
126#define __S111 PAGE_SHARED_X
127
128#ifndef __ASSEMBLY__
129
130/*
131 * ZERO_PAGE is a global shared page that is always zero: used
132 * for zero-mapped memory areas etc..
133 */
134extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
135#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
136#endif /* __ASSEMBLY__ */
137
138#ifdef CONFIG_HUGETLB_PAGE
139
140#define HAVE_ARCH_UNMAPPED_AREA
141#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
142
143#endif
144
145#ifndef __ASSEMBLY__
146
147/*
148 * Conversion functions: convert a page and protection to a page entry,
149 * and a page entry and page directory to the page they refer to.
150 *
151 * mk_pte takes a (struct page *) as input
152 */
153#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
154
155static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
156{
157 pte_t pte;
158
159
160 pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
161 return pte;
162}
163
164#define pte_modify(_pte, newprot) \
165 (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
166
167#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
168#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
169
170/* pte_clear moved to later in this file */
171
172#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
173#define pte_page(x) pfn_to_page(pte_pfn(x))
174
175#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
176#define pmd_none(pmd) (!pmd_val(pmd))
177#define pmd_bad(pmd) (pmd_val(pmd) == 0)
178#define pmd_present(pmd) (pmd_val(pmd) != 0)
179#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
180#define pmd_page_kernel(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
181#define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd))
182
183#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval))
184#define pud_none(pud) (!pud_val(pud))
185#define pud_bad(pud) ((pud_val(pud)) == 0)
186#define pud_present(pud) (pud_val(pud) != 0)
187#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
188#define pud_page(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
189
190#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
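/*
 * A minimal sketch (not part of the original header): the pmd/pud/pgd
 * entries above hold the kernel virtual address of the next-level table,
 * with a few low status bits folded in, which is why pmd_page_kernel() and
 * pud_page() mask with ~PMD_MASKED_BITS/~PUD_MASKED_BITS before the value
 * is used as a pointer.  The helper name is hypothetical.
 */
static inline pte_t *sketch_pmd_to_pte_table(pmd_t pmd)
{
	/* same masking as pmd_page_kernel(), spelled out as a cast */
	return (pte_t *)(pmd_val(pmd) & ~PMD_MASKED_BITS);
}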
191
192/*
193 * Find an entry in a page-table-directory. We combine the address region
194 * (the high order N bits) and the pgd portion of the address.
195 */
196/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
197#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
198
199#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
200
201#define pmd_offset(pudp,addr) \
202 (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
203
204#define pte_offset_kernel(dir,addr) \
205 (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
206
207#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
208#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
209#define pte_unmap(pte) do { } while(0)
210#define pte_unmap_nested(pte) do { } while(0)
211
212/* to find an entry in a kernel page-table-directory */
213/* This now only contains the vmalloc pages */
214#define pgd_offset_k(address) pgd_offset(&init_mm, address)
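/*
 * A minimal sketch (not part of the original header) of the pattern shared
 * by pgd_index(), pmd_offset() and pte_offset_kernel() above: shift the
 * effective address down to the level's granularity, then mask to that
 * table's index width (9 bits for the pgd, hence the 0x1ff above; the other
 * widths come from pgtable-4k.h/pgtable-64k.h).  The helper is hypothetical.
 */
static inline unsigned long sketch_table_index(unsigned long addr,
					       unsigned int shift,
					       unsigned int index_bits)
{
	return (addr >> shift) & ((1UL << index_bits) - 1);
}
/* e.g. pgd_index(addr) == sketch_table_index(addr, PGDIR_SHIFT, 9) */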
215
216/*
217 * The following only work if pte_present() is true.
218 * Undefined behaviour if not..
219 */
220static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;}
221static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
222static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
223static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
224static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
225static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
226
227static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
228static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
229
230static inline pte_t pte_rdprotect(pte_t pte) {
231 pte_val(pte) &= ~_PAGE_USER; return pte; }
232static inline pte_t pte_exprotect(pte_t pte) {
233 pte_val(pte) &= ~_PAGE_EXEC; return pte; }
234static inline pte_t pte_wrprotect(pte_t pte) {
235 pte_val(pte) &= ~(_PAGE_RW); return pte; }
236static inline pte_t pte_mkclean(pte_t pte) {
237 pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
238static inline pte_t pte_mkold(pte_t pte) {
239 pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
240static inline pte_t pte_mkread(pte_t pte) {
241 pte_val(pte) |= _PAGE_USER; return pte; }
242static inline pte_t pte_mkexec(pte_t pte) {
243 pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
244static inline pte_t pte_mkwrite(pte_t pte) {
245 pte_val(pte) |= _PAGE_RW; return pte; }
246static inline pte_t pte_mkdirty(pte_t pte) {
247 pte_val(pte) |= _PAGE_DIRTY; return pte; }
248static inline pte_t pte_mkyoung(pte_t pte) {
249 pte_val(pte) |= _PAGE_ACCESSED; return pte; }
250static inline pte_t pte_mkhuge(pte_t pte) {
251 return pte; }
252
253/* Atomic PTE updates */
254static inline unsigned long pte_update(pte_t *p, unsigned long clr)
255{
256 unsigned long old, tmp;
257
258 __asm__ __volatile__(
259 "1: ldarx %0,0,%3 # pte_update\n\
260 andi. %1,%0,%6\n\
261 bne- 1b \n\
262 andc %1,%0,%4 \n\
263 stdcx. %1,0,%3 \n\
264 bne- 1b"
265 : "=&r" (old), "=&r" (tmp), "=m" (*p)
266 : "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
267 : "cc" );
268 return old;
269}
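/*
 * A minimal sketch (not part of the original header) of what the
 * ldarx/stdcx. loop above does, re-expressed with GCC's __atomic builtins:
 * spin while the hash code holds _PAGE_BUSY, then atomically clear the
 * requested bits and return the old value.  __ptep_set_access_flags()
 * further down uses the same loop shape with an OR instead of the andc.
 * The kernel itself relies on the inline asm, not on these builtins.
 */
static inline unsigned long sketch_pte_update(unsigned long *p,
					      unsigned long clr)
{
	unsigned long old;

	for (;;) {
		old = __atomic_load_n(p, __ATOMIC_RELAXED);
		if (old & _PAGE_BUSY)
			continue;	/* hash code owns the PTE: spin */
		/* try to install old & ~clr; retry if another CPU raced us */
		if (__atomic_compare_exchange_n(p, &old, old & ~clr, 0,
						__ATOMIC_RELAXED,
						__ATOMIC_RELAXED))
			return old;
	}
}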
270
271/* PTE updating function: this puts the PTE in the flush
272 * batch but doesn't actually trigger the hash flush immediately;
273 * you need to call flush_tlb_pending() to do that.
274 * Pass -1 for "normal" size (4K or 64K).
275 */
276extern void hpte_update(struct mm_struct *mm, unsigned long addr,
277 pte_t *ptep, unsigned long pte, int huge);
278
279static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
280 unsigned long addr, pte_t *ptep)
281{
282 unsigned long old;
283
284 if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
285 return 0;
286 old = pte_update(ptep, _PAGE_ACCESSED);
287 if (old & _PAGE_HASHPTE) {
288 hpte_update(mm, addr, ptep, old, 0);
289 flush_tlb_pending();
290 }
291 return (old & _PAGE_ACCESSED) != 0;
292}
293#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
294#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
295({ \
296 int __r; \
297 __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
298 __r; \
299})
300
301/*
302 * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
303 * moment we always flush but we need to fix hpte_update and test if the
304 * optimisation is worth it.
305 */
306static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
307 unsigned long addr, pte_t *ptep)
308{
309 unsigned long old;
310
311 if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
312 return 0;
313 old = pte_update(ptep, _PAGE_DIRTY);
314 if (old & _PAGE_HASHPTE)
315 hpte_update(mm, addr, ptep, old, 0);
316 return (old & _PAGE_DIRTY) != 0;
317}
318#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
319#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \
320({ \
321 int __r; \
322 __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
323 __r; \
324})
325
326#define __HAVE_ARCH_PTEP_SET_WRPROTECT
327static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
328 pte_t *ptep)
329{
330 unsigned long old;
331
332 if ((pte_val(*ptep) & _PAGE_RW) == 0)
333 return;
334 old = pte_update(ptep, _PAGE_RW);
335 if (old & _PAGE_HASHPTE)
336 hpte_update(mm, addr, ptep, old, 0);
337}
338
339/*
340 * We currently remove entries from the hashtable regardless of whether
341 * the entry was young or dirty. The generic routines only flush if the
342 * entry was young or dirty, which is not good enough.
343 *
344 * We should be more intelligent about this, but for the moment we override
345 * these functions and force a tlb flush unconditionally.
346 */
347#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
348#define ptep_clear_flush_young(__vma, __address, __ptep) \
349({ \
350 int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
351 __ptep); \
352 __young; \
353})
354
355#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
356#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
357({ \
358 int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
359 __ptep); \
360 flush_tlb_page(__vma, __address); \
361 __dirty; \
362})
363
364#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
365static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
366 unsigned long addr, pte_t *ptep)
367{
368 unsigned long old = pte_update(ptep, ~0UL);
369
370 if (old & _PAGE_HASHPTE)
371 hpte_update(mm, addr, ptep, old, 0);
372 return __pte(old);
373}
374
375static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
376 pte_t * ptep)
377{
378 unsigned long old = pte_update(ptep, ~0UL);
379
380 if (old & _PAGE_HASHPTE)
381 hpte_update(mm, addr, ptep, old, 0);
382}
383
384/*
385 * set_pte_at stores a linux PTE into the linux page table.
386 */
387static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
388 pte_t *ptep, pte_t pte)
389{
390 if (pte_present(*ptep)) {
391 pte_clear(mm, addr, ptep);
392 flush_tlb_pending();
393 }
394 pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
395
396#ifdef CONFIG_PPC_64K_PAGES
397 if (mmu_virtual_psize != MMU_PAGE_64K)
398 pte = __pte(pte_val(pte) | _PAGE_COMBO);
399#endif /* CONFIG_PPC_64K_PAGES */
400
401 *ptep = pte;
402}
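/*
 * A note on set_pte_at() above: the _PAGE_HPTEFLAGS bits are cleared so the
 * new PTE does not inherit the old mapping's hash-slot bookkeeping, and on a
 * 64K-page kernel whose hash page size is not 64K the PTE is presumably
 * marked _PAGE_COMBO so the hash code treats it as a set of 4K sub-pages.
 */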
403
404/* Set the dirty and/or accessed bits atomically in a linux PTE; this
405 * function doesn't need to flush the hash entry.
406 */
407#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
408static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
409{
410 unsigned long bits = pte_val(entry) &
411 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
412 unsigned long old, tmp;
413
414 __asm__ __volatile__(
415 "1: ldarx %0,0,%4\n\
416 andi. %1,%0,%6\n\
417 bne- 1b \n\
418 or %0,%3,%0\n\
419 stdcx. %0,0,%4\n\
420 bne- 1b"
421 :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
422 :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
423 :"cc");
424}
425#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
426 do { \
427 __ptep_set_access_flags(__ptep, __entry, __dirty); \
428 flush_tlb_page_nohash(__vma, __address); \
429 } while(0)
430
431/*
432 * Macro to mark a page protection value as "uncacheable".
433 */
434#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
435
436struct file;
437extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
438 unsigned long size, pgprot_t vma_prot);
439#define __HAVE_PHYS_MEM_ACCESS_PROT
440
441#define __HAVE_ARCH_PTE_SAME
442#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
443
444#define pte_ERROR(e) \
445 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
446#define pmd_ERROR(e) \
447 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
448#define pgd_ERROR(e) \
449 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
450
451extern pgd_t swapper_pg_dir[];
452
453extern void paging_init(void);
454
455#ifdef CONFIG_HUGETLB_PAGE
456#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
457 free_pgd_range(tlb, addr, end, floor, ceiling)
458#endif
459
460/*
461 * This gets called at the end of handling a page fault, when
462 * the kernel has put a new PTE into the page table for the process.
463 * We use it to put a corresponding HPTE into the hash table
464 * ahead of time, instead of waiting for the inevitable extra
465 * hash-table miss exception.
466 */
467struct vm_area_struct;
468extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
469
470/* Encode and de-code a swap entry */
471#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
472#define __swp_offset(entry) ((entry).val >> 8)
473#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
474#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
475#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
476#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT)
477#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
478#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
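/*
 * A minimal sketch (not part of the original header) of the swap-entry
 * layout the macros above imply: the swap type occupies bits 1-6, the swap
 * offset starts at bit 8, and the whole value is shifted up by PTE_RPN_SHIFT
 * when stored in a (non-present) PTE.
 */
static int sketch_swp_roundtrip(void)
{
	unsigned long type = 3, offset = 0x1234;
	unsigned long val = (type << 1) | (offset << 8);	/* __swp_entry() */

	/* __swp_type() and __swp_offset() are the matching extractions */
	return ((val >> 1) & 0x3f) == type && (val >> 8) == offset;	/* 1 */
}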
479
480/*
481 * kern_addr_valid is intended to indicate whether an address is a valid
482 * kernel address. Most 32-bit archs define it as always true (like this)
483 * but most 64-bit archs actually perform a test. What should we do here?
484 * The only use is in fs/ncpfs/dir.c
485 */
486#define kern_addr_valid(addr) (1)
487
488#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
489 remap_pfn_range(vma, vaddr, pfn, size, prot)
490
491void pgtable_cache_init(void);
492
493/*
494 * find_linux_pte returns the address of a linux pte for a given
495 * effective address and directory. If not found, it returns zero.
496 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
497{
498 pgd_t *pg;
499 pud_t *pu;
500 pmd_t *pm;
501 pte_t *pt = NULL;
502
503 pg = pgdir + pgd_index(ea);
504 if (!pgd_none(*pg)) {
505 pu = pud_offset(pg, ea);
506 if (!pud_none(*pu)) {
507 pm = pmd_offset(pu, ea);
508 if (pmd_present(*pm))
509 pt = pte_offset_kernel(pm, ea);
510 }
511 }
512 return pt;
513}
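/*
 * A hypothetical caller of find_linux_pte(), shown only as a usage sketch:
 * the walk above takes no locks, so the caller must keep the page tables
 * stable (for example by holding the mm's page_table_lock, or by running in
 * a context where they cannot be freed underneath it).
 */
static inline unsigned long sketch_ea_to_pfn(struct mm_struct *mm,
					     unsigned long ea)
{
	pte_t *ptep = find_linux_pte(mm->pgd, ea);

	if (ptep && pte_present(*ptep))
		return pte_pfn(*ptep);
	return 0;	/* no valid translation */
}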
514
515#include <asm-generic/pgtable.h>
516
517#endif /* __ASSEMBLY__ */
518
519#endif /* _PPC64_PGTABLE_H */