author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-ppc/page.h
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-ppc/page.h')
-rw-r--r--   include/asm-ppc/page.h   169
1 file changed, 169 insertions, 0 deletions
diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h
new file mode 100644
index 000000000000..4789dc024240
--- /dev/null
+++ b/include/asm-ppc/page.h
@@ -0,0 +1,169 @@
#ifndef _PPC_PAGE_H
#define _PPC_PAGE_H

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/*
 * Subtle: this is an int (not an unsigned long) and so it
 * gets extended to 64 bits the way we want (i.e. with 1s). -- paulus
 */
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))

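/*
 * Illustrative, standalone sketch (not part of page.h): why PAGE_MASK is
 * built from a signed int.  ~((1 << 12) - 1) is the 32-bit int 0xfffff000;
 * widening it to 64 bits sign-extends with 1s, whereas an unsigned 32-bit
 * mask would zero-extend.  Compile and run separately to check.
 */
#include <stdio.h>

int main(void)
{
	long long from_int  = ~((1 << 12) - 1);                  /* sign-extends */
	long long from_uint = (unsigned int)~((1u << 12) - 1);   /* zero-extends */

	printf("%016llx\n", (unsigned long long)from_int);   /* fffffffffffff000 */
	printf("%016llx\n", (unsigned long long)from_uint);  /* 00000000fffff000 */
	return 0;
}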
#ifdef __KERNEL__
#include <linux/config.h>

/* This must match what is in arch/ppc/Makefile */
#define PAGE_OFFSET	CONFIG_KERNEL_START
#define KERNELBASE	PAGE_OFFSET

#ifndef __ASSEMBLY__

/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing. For now this is just the IBM PPC440.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
#define PTE_FMT		"%16Lx"
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
#define PTE_FMT		"%.8lx"
#endif

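/*
 * Illustrative, standalone sketch (not part of page.h): PTE_SHIFT just
 * encodes "PTEs per 4 KiB page" as a power of two: 4096 / 8 = 512 = 1 << 9
 * for 64-bit PTEs, and 4096 / 4 = 1024 = 1 << 10 for 32-bit ones.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	assert((1 << (12 - 3)) == 4096 / sizeof(uint64_t));	/* 512 entries  */
	assert((1 << (12 - 2)) == 4096 / sizeof(uint32_t));	/* 1024 entries */
	return 0;
}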
#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { pte_basic_t pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef pte_basic_t pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif


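/*
 * Illustrative, standalone sketch (not part of page.h): what the
 * STRICT_MM_TYPECHECKS wrappers buy.  With the struct form, handing a raw
 * integer to something expecting a pte_t is a compile error; with the plain
 * typedefs it compiles silently.  demo_pte_t/demo_set_pte are hypothetical
 * names used only for this illustration.
 */
typedef struct { unsigned long pte; } demo_pte_t;

static void demo_set_pte(demo_pte_t p) { (void)p; }

int main(void)
{
	demo_pte_t p = { 0x1000 };

	demo_set_pte(p);		/* fine: types match                 */
	/* demo_set_pte(0x1000); */	/* rejected: int is not a demo_pte_t */
	return 0;
}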
/* align addr on a size boundary - adjust address up if needed -- Cort */
#define _ALIGN(addr,size)	(((addr)+(size)-1)&(~((size)-1)))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)

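/*
 * Illustrative, standalone sketch (not part of page.h): the round-up
 * arithmetic behind _ALIGN/PAGE_ALIGN, using a local copy of the macro.
 * 0x1234 rounds up to 0x2000; an already-aligned 0x2000 stays put.
 */
#include <assert.h>

#define DEMO_ALIGN(addr, size)	(((addr) + (size) - 1) & (~((size) - 1)))

int main(void)
{
	assert(DEMO_ALIGN(0x1234UL, 0x1000UL) == 0x2000UL);	/* rounded up      */
	assert(DEMO_ALIGN(0x2000UL, 0x1000UL) == 0x2000UL);	/* already aligned */
	return 0;
}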
struct page;
extern void clear_pages(void *page, int order);
static inline void clear_page(void *page) { clear_pages(page, 0); }
extern void copy_page(void *to, void *from);
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
			   struct page *pg);

#ifndef CONFIG_APUS
#define PPC_MEMSTART	0
#define PPC_PGSTART	0
#define PPC_MEMOFFSET	PAGE_OFFSET
#else
extern unsigned long ppc_memstart;
extern unsigned long ppc_pgstart;
extern unsigned long ppc_memoffset;
#define PPC_MEMSTART	ppc_memstart
#define PPC_PGSTART	ppc_pgstart
#define PPC_MEMOFFSET	ppc_memoffset
#endif

#if defined(CONFIG_APUS) && !defined(MODULE)
/* map phys->virtual and virtual->phys for RAM pages */
static inline unsigned long ___pa(unsigned long v)
{
	unsigned long p;
	asm volatile ("1: addis %0, %1, %2;"
		      ".section \".vtop_fixup\",\"aw\";"
		      ".align 1;"
		      ".long 1b;"
		      ".previous;"
		      : "=r" (p)
		      : "b" (v), "K" (((-PAGE_OFFSET) >> 16) & 0xffff));

	return p;
}
static inline void* ___va(unsigned long p)
{
	unsigned long v;
	asm volatile ("1: addis %0, %1, %2;"
		      ".section \".ptov_fixup\",\"aw\";"
		      ".align 1;"
		      ".long 1b;"
		      ".previous;"
		      : "=r" (v)
		      : "b" (p), "K" (((PAGE_OFFSET) >> 16) & 0xffff));

	return (void*) v;
}
#else
#define ___pa(vaddr) ((vaddr)-PPC_MEMOFFSET)
#define ___va(paddr) ((paddr)+PPC_MEMOFFSET)
#endif

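/*
 * Illustrative, standalone sketch (not part of page.h): in the !CONFIG_APUS
 * case __pa()/__va() are plain offset arithmetic around PAGE_OFFSET.
 * 0xc0000000 below is an assumed KERNELBASE used only for this example.
 */
#include <assert.h>

#define DEMO_PAGE_OFFSET	0xc0000000UL

#define demo_pa(v)	((unsigned long)(v) - DEMO_PAGE_OFFSET)
#define demo_va(p)	((void *)((unsigned long)(p) + DEMO_PAGE_OFFSET))

int main(void)
{
	assert(demo_pa(0xc0123000UL) == 0x00123000UL);			/* virt -> phys */
	assert((unsigned long)demo_va(0x00123000UL) == 0xc0123000UL);	/* phys -> virt */
	return 0;
}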
extern int page_is_ram(unsigned long pfn);

#define __pa(x) ___pa((unsigned long)(x))
#define __va(x) ((void *)(___va((unsigned long)(x))))

#define pfn_to_page(pfn)	(mem_map + ((pfn) - PPC_PGSTART))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map) + PPC_PGSTART)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)

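/*
 * Illustrative, standalone sketch (not part of page.h): pfn_to_page() and
 * page_to_pfn() are pointer arithmetic into the flat mem_map[] array,
 * biased by PPC_PGSTART (0 in the non-APUS case).  demo_page/demo_mem_map
 * are stand-ins for the real struct page and mem_map.
 */
#include <assert.h>

struct demo_page { unsigned long flags; };
static struct demo_page demo_mem_map[16];

#define demo_pfn_to_page(pfn)	(demo_mem_map + (pfn))
#define demo_page_to_pfn(page)	((unsigned long)((page) - demo_mem_map))

int main(void)
{
	struct demo_page *pg = demo_pfn_to_page(5);

	assert(demo_page_to_pfn(pg) == 5);	/* round trip recovers the pfn */
	return 0;
}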
#define pfn_valid(pfn)		(((pfn) - PPC_PGSTART) < max_mapnr)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
{
	int lz;

	size = (size-1) >> PAGE_SHIFT;
	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
	return 32 - lz;
}

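/*
 * Illustrative, standalone sketch (not part of page.h): a portable
 * equivalent of the cntlzw-based get_order() above, assuming PAGE_SHIFT
 * of 12.  cntlzw counts leading zeros, so 32 - cntlzw((size - 1) >> 12)
 * yields the smallest order whose page count covers size.
 */
#include <assert.h>

static int demo_get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> 12;	/* whole pages needed, minus one     */
	while (size) {			/* same result as 32 - cntlzw(size)  */
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	assert(demo_get_order(4096)  == 0);	/* one page          */
	assert(demo_get_order(4097)  == 1);	/* two pages, order 1 */
	assert(demo_get_order(65536) == 4);	/* 16 pages, order 4  */
	return 0;
}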
#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */
#endif /* _PPC_PAGE_H */