author		Paul Mundt <lethal@linux-sh.org>	2008-07-28 19:09:44 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2008-07-28 19:09:44 -0400
commit		f15cbe6f1a4b4d9df59142fc8e4abb973302cf44 (patch)
tree		774d7b11abaaf33561ab8268bf51ddd9ceb79025 /arch/sh/include/asm/page.h
parent		25326277d8d1393d1c66240e6255aca780f9e3eb (diff)
sh: migrate to arch/sh/include/
This follows the sparc changes in commit a439fe51a1f8eb087c22dd24d69cebae4a3addac.
Most of the moving about was done with Sam's directions at:
http://marc.info/?l=linux-sh&m=121724823706062&w=2
with subsequent hacking and fixups entirely my fault.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/include/asm/page.h')
-rw-r--r--	arch/sh/include/asm/page.h	183
1 file changed, 183 insertions, 0 deletions
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
new file mode 100644
index 000000000000..77fb8bf02e4e
--- /dev/null
+++ b/arch/sh/include/asm/page.h
@@ -0,0 +1,183 @@
#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H

/*
 * Copyright (C) 1999  Niibe Yutaka
 */

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Bogus kernel page size?"
#endif

#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PTE_MASK	PAGE_MASK
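
/*
 * Example (illustrative): with CONFIG_PAGE_SIZE_4KB, PAGE_SHIFT is 12,
 * so PAGE_SIZE is 0x1000 and PAGE_MASK is 0xfffff000. An address is
 * rounded down to its page base with (addr & PAGE_MASK), e.g.
 * 0x8c001234 & PAGE_MASK == 0x8c001000.
 */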

#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT	16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT	18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT	20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT	22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT	26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT	29
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
#endif
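
/*
 * Example (illustrative): with 4KB base pages and
 * CONFIG_HUGETLB_PAGE_SIZE_64K, HUGETLB_PAGE_ORDER is 16 - 12 = 4,
 * i.e. one huge page spans 2^4 = 16 base pages.
 */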

#ifndef __ASSEMBLY__

extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end;

extern void clear_page(void *to);
extern void copy_page(void *to, void *from);

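/*
 * Note (added for clarity): the CPUs matched below have virtually
 * indexed, aliasing-prone caches, so clearing or copying a page that
 * is also mapped in user space must account for the user mapping's
 * cache alias. On all other configurations the operations reduce to
 * the plain clear_page()/copy_page() above.
 */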
#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
	(defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \
	 defined(CONFIG_SH7705_CACHE_32KB))
struct page;
struct vm_area_struct;
extern void clear_user_page(void *to, unsigned long address, struct page *page);
extern void copy_user_page(void *to, void *from, unsigned long address,
			   struct page *page);
#if defined(CONFIG_CPU_SH4)
extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#endif
#else
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif

/*
 * These are used to make use of C type-checking.
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#endif
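
/*
 * Example (illustrative): under CONFIG_X2TLB a 64-bit software PTE is
 * split across two 32-bit words. __pte(0x0000000180000123ULL) yields
 * pte_low == 0x80000123 and pte_high == 0x1, and pte_val() reassembles
 * the original 64-bit value from the two halves.
 */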

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

typedef struct page *pgtable_t;

#endif /* !__ASSEMBLY__ */

/*
 * __MEMORY_START and __MEMORY_SIZE are the physical start address and
 * size of RAM.
 */
#define __MEMORY_START		CONFIG_MEMORY_START
#define __MEMORY_SIZE		CONFIG_MEMORY_SIZE

/*
 * PAGE_OFFSET is the virtual address of the start of kernel address
 * space.
 */
#define PAGE_OFFSET		CONFIG_PAGE_OFFSET

/*
 * Virtual to physical RAM address translation.
 *
 * In 29-bit mode, the physical offset of RAM from address 0 is visible
 * in the kernel virtual address space, and thus we don't have to take
 * it into account when translating. In 32-bit mode, however, this
 * offset is not visible (it is part of the PMB mapping) and so needs
 * to be added or subtracted as required.
 */
#ifdef CONFIG_32BIT
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
#else
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif
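
/*
 * Example (illustrative, assuming the common PAGE_OFFSET of 0x80000000
 * and a board with __MEMORY_START of 0x0c000000): in 29-bit mode the
 * RAM offset is already part of the virtual address, so
 * __pa(0x8c001000) == 0x0c001000; in 32-bit (PMB) mode RAM is mapped
 * at PAGE_OFFSET itself, so __pa(0x80001000) == 0x0c001000.
 */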

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

/*
 * PFN = physical frame number (i.e. PFN 0 == physical address 0).
 * PFN_START is the PFN of the first page of RAM. By defining this we
 * don't have struct page entries for the portion of address space
 * between physical address 0 and the start of RAM.
 */
#define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET		(PFN_START)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
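
/*
 * Example (illustrative): with __MEMORY_START == 0x0c000000 and 4KB
 * pages, PFN_START is 0xc000. ARCH_PFN_OFFSET then makes mem_map[0]
 * correspond to PFN 0xc000, so no struct page entries are allocated
 * for the 192MB of physical address space below the start of RAM.
 */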

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>

/* vDSO support */
#ifdef CONFIG_VSYSCALL
#define __HAVE_ARCH_GATE_AREA
#endif

/*
 * Some drivers need to perform DMA into kmalloc'ed buffers
 * and so we have to increase the kmalloc minalign for this.
 */
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES

#ifdef CONFIG_SUPERH64
/*
 * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
 * happily generate {ld/st}.q pairs, requiring us to have 8-byte
 * alignment to avoid traps. The kmalloc alignment is guaranteed by
 * virtue of L1_CACHE_BYTES, requiring this to only be special cased
 * for slab caches.
 */
#define ARCH_SLAB_MINALIGN	8
#endif

#endif /* __ASM_SH_PAGE_H */