author    Chris Metcalf <cmetcalf@tilera.com>    2010-05-28 23:09:12 -0400
committer Chris Metcalf <cmetcalf@tilera.com>    2010-06-04 17:11:18 -0400
commit    867e359b97c970a60626d5d76bbe2a8fadbf38fb (patch)
tree      c5ccbb7f5172e8555977119608ecb1eee3cc37e3 /arch/tile/include/asm/page.h
parent    5360bd776f73d0a7da571d72a09a03f237e99900 (diff)
arch/tile: core support for Tilera 32-bit chips.
This change is the core kernel support for TILEPro and TILE64 chips.
No driver support (except the console driver) is included yet.

This includes the relevant Linux headers in asm/; the low-level "Tile
architecture" headers in arch/, which are shared with the hypervisor,
etc., and are build-system agnostic; and the relevant hypervisor
headers in hv/.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/tile/include/asm/page.h')
-rw-r--r--  arch/tile/include/asm/page.h  334
1 file changed, 334 insertions(+), 0 deletions(-)
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
new file mode 100644
index 000000000000..c8301c43d6d9
--- /dev/null
+++ b/arch/tile/include/asm/page.h
@@ -0,0 +1,334 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PAGE_H
#define _ASM_TILE_PAGE_H

#include <linux/const.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>

/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
#define PAGE_SHIFT	16
#define HPAGE_SHIFT	24

#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE	(_AC(1, UL) << HPAGE_SHIFT)

#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

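/*
 * Worked example: with the shifts above, PAGE_SIZE is 1 << 16 = 64 KB
 * and HPAGE_SIZE is 1 << 24 = 16 MB, so for instance
 * (0x12345678 & PAGE_MASK) == 0x12340000 and
 * (0x12345678 & HPAGE_MASK) == 0x12000000.
 */
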
/*
 * The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx
 * definitions in <hv/hypervisor.h>. We validate this at build time
 * here, and again at runtime during early boot. We provide a
 * separate definition since userspace doesn't have <hv/hypervisor.h>.
 *
 * Be careful to distinguish PAGE_SHIFT from HV_PTE_INDEX_PFN, since
 * they are the same on i386 but not TILE.
 */
#if HV_LOG2_PAGE_SIZE_SMALL != PAGE_SHIFT
# error Small page size mismatch in Linux
#endif
#if HV_LOG2_PAGE_SIZE_LARGE != HPAGE_SHIFT
# error Huge page size mismatch in Linux
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/string.h>

struct page;

static inline void clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}

static inline void clear_user_page(void *page, unsigned long vaddr,
				struct page *pg)
{
	clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				struct page *topage)
{
	copy_page(to, from);
}

/*
 * Hypervisor page tables are made of the same basic structure.
 */

typedef __u64 pteval_t;
typedef __u64 pmdval_t;
typedef __u64 pudval_t;
typedef __u64 pgdval_t;
typedef __u64 pgprotval_t;

typedef HV_PTE pte_t;
typedef HV_PTE pgd_t;
typedef HV_PTE pgprot_t;

/*
 * User L2 page tables are managed as one L2 page table per page,
 * because we use the page allocator for them. This keeps the allocation
 * simple and makes it potentially useful to implement HIGHPTE at some point.
 * However, it's also inefficient, since L2 page tables are much smaller
 * than pages (currently 2KB vs 64KB). So we should revisit this.
 */
typedef struct page *pgtable_t;

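/*
 * Sizing sketch for the comment above: one L2 table covers a single
 * huge page, i.e. 1 << (HPAGE_SHIFT - PAGE_SHIFT) = 256 PTEs; assuming
 * an 8-byte HV_PTE (consistent with the __u64 pteval_t), that is
 * 256 * 8 = 2 KB of table occupying a full 64 KB page, the
 * "2KB vs 64KB" gap noted above.
 */
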
/* Must be a macro since it is used to create constants. */
#define __pgprot(val) hv_pte(val)

static inline u64 pgprot_val(pgprot_t pgprot)
{
	return hv_pte_val(pgprot);
}

static inline u64 pte_val(pte_t pte)
{
	return hv_pte_val(pte);
}

static inline u64 pgd_val(pgd_t pgd)
{
	return hv_pte_val(pgd);
}

#ifdef __tilegx__

typedef HV_PTE pmd_t;

static inline u64 pmd_val(pmd_t pmd)
{
	return hv_pte_val(pmd);
}

#endif

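/*
 * Minimal usage sketch (illustrative only; the helper name is made up):
 * pte_t, pgd_t and pgprot_t are all thin HV_PTE wrappers, so a raw
 * value survives a round trip through __pgprot()/pgprot_val().
 */
static inline int __pgprot_roundtrip_ok(u64 val)
{
	pgprot_t prot = __pgprot(val);	/* wrap the raw bits in a pgprot_t */

	return pgprot_val(prot) == val;	/* unwrap again; expected to hold */
}
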
#endif /* !__ASSEMBLY__ */

#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define HUGE_MAX_HSTATE		2

#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

/* Each memory controller has PAs distinct in their high bits. */
#define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS())
#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))

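/*
 * Worked example (the chip parameters here are illustrative; the real
 * values come from <arch/chip.h>): with a hypothetical 36-bit
 * CHIP_PA_WIDTH() and a CHIP_LOG_NUM_MSHIMS() of 2, NR_PA_HIGHBIT_SHIFT
 * is 34 and NR_PA_HIGHBIT_VALUES is 4, so __pa_to_highbits() returns
 * the top two PA bits, identifying which of four controllers owns a PA.
 */
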
#ifdef __tilegx__

/*
 * We reserve the lower half of memory for user-space programs, and the
 * upper half for system code. We re-map all of physical memory in the
 * upper half, which takes a quarter of our VA space. Then we have
 * the vmalloc regions. The supervisor code lives at 0xfffffff700000000,
 * with the hypervisor above that.
 *
 * Loadable kernel modules are placed immediately after the static
 * supervisor code, with each being allocated a 256MB region of
 * address space, so we don't have to worry about the range of "jal"
 * and other branch instructions.
 *
 * For now we keep life simple and just allocate one pmd (4GB) for vmalloc.
 * Similarly, for now we don't play any struct page mapping games.
 */

#if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH()
# error Too much PA to map with the VA available!
#endif
#define HALF_VA_SPACE		(_AC(1, UL) << (CHIP_VA_WIDTH() - 1))

#define MEM_LOW_END		(HALF_VA_SPACE - 1)	/* low half */
#define MEM_HIGH_START		(-HALF_VA_SPACE)	/* high half */
#define PAGE_OFFSET		MEM_HIGH_START
#define _VMALLOC_START		_AC(0xfffffff500000000, UL) /* 4 GB */
#define HUGE_VMAP_BASE		_AC(0xfffffff600000000, UL) /* 4 GB */
#define MEM_SV_START		_AC(0xfffffff700000000, UL) /* 256 MB */
#define MEM_SV_INTRPT		MEM_SV_START
#define MEM_MODULE_START	_AC(0xfffffff710000000, UL) /* 256 MB */
#define MEM_MODULE_END		(MEM_MODULE_START + (256*1024*1024))
#define MEM_HV_START		_AC(0xfffffff800000000, UL) /* 32 GB */

/* Highest DTLB address we will use */
#define KERNEL_HIGH_VADDR	MEM_SV_START

/* Since we don't currently provide any fixmaps, we use an impossible VA. */
#define FIXADDR_TOP		MEM_HV_START

#else /* !__tilegx__ */

/*
 * A PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 768MB.
 * If you want more physical memory than this then see the CONFIG_HIGHMEM
 * option in the kernel configuration.
 *
 * The top two 16MB chunks in the table below (VIRT and HV) are
 * unavailable to Linux. Since the kernel interrupt vectors must live
 * at 0xfd000000, we map all of the bottom of RAM at this address with
 * a huge page table entry to minimize its ITLB footprint (as well as
 * at PAGE_OFFSET). The last architected requirement is that user
 * interrupt vectors live at 0xfc000000, so we make that range of
 * memory available to user processes. The remaining regions are sized
 * as shown; after the first four addresses, we show "typical" values,
 * since the actual addresses depend on kernel #defines.
 *
 * MEM_VIRT_INTRPT                 0xff000000
 * MEM_HV_INTRPT                   0xfe000000
 * MEM_SV_INTRPT (kernel code)     0xfd000000
 * MEM_USER_INTRPT (user vector)   0xfc000000
 * FIX_KMAP_xxx                    0xf8000000 (via NR_CPUS * KM_TYPE_NR)
 * PKMAP_BASE                      0xf7000000 (via LAST_PKMAP)
 * HUGE_VMAP                       0xf3000000 (via CONFIG_NR_HUGE_VMAPS)
 * VMALLOC_START                   0xf0000000 (via __VMALLOC_RESERVE)
 * mapped LOWMEM                   0xc0000000
 */

#define MEM_USER_INTRPT		_AC(0xfc000000, UL)
#define MEM_SV_INTRPT		_AC(0xfd000000, UL)
#define MEM_HV_INTRPT		_AC(0xfe000000, UL)
#define MEM_VIRT_INTRPT		_AC(0xff000000, UL)

#define INTRPT_SIZE		0x4000

/* Tolerate page size larger than the architecture interrupt region size. */
#if PAGE_SIZE > INTRPT_SIZE
#undef INTRPT_SIZE
#define INTRPT_SIZE		PAGE_SIZE
#endif
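/*
 * With the 64 KB PAGE_SIZE defined above (0x10000 > 0x4000), this
 * override takes effect and INTRPT_SIZE ends up equal to PAGE_SIZE.
 */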

#define KERNEL_HIGH_VADDR	MEM_USER_INTRPT
#define FIXADDR_TOP		(KERNEL_HIGH_VADDR - PAGE_SIZE)

#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)

/* On 32-bit architectures we mix kernel modules in with other vmaps. */
#define MEM_MODULE_START	VMALLOC_START
#define MEM_MODULE_END		VMALLOC_END

#endif /* __tilegx__ */

#ifndef __ASSEMBLY__

#ifdef CONFIG_HIGHMEM

/* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */
extern unsigned long pbase_map[];
extern void *vbase_map[];

static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr)
{
	unsigned long kaddr = (unsigned long)_kaddr;
	return pbase_map[kaddr >> HPAGE_SHIFT] +
		((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT);
}

static inline void *pfn_to_kaddr(unsigned long pfn)
{
	return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT);
}

static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
	unsigned long pfn = kaddr_to_pfn(kaddr);
	return ((phys_addr_t)pfn << PAGE_SHIFT) +
		((unsigned long)kaddr & (PAGE_SIZE-1));
}

static inline void *phys_to_virt(phys_addr_t paddr)
{
	return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1));
}

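/*
 * Worked example (with made-up table contents): if the 16 MB chunk
 * containing a kernel VA starts at PFN 0x1000 according to pbase_map[],
 * and the VA is three 64 KB pages plus 0x100 bytes into that chunk,
 * then kaddr_to_pfn() returns 0x1003 and virt_to_phys() returns
 * (0x1003 << PAGE_SHIFT) + 0x100.
 */
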
/* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */
static inline int virt_addr_valid(const volatile void *kaddr)
{
	extern void *high_memory;  /* copied from <linux/mm.h> */
	return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory);
}

#else /* !CONFIG_HIGHMEM */

static inline unsigned long kaddr_to_pfn(const volatile void *kaddr)
{
	return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
}

static inline void *pfn_to_kaddr(unsigned long pfn)
{
	return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET);
}

static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
	return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET);
}

static inline void *phys_to_virt(phys_addr_t paddr)
{
	return (void *)((unsigned long)paddr + PAGE_OFFSET);
}

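/*
 * Worked example (assuming the typical 0xc0000000 PAGE_OFFSET shown in
 * the 32-bit layout comment above): for the kernel VA 0xc0012345,
 * virt_to_phys() yields PA 0x12345 and kaddr_to_pfn() yields PFN 0x1
 * (PA >> 16 with 64 KB pages); phys_to_virt(0x12345) maps back to
 * 0xc0012345.
 */
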
/* Check that the given address is within some mapped range of PAs. */
#define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr))

#endif /* !CONFIG_HIGHMEM */

/* Not all callers are consistent in how they call these functions. */
#define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr))
#define __va(paddr) phys_to_virt((phys_addr_t)(paddr))

extern int devmem_is_allowed(unsigned long pagenr);

#ifdef CONFIG_FLATMEM
static inline int pfn_valid(unsigned long pfn)
{
	return pfn < max_mapnr;
}
#endif

/* Provide as macros since these require some other headers included. */
#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_TILE_PAGE_H */