 arch/hexagon/include/asm/fixmap.h      |  73 ++++
 arch/hexagon/include/asm/mmu.h         |  37 ++
 arch/hexagon/include/asm/mmu_context.h | 100 ++++++
 arch/hexagon/include/asm/page.h        | 157 ++++++++++
 arch/hexagon/include/asm/pgalloc.h     | 146 +++++++++
 arch/hexagon/include/asm/pgtable.h     | 518 +++++++++++++++++++++++++++++++
 arch/hexagon/mm/pgalloc.c              |  23 +
 7 files changed, 1054 insertions(+), 0 deletions(-)
diff --git a/arch/hexagon/include/asm/fixmap.h b/arch/hexagon/include/asm/fixmap.h
new file mode 100644
index 000000000000..b27f4941645b
--- /dev/null
+++ b/arch/hexagon/include/asm/fixmap.h
@@ -0,0 +1,73 @@
+/*
+ * Fixmap support for Hexagon - enough to support highmem features
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+/*
+ * A lot of the fixmap info is already in mem-layout.h
+ */
+#include <asm/mem-layout.h>
+
+/*
+ * Full fixmap support involves set_fixmap() functions, but
+ * these may not be needed if all we're after is an area for
+ * highmem kernel mappings.
+ */
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)
+
+extern void __this_fixmap_does_not_exist(void);
+
+/**
+ * fix_to_virt -- "index to address" translation.
+ *
+ * If anyone tries to use the idx directly without translation,
+ * we catch the bug with a NULL-dereference kernel oops. Illegal
+ * ranges of incoming indices are caught too.
+ */
+static inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	/*
+	 * This branch gets completely eliminated after inlining,
+	 * except when someone tries to use fixaddr indices in an
+	 * illegal way (such as mixing up address types or using
+	 * out-of-range indices).
+	 *
+	 * If it doesn't get removed, the linker will complain
+	 * loudly with a reasonably clear error message.
+	 */
+	if (idx >= __end_of_fixed_addresses)
+		__this_fixmap_does_not_exist();
+
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+#define kmap_get_fixmap_pte(vaddr) \
+	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
+				(vaddr)), (vaddr)), (vaddr))
+
+#endif
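
As a quick sanity check of the __fix_to_virt()/__virt_to_fix() pair above, here is a minimal userspace sketch of the translation. The FIXADDR_TOP and PAGE_SHIFT values are made-up stand-ins for what asm/mem-layout.h and the configured page size really provide:

#include <assert.h>
#include <stdio.h>

/* Hypothetical values for illustration only; the real ones come
   from asm/mem-layout.h and the page-size Kconfig options. */
#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define FIXADDR_TOP	0xfe000000UL

#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

int main(void)
{
	/* Index 0 sits at the top of the fixmap region; each
	   successive index is one page lower. */
	unsigned long va = __fix_to_virt(3);

	printf("fixmap index 3 -> %#lx\n", va);	/* 0xfdffd000 here */

	/* The reverse translation recovers the index even from an
	   address in the middle of the page. */
	assert(__virt_to_fix(va + 0x123) == 3);
	return 0;
}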
diff --git a/arch/hexagon/include/asm/mmu.h b/arch/hexagon/include/asm/mmu.h
new file mode 100644
index 000000000000..30a5d8d2659d
--- /dev/null
+++ b/arch/hexagon/include/asm/mmu.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_MMU_H
+#define _ASM_MMU_H
+
+#include <asm/vdso.h>
+
+/*
+ * Architecture-specific state for a mm_struct.
+ * For the Hexagon Virtual Machine, it can be a copy
+ * of the pointer to the page table base.
+ */
+struct mm_context {
+	unsigned long long generation;
+	unsigned long ptbase;
+	struct hexagon_vdso *vdso;
+};
+
+typedef struct mm_context mm_context_t;
+
+#endif
diff --git a/arch/hexagon/include/asm/mmu_context.h b/arch/hexagon/include/asm/mmu_context.h
new file mode 100644
index 000000000000..b4fe5a5411b6
--- /dev/null
+++ b/arch/hexagon/include/asm/mmu_context.h
@@ -0,0 +1,100 @@
+/*
+ * MM context support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_MMU_CONTEXT_H
+#define _ASM_MMU_CONTEXT_H
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/mem-layout.h>
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+/*
+ * The VM port hides all TLB management, so "lazy TLB" isn't very
+ * meaningful. Even for ports to architectures with visible TLBs,
+ * this is almost invariably a null function.
+ */
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+	struct task_struct *tsk)
+{
+}
+
+/*
+ * Architecture-specific actions, if any, for memory map deactivation.
+ */
+static inline void deactivate_mm(struct task_struct *tsk,
+	struct mm_struct *mm)
+{
+}
+
+/**
+ * init_new_context - initialize context related info for new mm_struct instance
+ * @tsk: pointer to a task struct
+ * @mm: pointer to a new mm struct
+ */
+static inline int init_new_context(struct task_struct *tsk,
+					struct mm_struct *mm)
+{
+	/* mm->context is set up by pgd_alloc */
+	return 0;
+}
+
+/*
+ * Switch active mm context
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+				struct task_struct *tsk)
+{
+	int l1;
+
+	/*
+	 * For the virtual machine, we have to update the system map
+	 * if it's been touched.
+	 */
+	if (next->context.generation < prev->context.generation) {
+		for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
+			next->pgd[l1] = init_mm.pgd[l1];
+
+		next->context.generation = prev->context.generation;
+	}
+
+	__vmnewmap((void *)next->context.ptbase);
+}
+
+/*
+ * Activate new memory map for task
+ */
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm(prev, next, current_thread_info()->task);
+	local_irq_restore(flags);
+}
+
+/* Generic hooks for arch_dup_mmap and arch_exit_mmap */
+#include <asm-generic/mm_hooks.h>
+
+#endif
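
The generation check in switch_mm() above is the consuming half of the kernel-map update scheme; the producing half is pmd_populate_kernel() in pgalloc.h, later in this patch. Below is a standalone sketch of the idea with simplified types, a made-up segment range, and the staleness test done against a global generation counter rather than prev's copy (equivalent in spirit, since the currently running map is kept up to date by the producer):

#include <stdio.h>

#define MIN_KERNEL_SEG	768		/* hypothetical first kernel L1 segment */
#define PTRS_PER_PGD	1024

static unsigned long init_pgd[PTRS_PER_PGD];	/* stands in for init_mm.pgd */
static int max_kernel_seg = MIN_KERNEL_SEG;	/* high-water mark of kernel entries */
static unsigned long long kmap_generation;	/* bumped when the kernel map grows */

struct mm {
	unsigned long long generation;
	unsigned long pgd[PTRS_PER_PGD];
};

/* Producer side: a new kernel L2 table was installed at segment 'seg'. */
static void grow_kernel_map(int seg, unsigned long l1_entry)
{
	init_pgd[seg] = l1_entry;
	if (seg > max_kernel_seg)
		max_kernel_seg = seg;
	kmap_generation++;
}

/* Consumer side: the copy loop from switch_mm(), run only when stale. */
static void sync_kernel_map(struct mm *next)
{
	int l1;

	if (next->generation < kmap_generation) {
		for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
			next->pgd[l1] = init_pgd[l1];
		next->generation = kmap_generation;
	}
}

int main(void)
{
	struct mm a = { 0, { 0 } };

	grow_kernel_map(800, 0x40000007UL);	/* made-up L1 entry value */
	sync_kernel_map(&a);
	printf("mm now at generation %llu, pgd[800] = %#lx\n",
	       a.generation, a.pgd[800]);
	return 0;
}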
diff --git a/arch/hexagon/include/asm/page.h b/arch/hexagon/include/asm/page.h
new file mode 100644
index 000000000000..edd97626c482
--- /dev/null
+++ b/arch/hexagon/include/asm/page.h
@@ -0,0 +1,157 @@
+/*
+ * Page management definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_PAGE_H
+#define _ASM_PAGE_H
+
+#include <linux/const.h>
+
+/* This is probably not the most graceful way to handle this. */
+
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PAGE_SHIFT 12
+#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_16KB
+#define PAGE_SHIFT 14
+#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define PAGE_SHIFT 16
+#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_256KB
+#define PAGE_SHIFT 18
+#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_1MB
+#define PAGE_SHIFT 20
+#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
+#endif
+
+/*
+ * These should be defined in hugetlb.h, but apparently not.
+ * "Huge" for us should be 4MB or 16MB, which are both represented
+ * in L1 PTE's. Right now, it's set up for 4MB.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SHIFT 22
+#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE-1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
+#define HVM_HUGEPAGE_SIZE 0x5
+#endif
+
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+/*
+ * This is for PFN_DOWN, which mm.h needs. Seems the right place to pull it in.
+ */
+#include <linux/pfn.h>
+
+/*
+ * We implement a two-level architecture-specific page table structure.
+ * Null intermediate page table level (pmd, pud) definitions will come from
+ * asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+/*
+ * We need a __pa and a __va routine for kernel space.
+ * MIPS says they're only used during mem_init.
+ * Also, check whether we need a PHYS_OFFSET.
+ */
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
+
+/* The "page frame" descriptor is defined in linux/mm.h */
+struct page;
+
+/* Returns page frame descriptor for virtual address. */
+#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))
+
+/* Default vm area behavior is non-executable. */
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+				VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+/* Need to not use a define for linesize; may move this to another file. */
+static inline void clear_page(void *page)
+{
+	/* This can only be done on pages with L1 WB cache */
+	asm volatile(
+		"	loop0(1f,%1);\n"
+		"1:	{ dczeroa(%0);\n"
+		"	%0 = add(%0,#32); }:endloop0\n"
+		: "+r" (page)
+		: "r" (PAGE_SIZE/32)
+		: "lc0", "sa0", "memory"
+	);
+}
+
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+/*
+ * Under assumption that kernel always "sees" user map...
+ */
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * page_to_phys - convert page to physical address
+ * @page - pointer to page entry in mem_map
+ */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * For port to Hexagon Virtual Machine, MAYBE we check for attempts
+ * to reference reserved HVM space, but in any case, the VM will be
+ * protected.
+ */
+#define kern_addr_valid(addr) (1)
+
+#include <asm-generic/memory_model.h>
+/* XXX Todo: implement assembly-optimized version of getorder. */
+#include <asm-generic/getorder.h>
+
+#endif /* ifndef __ASSEMBLY__ */
+#endif /* ifdef __KERNEL__ */
+
+#endif
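
Since __pa() and __va() above are plain linear offsets rather than page table walks, their behavior is easy to demonstrate in isolation. A sketch with a hypothetical PAGE_OFFSET:

#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET 0xc0000000UL	/* hypothetical kernel virtual base */

#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))

int main(void)
{
	unsigned long kva = 0xc0123000UL;	/* some kernel virtual address */

	/* A fixed offset in each direction, so the pair round-trips. */
	assert(__pa(kva) == 0x00123000UL);
	assert((unsigned long)__va(__pa(kva)) == kva);

	printf("pa of %#lx is %#lx\n", kva, __pa(kva));
	return 0;
}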
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
new file mode 100644
index 000000000000..13443c775131
--- /dev/null
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -0,0 +1,146 @@
+/*
+ * Page table support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_PGALLOC_H
+#define _ASM_PGALLOC_H
+
+#include <asm/mem-layout.h>
+#include <asm/atomic.h>
+
+#define check_pgt_cache() do {} while (0)
+
+extern unsigned long long kmap_generation;
+
+/*
+ * Page table creation interface
+ */
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *pgd;
+
+	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+	/*
+	 * There may be better ways to do this, but to ensure
+	 * that new address spaces always contain the kernel
+	 * base mapping, and to ensure that the user area is
+	 * initially marked invalid, initialize the new map
+	 * with a copy of the kernel's persistent map.
+	 */
+
+	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t *));
+	mm->context.generation = kmap_generation;
+
+	/* Physical version is what is passed to virtual machine on switch */
+	mm->context.ptbase = __pa(pgd);
+
+	return pgd;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	free_page((unsigned long) pgd);
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long address)
+{
+	struct page *pte;
+
+	pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+
+	if (pte)
+		pgtable_page_ctor(pte);
+
+	return pte;
+}
+
+/* _kernel variant gets to use a different allocator */
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long address)
+{
+	gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+	return (pte_t *) __get_free_page(flags);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+	pgtable_page_dtor(pte);
+	__free_page(pte);
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+				pgtable_t pte)
+{
+	/*
+	 * Conveniently, zero in 3 LSB means indirect 4K page table.
+	 * Not so convenient when you're trying to vary the page size.
+	 */
+	set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
+		HEXAGON_L1_PTE_SIZE));
+}
+
+/*
+ * Other architectures seem to have ways of making all processes
+ * share the same pmd's for their kernel mappings, but the v0.3
+ * Hexagon VM spec has a "monolithic" L1 table for user and kernel
+ * segments. We track "generations" of the kernel map to minimize
+ * overhead, and update the "slave" copies of the kernel mappings
+ * as part of switch_mm. However, we still need to update the
+ * kernel map of the active thread that's calling pmd_populate_kernel...
+ */
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+				       pte_t *pte)
+{
+	extern spinlock_t kmap_gen_lock;
+	pmd_t *ppmd;
+	int pmdindex;
+
+	spin_lock(&kmap_gen_lock);
+	kmap_generation++;
+	mm->context.generation = kmap_generation;
+	current->active_mm->context.generation = kmap_generation;
+	spin_unlock(&kmap_gen_lock);
+
+	set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
+
+	/*
+	 * Now the "slave" copy of the current thread.
+	 * This is pointer arithmetic, not byte addresses!
+	 */
+	pmdindex = (pgd_t *)pmd - mm->pgd;
+	ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
+	set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
+	if (pmdindex > max_kernel_seg)
+		max_kernel_seg = pmdindex;
+}
+
+#define __pte_free_tlb(tlb, pte, addr)		\
+do {						\
+	pgtable_page_dtor((pte));		\
+	tlb_remove_page((tlb), (pte));		\
+} while (0)
+
+#endif
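
Both pmd_populate() variants above build an L1 entry by OR-ing the physical address of the L2 table with a size code in the three least significant bits; the comment in pmd_populate() notes that zero there means an indirect 4K page table. A toy illustration of that encoding, with a made-up frame number:

#include <stdio.h>

#define PAGE_SHIFT		12
#define HEXAGON_L1_PTE_SIZE	0x0	/* "indirect 4K page table", per the comment above */
#define _NULL_PMD		0x7	/* invalid L1 entry, per pgtable.h below */

int main(void)
{
	unsigned long pte_pfn = 0x1234;	/* made-up frame holding the L2 table */
	unsigned long pmd;

	/* Physical address in the upper bits, size code in the 3 LSBs. */
	pmd = (pte_pfn << PAGE_SHIFT) | HEXAGON_L1_PTE_SIZE;

	printf("pmd value   = %#lx\n", pmd);
	printf("L2 table pa = %#lx\n", pmd & ~0x7UL);
	printf("null pmd?     %s\n", pmd == _NULL_PMD ? "yes" : "no");
	return 0;
}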
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
new file mode 100644
index 000000000000..ca619bf225ef
--- /dev/null
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -0,0 +1,518 @@
+/*
+ * Page table support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_PGTABLE_H
+#define _ASM_PGTABLE_H
+
+/*
+ * Page table definitions for Qualcomm Hexagon processor.
+ */
+#include <linux/swap.h>
+#include <asm/page.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/* A handy thing to have if one has the RAM. Declared in head.S */
+extern unsigned long empty_zero_page;
+extern unsigned long zero_page_mask;
+
+/*
+ * The PTE model described here is that of the Hexagon Virtual Machine,
+ * which autonomously walks 2-level page tables. At a lower level, we
+ * also describe the RISCish software-loaded TLB entry structure of
+ * the underlying Hexagon processor. A kernel built to run on the
+ * virtual machine has no need to know about the underlying hardware.
+ */
+#include <asm/vm_mmu.h>
+
+/*
+ * To maximize the comfort level for the PTE manipulation macros,
+ * define the "well known" architecture-specific bits.
+ */
+#define _PAGE_READ	__HVM_PTE_R
+#define _PAGE_WRITE	__HVM_PTE_W
+#define _PAGE_EXECUTE	__HVM_PTE_X
+#define _PAGE_USER	__HVM_PTE_U
+
+/*
+ * We have a total of 4 "soft" bits available in the abstract PTE.
+ * The two mandatory software bits are Dirty and Accessed.
+ * To make nonlinear swap work according to the more recent
+ * model, we want a low order "Present" bit to indicate whether
+ * the PTE describes MMU programming or swap space.
+ */
+#define _PAGE_PRESENT	(1<<0)
+#define _PAGE_DIRTY	(1<<1)
+#define _PAGE_ACCESSED	(1<<2)
+
+/*
+ * _PAGE_FILE is only meaningful if _PAGE_PRESENT is false, while
+ * _PAGE_DIRTY is only meaningful if _PAGE_PRESENT is true.
+ * So we can overload the bit...
+ */
+#define _PAGE_FILE	_PAGE_DIRTY /* set = pagecache, unset = swap */
+
+/*
+ * For now, let's say that Valid and Present are the same thing.
+ * Alternatively, we could say that it's the "or" of R, W, and X
+ * permissions.
+ */
+#define _PAGE_VALID	_PAGE_PRESENT
+
+/*
+ * We're not defining _PAGE_GLOBAL here, since there's no concept
+ * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
+ * and we want to use the same page table structures and macros in
+ * the native kernel as we do in the virtual machine kernel.
+ * So we'll put up with a bit of inefficiency for now...
+ */
+
+/*
+ * Top "FOURTH" level (pgd), which for the Hexagon VM is really
+ * only the second from the bottom, pgd and pud both being collapsed.
+ * Each entry represents 4MB of virtual address space, 4K of table
+ * thus maps the full 4GB.
+ */
+#define PGDIR_SHIFT 22
+#define PTRS_PER_PGD 1024
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PTRS_PER_PTE 1024
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_16KB
+#define PTRS_PER_PTE 256
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define PTRS_PER_PTE 64
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_256KB
+#define PTRS_PER_PTE 16
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_1MB
+#define PTRS_PER_PTE 4
+#endif
+
+/* Any bigger and the PTE disappears. */
+#define pgd_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
+		pgd_val(e))
+
+/*
+ * Page Protection Constants. Includes (in this variant) cache attributes.
+ */
+extern unsigned long _dflt_cache_att;
+
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
+				_dflt_cache_att)
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
+				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
+#define PAGE_COPY	PAGE_READONLY
+#define PAGE_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
+				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
+#define PAGE_COPY_EXEC	PAGE_EXEC
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
+				_PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
+				_PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)
+
+
+/*
+ * Aliases for mapping mmap() protection bits to page protections.
+ * These get used for static initialization, so using the _dflt_cache_att
+ * variable for the default cache attribute isn't workable. If the
+ * default gets changed at boot time, the boot option code has to
+ * update data structures like the protection_map[] array.
+ */
+#define CACHEDEF	(CACHE_DEFAULT << 6)
+
+/* Private (copy-on-write) page protections. */
+#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
+#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
+#define __P010 __P000	/* Write-only copy-on-write */
+#define __P011 __P001	/* Read/Write copy-on-write */
+#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+			_PAGE_EXECUTE | CACHEDEF)
+#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
+			_PAGE_READ | CACHEDEF)
+#define __P110 __P100	/* Write/execute copy-on-write */
+#define __P111 __P101	/* Read/Write/Execute, copy-on-write */
+
+/* Shared page protections. */
+#define __S000 __P000
+#define __S001 __P001
+#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+			_PAGE_WRITE | CACHEDEF)
+#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
+			_PAGE_WRITE | CACHEDEF)
+#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+			_PAGE_EXECUTE | CACHEDEF)
+#define __S101 __P101
+#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
+#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
+			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
+
+/* Seems to be zero even in architectures where the zero page is firewalled? */
+#define FIRST_USER_ADDRESS 0
+#define pte_special(pte)	0
+#define pte_mkspecial(pte)	(pte)
+
+/* HUGETLB not working currently */
+#ifdef CONFIG_HUGETLB_PAGE
+#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
+#endif
+
+/*
+ * For now, assume that higher-level code will do TLB/MMU invalidations
+ * and don't insert that overhead into this low-level function.
+ */
+extern void sync_icache_dcache(pte_t pte);
+
+#define pte_present_exec_user(pte) \
+	((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
+	(_PAGE_EXECUTE | _PAGE_USER))
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+	/* should really be using pte_exec, if it weren't declared later. */
+	if (pte_present_exec_user(pteval))
+		sync_icache_dcache(pteval);
+
+	*ptep = pteval;
+}
+
+/*
+ * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
+ * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
+ * (Linux PTE), the key is to have bits 11..9 all zero. We'd use 0x7
+ * as a universal null entry, but some of those least significant bits
+ * are interpreted by software.
+ */
+#define _NULL_PMD	0x7
+#define _NULL_PTE	0x0
+
+static inline void pmd_clear(pmd_t *pmd_entry_ptr)
+{
+	pmd_val(*pmd_entry_ptr) = _NULL_PMD;
+}
+
+/*
+ * Conveniently, a null PTE value is invalid.
+ */
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep)
+{
+	pte_val(*ptep) = _NULL_PTE;
+}
+
+#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
+/**
+ * pmd_index - returns the index of the entry in the PMD page
+ * which would control the given virtual address
+ */
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+#endif
+
+/**
+ * pgd_index - returns the index of the entry in the PGD page
+ * which would control the given virtual address
+ *
+ * This returns the *index* for the address in the pgd_t
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+/*
+ * pgd_offset - find an offset in a page-table-directory
+ */
+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+
+/*
+ * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/**
+ * pmd_none - check if pmd_entry is mapped
+ * @pmd_entry: pmd entry
+ *
+ * MIPS checks it against that "invalid pte table" thing.
+ */
+static inline int pmd_none(pmd_t pmd)
+{
+	return pmd_val(pmd) == _NULL_PMD;
+}
+
+/**
+ * pmd_present - is there a page table behind this?
+ * Essentially the inverse of pmd_none. We maybe
+ * save an inline instruction by defining it this
+ * way, instead of simply "!pmd_none".
+ */
+static inline int pmd_present(pmd_t pmd)
+{
+	return pmd_val(pmd) != (unsigned long)_NULL_PMD;
+}
+
+/**
+ * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
+ * As we have no known cause of badness, it's null, as it is for many
+ * architectures.
+ */
+static inline int pmd_bad(pmd_t pmd)
+{
+	return 0;
+}
+
+/*
+ * pmd_page - converts a PMD entry to a page pointer
+ */
+#define pmd_page(pmd)	(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/**
+ * pte_none - check if pte is mapped
+ * @pte: pte_t entry
+ */
+static inline int pte_none(pte_t pte)
+{
+	return pte_val(pte) == _NULL_PTE;
+}
+
+/*
+ * pte_present - check if page is present
+ */
+static inline int pte_present(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* mk_pte - make a PTE out of a page pointer and protection bits */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+/* pte_mkold - mark PTE as not recently accessed */
+static inline pte_t pte_mkold(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_ACCESSED;
+	return pte;
+}
+
+/* pte_mkyoung - mark PTE as recently accessed */
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_ACCESSED;
+	return pte;
+}
+
+/* pte_mkclean - mark page as in sync with backing store */
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_DIRTY;
+	return pte;
+}
+
+/* pte_mkdirty - mark page as modified */
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_DIRTY;
+	return pte;
+}
+
+/* pte_young - "is PTE marked as accessed"? */
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+/* pte_dirty - "is PTE dirty?" */
+static inline int pte_dirty(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_DIRTY;
+}
+
+/* pte_modify - set protection bits on PTE */
+static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
+{
+	pte_val(pte) &= PAGE_MASK;
+	pte_val(pte) |= pgprot_val(prot);
+	return pte;
+}
+
+/* pte_wrprotect - mark page as not writable */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_WRITE;
+	return pte;
+}
+
+/* pte_mkwrite - mark page as writable */
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_WRITE;
+	return pte;
+}
+
+/* pte_mkexec - mark PTE as executable */
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_EXECUTE;
+	return pte;
+}
+
+/* pte_read - "is PTE marked as readable?" */
+static inline int pte_read(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_READ;
+}
+
+/* pte_write - "is PTE marked as writable?" */
+static inline int pte_write(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_WRITE;
+}
+
+
+/* pte_exec - "is PTE marked as executable?" */
+static inline int pte_exec(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_EXECUTE;
+}
+
+/* __pte_to_swp_entry - extract swap entry from PTE */
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+
+/* __swp_entry_to_pte - extract PTE from swap entry */
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+/* pfn_pte - convert page number and protection value to page table entry */
+#define pfn_pte(pfn, pgprot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(pgprot))
+
+/* pte_pfn - convert pte to page frame number */
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+
+/*
+ * set_pte_at - update page table and do whatever magic may be
+ * necessary to make the underlying hardware/firmware take note.
+ *
+ * VM may require a virtual instruction to alert the MMU.
+ */
+#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
+
+/*
+ * May need to invoke the virtual machine as well...
+ */
+#define pte_unmap(pte)		do { } while (0)
+#define pte_unmap_nested(pte)	do { } while (0)
+
+/*
+ * pte_offset_map - returns the linear address of the page table entry
+ * corresponding to an address
+ */
+#define pte_offset_map(dir, address) \
+	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
+
+#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)
+
+/* pte_offset_kernel - kernel version of pte_offset */
+#define pte_offset_kernel(dir, address) \
+	((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
+				+  __pte_offset(address))
+
+/* ZERO_PAGE - returns the globally shared zero page */
+#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+
+#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/* Nothing special about IO remapping at this point */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+/* I think this is in case we have page table caches; needed by init/main.c */
+#define pgtable_cache_init() do { } while (0)
+
+/*
+ * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the
+ * PTE is interpreted as swap information. Depending on the _PAGE_FILE
+ * bit, the remaining free bits are either interpreted as a file offset
+ * or a swap type/offset tuple. Rather than have the TLB fill handler
+ * test _PAGE_PRESENT, we're going to reserve the permissions bits
+ * and set them to all zeros for swap entries, which speeds up the
+ * miss handler at the cost of 3 bits of offset. That trade-off can
+ * be revisited if necessary, but Hexagon processor architecture and
+ * target applications suggest a lot of TLB misses and not much swap space.
+ *
+ * Format of swap PTE:
+ *	bit	0:	Present (zero)
+ *	bit	1:	_PAGE_FILE (zero)
+ *	bits	2-6:	swap type (arch independent layer uses 5 bits max)
+ *	bits	7-9:	bits 2:0 of swap offset
+ *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
+ *	bits	13-31:	bits 21:3 of swap offset
+ *
+ * Format of file PTE:
+ *	bit	0:	Present (zero)
+ *	bit	1:	_PAGE_FILE (one)
+ *	bits	2-9:	bits 7:0 of file offset
+ *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
+ *	bits	13-31:	bits 26:8 of file offset
+ *
+ * The split offset makes some of the following macros a little gnarly,
+ * but there's plenty of precedent for this sort of thing.
+ */
+#define PTE_FILE_MAX_BITS	27
+
+/* Used for swap PTEs */
+#define __swp_type(swp_pte)	(((swp_pte).val >> 2) & 0x1f)
+
+#define __swp_offset(swp_pte) \
+	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x003ffff8))
+
+#define __swp_entry(type, offset) \
+	((swp_entry_t) { \
+		(((type) << 2) | \
+		 (((offset) & 0x3ffff8) << 10) | (((offset) & 0x7) << 7)) })
+
+/* Used for file PTEs */
+#define pte_file(pte) \
+	((pte_val(pte) & (_PAGE_FILE | _PAGE_PRESENT)) == _PAGE_FILE)
+
+#define pte_to_pgoff(pte) \
+	(((pte_val(pte) >> 2) & 0xff) | ((pte_val(pte) >> 5) & 0x07ffff00))
+
+#define pgoff_to_pte(off) \
+	((pte_t) { ((((off) & 0x7ffff00) << 5) | (((off) & 0xff) << 2) \
+	| _PAGE_FILE) })
+
+/* Oh boy. There are a lot of possible arch overrides found in this file. */
+#include <asm-generic/pgtable.h>
+
+#endif
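
The swap-entry layout documented above can be checked with a round-trip through the __swp_* macros. Here is a standalone userspace rendering of those macros (not kernel code); note that bits 0-1 and the reserved permission bits 10-12 come out zero, which is exactly the property the TLB miss handler relies on:

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

#define __swp_type(swp_pte)	(((swp_pte).val >> 2) & 0x1f)
#define __swp_offset(swp_pte) \
	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x003ffff8))
#define __swp_entry(type, offset) \
	((swp_entry_t) { (((type) << 2) | \
		(((offset) & 0x3ffff8) << 10) | (((offset) & 0x7) << 7)) })

int main(void)
{
	swp_entry_t e = __swp_entry(0x12, 0x1abcd);	/* arbitrary type/offset */

	/* Present, _PAGE_FILE, and the reserved permission bits stay zero. */
	assert((e.val & 0x3) == 0);
	assert((e.val & 0x1c00) == 0);

	/* Type and the split offset both survive the round trip. */
	assert(__swp_type(e) == 0x12);
	assert(__swp_offset(e) == 0x1abcd);

	printf("swap pte = %#lx\n", e.val);	/* 0x6af22c8 here */
	return 0;
}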
diff --git a/arch/hexagon/mm/pgalloc.c b/arch/hexagon/mm/pgalloc.c
new file mode 100644
index 000000000000..b175e2d42b89
--- /dev/null
+++ b/arch/hexagon/mm/pgalloc.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+
+void __init pgtable_cache_init(void)
+{
+}