-rw-r--r--  arch/sh/mm/Kconfig              |  49
-rw-r--r--  arch/sh/mm/init.c               |   4
-rw-r--r--  arch/sh/mm/ioremap.c            |   4
-rw-r--r--  arch/sh/mm/pg-sh4.c             |  12
-rw-r--r--  include/asm-sh/elf.h            |   2
-rw-r--r--  include/asm-sh/page.h           |  35
-rw-r--r--  include/asm-sh/pgtable-2level.h |  70
-rw-r--r--  include/asm-sh/pgtable.h        | 361
8 files changed, 376 insertions, 161 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 27463e26a7b8..88e9663fc9fc 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -235,13 +235,22 @@ config MEMORY_SIZE
235 | 235 | ||
236 | config 32BIT | 236 | config 32BIT |
237 | bool "Support 32-bit physical addressing through PMB" | 237 | bool "Support 32-bit physical addressing through PMB" |
238 | depends on CPU_SH4A && MMU | 238 | depends on CPU_SH4A && MMU && (!X2TLB || BROKEN) |
239 | default y | 239 | default y |
240 | help | 240 | help |
241 | If you say Y here, physical addressing will be extended to | 241 | If you say Y here, physical addressing will be extended to |
242 | 32-bits through the SH-4A PMB. If this is not set, legacy | 242 | 32-bits through the SH-4A PMB. If this is not set, legacy |
243 | 29-bit physical addressing will be used. | 243 | 29-bit physical addressing will be used. |
244 | 244 | ||
245 | config X2TLB | ||
246 | bool "Enable extended TLB mode" | ||
247 | depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL | ||
248 | help | ||
249 | Selecting this option will enable the extended mode of the SH-X2 | ||
250 | TLB. For legacy SH-X behaviour and interoperability, say N. For | ||
251 | all of the fun new features and a willingness to submit bug reports, | ||
252 | say Y. | ||
253 | |||
245 | config VSYSCALL | 254 | config VSYSCALL |
246 | bool "Support vsyscall page" | 255 | bool "Support vsyscall page" |
247 | depends on MMU | 256 | depends on MMU |
@@ -256,16 +265,52 @@ config VSYSCALL
256 | (the default value) say Y. | 265 | (the default value) say Y. |
257 | 266 | ||
258 | choice | 267 | choice |
268 | prompt "Kernel page size" | ||
269 | default PAGE_SIZE_4KB | ||
270 | |||
271 | config PAGE_SIZE_4KB | ||
272 | bool "4kB" | ||
273 | help | ||
274 | This is the default page size used by all SuperH CPUs. | ||
275 | |||
276 | config PAGE_SIZE_8KB | ||
277 | bool "8kB" | ||
278 | depends on EXPERIMENTAL && X2TLB | ||
279 | help | ||
280 | This enables 8kB pages as supported by SH-X2 and later MMUs. | ||
281 | |||
282 | config PAGE_SIZE_64KB | ||
283 | bool "64kB" | ||
284 | depends on EXPERIMENTAL && CPU_SH4 | ||
285 | help | ||
286 | This enables support for 64kB pages, possible on all SH-4 | ||
287 | CPUs and later. Highly experimental, not recommended. | ||
288 | |||
289 | endchoice | ||
290 | |||
291 | choice | ||
259 | prompt "HugeTLB page size" | 292 | prompt "HugeTLB page size" |
260 | depends on HUGETLB_PAGE && CPU_SH4 && MMU | 293 | depends on HUGETLB_PAGE && CPU_SH4 && MMU |
261 | default HUGETLB_PAGE_SIZE_64K | 294 | default HUGETLB_PAGE_SIZE_64K |
262 | 295 | ||
263 | config HUGETLB_PAGE_SIZE_64K | 296 | config HUGETLB_PAGE_SIZE_64K |
264 | bool "64K" | 297 | bool "64kB" |
298 | |||
299 | config HUGETLB_PAGE_SIZE_256K | ||
300 | bool "256kB" | ||
301 | depends on X2TLB | ||
265 | 302 | ||
266 | config HUGETLB_PAGE_SIZE_1MB | 303 | config HUGETLB_PAGE_SIZE_1MB |
267 | bool "1MB" | 304 | bool "1MB" |
268 | 305 | ||
306 | config HUGETLB_PAGE_SIZE_4MB | ||
307 | bool "4MB" | ||
308 | depends on X2TLB | ||
309 | |||
310 | config HUGETLB_PAGE_SIZE_64MB | ||
311 | bool "64MB" | ||
312 | depends on X2TLB | ||
313 | |||
269 | endchoice | 314 | endchoice |
270 | 315 | ||
271 | source "mm/Kconfig" | 316 | source "mm/Kconfig" |
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7154d1ce9785..8b275166f400 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -93,7 +93,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
93 | pud = pud_offset(pgd, addr); | 93 | pud = pud_offset(pgd, addr); |
94 | if (pud_none(*pud)) { | 94 | if (pud_none(*pud)) { |
95 | pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); | 95 | pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); |
96 | set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER)); | 96 | set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE)); |
97 | if (pmd != pmd_offset(pud, 0)) { | 97 | if (pmd != pmd_offset(pud, 0)) { |
98 | pud_ERROR(*pud); | 98 | pud_ERROR(*pud); |
99 | return; | 99 | return; |
@@ -103,7 +103,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
103 | pmd = pmd_offset(pud, addr); | 103 | pmd = pmd_offset(pud, addr); |
104 | if (pmd_none(*pmd)) { | 104 | if (pmd_none(*pmd)) { |
105 | pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); | 105 | pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); |
106 | set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER)); | 106 | set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); |
107 | if (pte != pte_offset_kernel(pmd, 0)) { | 107 | if (pte != pte_offset_kernel(pmd, 0)) { |
108 | pmd_ERROR(*pmd); | 108 | pmd_ERROR(*pmd); |
109 | return; | 109 | return; |
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a9fe80cfc233..11d54c149821 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -28,9 +28,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
28 | { | 28 | { |
29 | unsigned long end; | 29 | unsigned long end; |
30 | unsigned long pfn; | 30 | unsigned long pfn; |
31 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | | 31 | pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags); |
32 | _PAGE_DIRTY | _PAGE_ACCESSED | | ||
33 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags); | ||
34 | 32 | ||
35 | address &= ~PMD_MASK; | 33 | address &= ~PMD_MASK; |
36 | end = address + size; | 34 | end = address + size; |
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 07371ed7a313..e973ac3b13be 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -37,10 +37,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
37 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) | 37 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) |
38 | clear_page(to); | 38 | clear_page(to); |
39 | else { | 39 | else { |
40 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | | ||
41 | _PAGE_RW | _PAGE_CACHABLE | | ||
42 | _PAGE_DIRTY | _PAGE_ACCESSED | | ||
43 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD); | ||
44 | unsigned long phys_addr = PHYSADDR(to); | 40 | unsigned long phys_addr = PHYSADDR(to); |
45 | unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); | 41 | unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); |
46 | pgd_t *pgd = pgd_offset_k(p3_addr); | 42 | pgd_t *pgd = pgd_offset_k(p3_addr); |
@@ -50,7 +46,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
50 | pte_t entry; | 46 | pte_t entry; |
51 | unsigned long flags; | 47 | unsigned long flags; |
52 | 48 | ||
53 | entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot); | 49 | entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL); |
54 | down(&p3map_sem[(address & CACHE_ALIAS)>>12]); | 50 | down(&p3map_sem[(address & CACHE_ALIAS)>>12]); |
55 | set_pte(pte, entry); | 51 | set_pte(pte, entry); |
56 | local_irq_save(flags); | 52 | local_irq_save(flags); |
@@ -77,10 +73,6 @@ void copy_user_page(void *to, void *from, unsigned long address,
77 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) | 73 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) |
78 | copy_page(to, from); | 74 | copy_page(to, from); |
79 | else { | 75 | else { |
80 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | | ||
81 | _PAGE_RW | _PAGE_CACHABLE | | ||
82 | _PAGE_DIRTY | _PAGE_ACCESSED | | ||
83 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD); | ||
84 | unsigned long phys_addr = PHYSADDR(to); | 76 | unsigned long phys_addr = PHYSADDR(to); |
85 | unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); | 77 | unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); |
86 | pgd_t *pgd = pgd_offset_k(p3_addr); | 78 | pgd_t *pgd = pgd_offset_k(p3_addr); |
@@ -90,7 +82,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
90 | pte_t entry; | 82 | pte_t entry; |
91 | unsigned long flags; | 83 | unsigned long flags; |
92 | 84 | ||
93 | entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot); | 85 | entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL); |
94 | down(&p3map_sem[(address & CACHE_ALIAS)>>12]); | 86 | down(&p3map_sem[(address & CACHE_ALIAS)>>12]); |
95 | set_pte(pte, entry); | 87 | set_pte(pte, entry); |
96 | local_irq_save(flags); | 88 | local_irq_save(flags); |
diff --git a/include/asm-sh/elf.h b/include/asm-sh/elf.h
index fc050fd7645e..43ca244564b1 100644
--- a/include/asm-sh/elf.h
+++ b/include/asm-sh/elf.h
@@ -74,7 +74,7 @@ typedef struct user_fpu_struct elf_fpregset_t;
74 | #define ELF_ARCH EM_SH | 74 | #define ELF_ARCH EM_SH |
75 | 75 | ||
76 | #define USE_ELF_CORE_DUMP | 76 | #define USE_ELF_CORE_DUMP |
77 | #define ELF_EXEC_PAGESIZE 4096 | 77 | #define ELF_EXEC_PAGESIZE PAGE_SIZE |
78 | 78 | ||
79 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | 79 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical |
80 | use of this is to invoke "./ld.so someprog" to test out a new version of | 80 | use of this is to invoke "./ld.so someprog" to test out a new version of |
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index ca8b26d90475..380fd62dd05a 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -13,9 +13,16 @@
13 | [ P4 control ] 0xE0000000 | 13 | [ P4 control ] 0xE0000000 |
14 | */ | 14 | */ |
15 | 15 | ||
16 | |||
17 | /* PAGE_SHIFT determines the page size */ | 16 | /* PAGE_SHIFT determines the page size */ |
18 | #define PAGE_SHIFT 12 | 17 | #if defined(CONFIG_PAGE_SIZE_4KB) |
18 | # define PAGE_SHIFT 12 | ||
19 | #elif defined(CONFIG_PAGE_SIZE_8KB) | ||
20 | # define PAGE_SHIFT 13 | ||
21 | #elif defined(CONFIG_PAGE_SIZE_64KB) | ||
22 | # define PAGE_SHIFT 16 | ||
23 | #else | ||
24 | # error "Bogus kernel page size?" | ||
25 | #endif | ||
19 | 26 | ||
20 | #ifdef __ASSEMBLY__ | 27 | #ifdef __ASSEMBLY__ |
21 | #define PAGE_SIZE (1 << PAGE_SHIFT) | 28 | #define PAGE_SIZE (1 << PAGE_SHIFT) |
@@ -28,8 +35,14 @@
28 | 35 | ||
29 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | 36 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) |
30 | #define HPAGE_SHIFT 16 | 37 | #define HPAGE_SHIFT 16 |
38 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) | ||
39 | #define HPAGE_SHIFT 18 | ||
31 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) | 40 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) |
32 | #define HPAGE_SHIFT 20 | 41 | #define HPAGE_SHIFT 20 |
42 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) | ||
43 | #define HPAGE_SHIFT 22 | ||
44 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) | ||
45 | #define HPAGE_SHIFT 26 | ||
33 | #endif | 46 | #endif |
34 | 47 | ||
35 | #ifdef CONFIG_HUGETLB_PAGE | 48 | #ifdef CONFIG_HUGETLB_PAGE |
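The new HPAGE_SHIFT values in this hunk simply encode the huge page sizes as powers of two. A quick cross-check of the shift/size pairs (a standalone sketch, not part of the patch; it only assumes the usual HPAGE_SIZE = 1UL << HPAGE_SHIFT relationship used elsewhere in this header):

    /* Fails to compile if a shift/size pair ever disagrees. */
    #define HPAGE_SHIFT_CHECK(shift, kib) \
            typedef char hpage_shift_##shift[((1UL << (shift)) == (kib) * 1024UL) ? 1 : -1]

    HPAGE_SHIFT_CHECK(16, 64);      /* CONFIG_HUGETLB_PAGE_SIZE_64K  */
    HPAGE_SHIFT_CHECK(18, 256);     /* CONFIG_HUGETLB_PAGE_SIZE_256K */
    HPAGE_SHIFT_CHECK(20, 1024);    /* CONFIG_HUGETLB_PAGE_SIZE_1MB  */
    HPAGE_SHIFT_CHECK(22, 4096);    /* CONFIG_HUGETLB_PAGE_SIZE_4MB  */
    HPAGE_SHIFT_CHECK(26, 65536);   /* CONFIG_HUGETLB_PAGE_SIZE_64MB */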
@@ -69,15 +82,25 @@ extern void __copy_user_page(void *to, void *from, void *orig_to);
69 | /* | 82 | /* |
70 | * These are used to make use of C type-checking.. | 83 | * These are used to make use of C type-checking.. |
71 | */ | 84 | */ |
72 | typedef struct { unsigned long pte; } pte_t; | 85 | #ifdef CONFIG_X2TLB |
73 | typedef struct { unsigned long pgd; } pgd_t; | 86 | typedef struct { unsigned long pte_low, pte_high; } pte_t; |
87 | typedef struct { unsigned long long pgprot; } pgprot_t; | ||
88 | #define pte_val(x) \ | ||
89 | ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) | ||
90 | #define __pte(x) \ | ||
91 | ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) | ||
92 | #else | ||
93 | typedef struct { unsigned long pte_low; } pte_t; | ||
74 | typedef struct { unsigned long pgprot; } pgprot_t; | 94 | typedef struct { unsigned long pgprot; } pgprot_t; |
95 | #define pte_val(x) ((x).pte_low) | ||
96 | #define __pte(x) ((pte_t) { (x) } ) | ||
97 | #endif | ||
98 | |||
99 | typedef struct { unsigned long pgd; } pgd_t; | ||
75 | 100 | ||
76 | #define pte_val(x) ((x).pte) | ||
77 | #define pgd_val(x) ((x).pgd) | 101 | #define pgd_val(x) ((x).pgd) |
78 | #define pgprot_val(x) ((x).pgprot) | 102 | #define pgprot_val(x) ((x).pgprot) |
79 | 103 | ||
80 | #define __pte(x) ((pte_t) { (x) } ) | ||
81 | #define __pgd(x) ((pgd_t) { (x) } ) | 104 | #define __pgd(x) ((pgd_t) { (x) } ) |
82 | #define __pgprot(x) ((pgprot_t) { (x) } ) | 105 | #define __pgprot(x) ((pgprot_t) { (x) } ) |
83 | 106 | ||
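With CONFIG_X2TLB a pte_t is no longer a single word, so the __pte()/pte_val() pair above packs a 64-bit value into, and recovers it from, the two 32-bit halves. A minimal user-space sketch of that round trip, assuming a gcc build (the statement-expression form of __pte() is a GCC extension) and using a fixed-width type in place of SH's 32-bit unsigned long:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the CONFIG_X2TLB definitions from include/asm-sh/page.h above. */
    typedef struct { uint32_t pte_low, pte_high; } pte_t;

    #define pte_val(x) \
            ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
    #define __pte(x) \
            ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })

    int main(void)
    {
            /* _PAGE_PRESENT (0x100) stays in the low word; an extended bit
             * such as _PAGE_EXT_KERN_READ (0x2000), once shifted up by
             * _PAGE_EXT(), ends up in the high word. */
            unsigned long long val = 0x0000200000000100ULL;
            pte_t pte = __pte(val);

            printf("pte_low=%#x pte_high=%#x pte_val=%#llx\n",
                   pte.pte_low, pte.pte_high, pte_val(pte));
            /* prints: pte_low=0x100 pte_high=0x2000 pte_val=0x200000000100 */
            return 0;
    }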
diff --git a/include/asm-sh/pgtable-2level.h b/include/asm-sh/pgtable-2level.h
deleted file mode 100644
index b525db6f61c6..000000000000
--- a/include/asm-sh/pgtable-2level.h
+++ /dev/null
@@ -1,70 +0,0 @@
1 | #ifndef __ASM_SH_PGTABLE_2LEVEL_H | ||
2 | #define __ASM_SH_PGTABLE_2LEVEL_H | ||
3 | |||
4 | /* | ||
5 | * traditional two-level paging structure: | ||
6 | */ | ||
7 | |||
8 | #define PGDIR_SHIFT 22 | ||
9 | #define PTRS_PER_PGD 1024 | ||
10 | |||
11 | /* | ||
12 | * this is two-level, so we don't really have any | ||
13 | * PMD directory physically. | ||
14 | */ | ||
15 | #define PMD_SHIFT 22 | ||
16 | #define PTRS_PER_PMD 1 | ||
17 | |||
18 | #define PTRS_PER_PTE 1024 | ||
19 | |||
20 | #ifndef __ASSEMBLY__ | ||
21 | #define pte_ERROR(e) \ | ||
22 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) | ||
23 | #define pmd_ERROR(e) \ | ||
24 | printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) | ||
25 | #define pgd_ERROR(e) \ | ||
26 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
27 | |||
28 | /* | ||
29 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
30 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | ||
31 | * into the pgd entry) | ||
32 | */ | ||
33 | static inline int pgd_none(pgd_t pgd) { return 0; } | ||
34 | static inline int pgd_bad(pgd_t pgd) { return 0; } | ||
35 | static inline int pgd_present(pgd_t pgd) { return 1; } | ||
36 | static inline void pgd_clear (pgd_t * pgdp) { } | ||
37 | |||
38 | /* | ||
39 | * Certain architectures need to do special things when PTEs | ||
40 | * within a page table are directly modified. Thus, the following | ||
41 | * hook is made available. | ||
42 | */ | ||
43 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) | ||
44 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | ||
45 | |||
46 | /* | ||
47 | * (pmds are folded into pgds so this doesn't get actually called, | ||
48 | * but the define is needed for a generic inline function.) | ||
49 | */ | ||
50 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) | ||
51 | #define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval) | ||
52 | |||
53 | #define pgd_page_vaddr(pgd) \ | ||
54 | ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK)) | ||
55 | |||
56 | #define pgd_page(pgd) \ | ||
57 | (phys_to_page(pgd_val(pgd))) | ||
58 | |||
59 | static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) | ||
60 | { | ||
61 | return (pmd_t *) dir; | ||
62 | } | ||
63 | |||
64 | #define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT))) | ||
65 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
66 | #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
67 | |||
68 | #endif /* !__ASSEMBLY__ */ | ||
69 | |||
70 | #endif /* __ASM_SH_PGTABLE_2LEVEL_H */ | ||
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 2c8682ad1012..22c3d0b3e11a 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -15,15 +15,10 @@
15 | #include <asm-generic/pgtable-nopmd.h> | 15 | #include <asm-generic/pgtable-nopmd.h> |
16 | #include <asm/page.h> | 16 | #include <asm/page.h> |
17 | 17 | ||
18 | #define PTRS_PER_PGD 1024 | ||
19 | |||
20 | #ifndef __ASSEMBLY__ | 18 | #ifndef __ASSEMBLY__ |
21 | #include <asm/addrspace.h> | 19 | #include <asm/addrspace.h> |
22 | #include <asm/fixmap.h> | 20 | #include <asm/fixmap.h> |
23 | 21 | ||
24 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
25 | extern void paging_init(void); | ||
26 | |||
27 | /* | 22 | /* |
28 | * ZERO_PAGE is a global shared page that is always zero: used | 23 | * ZERO_PAGE is a global shared page that is always zero: used |
29 | * for zero-mapped memory areas etc.. | 24 | * for zero-mapped memory areas etc.. |
@@ -33,15 +28,28 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
33 | 28 | ||
34 | #endif /* !__ASSEMBLY__ */ | 29 | #endif /* !__ASSEMBLY__ */ |
35 | 30 | ||
36 | /* traditional two-level paging structure */ | 31 | /* |
37 | #define PGDIR_SHIFT 22 | 32 | * traditional two-level paging structure |
38 | #define PTRS_PER_PMD 1 | 33 | */ |
39 | #define PTRS_PER_PTE 1024 | 34 | /* PTE bits */ |
40 | #define PMD_SIZE (1UL << PMD_SHIFT) | 35 | #ifdef CONFIG_X2TLB |
41 | #define PMD_MASK (~(PMD_SIZE-1)) | 36 | # define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */ |
37 | #else | ||
38 | # define PTE_MAGNITUDE 2 /* 32-bit PTEs */ | ||
39 | #endif | ||
40 | #define PTE_SHIFT PAGE_SHIFT | ||
41 | #define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) | ||
42 | |||
43 | /* PGD bits */ | ||
44 | #define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) | ||
45 | #define PGDIR_BITS (32 - PGDIR_SHIFT) | ||
42 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 46 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
43 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 47 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
44 | 48 | ||
49 | /* Entries per level */ | ||
50 | #define PTRS_PER_PTE (1UL << PTE_BITS) | ||
51 | #define PTRS_PER_PGD (1UL << PGDIR_BITS) | ||
52 | |||
45 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) | 53 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) |
46 | #define FIRST_USER_ADDRESS 0 | 54 | #define FIRST_USER_ADDRESS 0 |
47 | 55 | ||
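The hunk above replaces the old hard-coded two-level constants with values derived from the page size and the PTE width: PTE_BITS is the number of index bits that fit in one page of PTEs (PTE_MAGNITUDE being log2 of the PTE size in bytes), and PGDIR_SHIFT/PGDIR_BITS consume the remainder of the 32-bit virtual address. A small sketch (not part of the patch) that reproduces the arithmetic for a few configurations:

    #include <stdio.h>

    /* Recomputes PTRS_PER_PTE, PGDIR_SHIFT and PTRS_PER_PGD from the
     * formulas in the hunk above. */
    static void show(const char *cfg, int page_shift, int pte_magnitude)
    {
            int pte_bits    = page_shift - pte_magnitude;
            int pgdir_shift = page_shift + pte_bits;
            int pgdir_bits  = 32 - pgdir_shift;

            printf("%-18s PTRS_PER_PTE=%-5lu PGDIR_SHIFT=%-2d PTRS_PER_PGD=%lu\n",
                   cfg, 1UL << pte_bits, pgdir_shift, 1UL << pgdir_bits);
    }

    int main(void)
    {
            show("4kB, 32-bit PTE",  12, 2);  /* 1024 / 22 / 1024 -- the old layout */
            show("8kB, 64-bit PTE",  13, 3);  /* 1024 / 23 / 512 */
            show("64kB, 32-bit PTE", 16, 2);  /* 16384 / 30 / 4 */
            return 0;
    }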
@@ -57,7 +65,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
57 | /* | 65 | /* |
58 | * Linux PTEL encoding. | 66 | * Linux PTEL encoding. |
59 | * | 67 | * |
60 | * Hardware and software bit definitions for the PTEL value: | 68 | * Hardware and software bit definitions for the PTEL value (see below for |
69 | * notes on SH-X2 MMUs and 64-bit PTEs): | ||
61 | * | 70 | * |
62 | * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4). | 71 | * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4). |
63 | * | 72 | * |
@@ -76,20 +85,57 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
76 | * | 85 | * |
77 | * - Bits 31, 30, and 29 remain unused by everyone and can be used for future | 86 | * - Bits 31, 30, and 29 remain unused by everyone and can be used for future |
78 | * software flags, although care must be taken to update _PAGE_CLEAR_FLAGS. | 87 | * software flags, although care must be taken to update _PAGE_CLEAR_FLAGS. |
88 | * | ||
89 | * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day. | ||
90 | * | ||
91 | * SH-X2 MMUs and extended PTEs | ||
92 | * | ||
93 | * SH-X2 supports an extended mode TLB with split data arrays due to the | ||
94 | * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and | ||
95 | * SZ bit placeholders still exist in data array 1, but are implemented as | ||
96 | * reserved bits, with the real logic existing in data array 2. | ||
97 | * | ||
98 | * The downside to this is that we can no longer fit everything in to a 32-bit | ||
99 | * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus | ||
100 | * side, this gives us quite a few spare bits to play with for future usage. | ||
79 | */ | 101 | */ |
102 | /* Legacy and compat mode bits */ | ||
80 | #define _PAGE_WT 0x001 /* WT-bit on SH-4, 0 on SH-3 */ | 103 | #define _PAGE_WT 0x001 /* WT-bit on SH-4, 0 on SH-3 */ |
81 | #define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */ | 104 | #define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */ |
82 | #define _PAGE_DIRTY 0x004 /* D-bit : page changed */ | 105 | #define _PAGE_DIRTY 0x004 /* D-bit : page changed */ |
83 | #define _PAGE_CACHABLE 0x008 /* C-bit : cachable */ | 106 | #define _PAGE_CACHABLE 0x008 /* C-bit : cachable */ |
84 | #define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */ | 107 | #ifndef CONFIG_X2TLB |
85 | #define _PAGE_RW 0x020 /* PR0-bit : write access allowed */ | 108 | # define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */ |
86 | #define _PAGE_USER 0x040 /* PR1-bit : user space access allowed */ | 109 | # define _PAGE_RW 0x020 /* PR0-bit : write access allowed */ |
87 | #define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */ | 110 | # define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/ |
111 | # define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */ | ||
112 | #endif | ||
88 | #define _PAGE_PRESENT 0x100 /* V-bit : page is valid */ | 113 | #define _PAGE_PRESENT 0x100 /* V-bit : page is valid */ |
89 | #define _PAGE_PROTNONE 0x200 /* software: if not present */ | 114 | #define _PAGE_PROTNONE 0x200 /* software: if not present */ |
90 | #define _PAGE_ACCESSED 0x400 /* software: page referenced */ | 115 | #define _PAGE_ACCESSED 0x400 /* software: page referenced */ |
91 | #define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */ | 116 | #define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */ |
92 | 117 | ||
118 | /* Extended mode bits */ | ||
119 | #define _PAGE_EXT_ESZ0 0x0010 /* ESZ0-bit: Size of page */ | ||
120 | #define _PAGE_EXT_ESZ1 0x0020 /* ESZ1-bit: Size of page */ | ||
121 | #define _PAGE_EXT_ESZ2 0x0040 /* ESZ2-bit: Size of page */ | ||
122 | #define _PAGE_EXT_ESZ3 0x0080 /* ESZ3-bit: Size of page */ | ||
123 | |||
124 | #define _PAGE_EXT_USER_EXEC 0x0100 /* EPR0-bit: User space executable */ | ||
125 | #define _PAGE_EXT_USER_WRITE 0x0200 /* EPR1-bit: User space writable */ | ||
126 | #define _PAGE_EXT_USER_READ 0x0400 /* EPR2-bit: User space readable */ | ||
127 | |||
128 | #define _PAGE_EXT_KERN_EXEC 0x0800 /* EPR3-bit: Kernel space executable */ | ||
129 | #define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */ | ||
130 | #define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */ | ||
131 | |||
132 | /* Wrapper for extended mode pgprot twiddling */ | ||
133 | #ifdef CONFIG_X2TLB | ||
134 | # define _PAGE_EXT(x) ((unsigned long long)(x) << 32) | ||
135 | #else | ||
136 | # define _PAGE_EXT(x) (0) | ||
137 | #endif | ||
138 | |||
93 | /* software: moves to PTEA.TC (Timing Control) */ | 139 | /* software: moves to PTEA.TC (Timing Control) */ |
94 | #define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */ | 140 | #define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */ |
95 | #define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */ | 141 | #define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */ |
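In extended mode the EPR/ESZ bits live in data array 2, so _PAGE_EXT() shifts them into the upper half of the 64-bit PTE value while the legacy/compat bits stay in the lower half. A worked fragment (not a complete program), relying only on the definitions in the hunk above with CONFIG_X2TLB set:

    /* _PAGE_PRESENT | _PAGE_ACCESSED stay in the low word:        0x00000500
     * USER_READ | USER_WRITE are moved up by _PAGE_EXT():    0x00000600 << 32 */
    unsigned long long prot = _PAGE_PRESENT | _PAGE_ACCESSED |
                              _PAGE_EXT(_PAGE_EXT_USER_READ | _PAGE_EXT_USER_WRITE);

    /* prot == 0x0000060000000500ULL, so __pte(prot) from asm-sh/page.h splits
     * it into pte_low == 0x500 and pte_high == 0x600. */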
@@ -114,37 +160,165 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
114 | 160 | ||
115 | #define _PAGE_FLAGS_HARDWARE_MASK (0x1fffffff & ~(_PAGE_CLEAR_FLAGS)) | 161 | #define _PAGE_FLAGS_HARDWARE_MASK (0x1fffffff & ~(_PAGE_CLEAR_FLAGS)) |
116 | 162 | ||
117 | /* Hardware flags: SZ0=1 (4k-byte) */ | 163 | /* Hardware flags, page size encoding */ |
118 | #define _PAGE_FLAGS_HARD _PAGE_SZ0 | 164 | #if defined(CONFIG_X2TLB) |
165 | # if defined(CONFIG_PAGE_SIZE_4KB) | ||
166 | # define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ0) | ||
167 | # elif defined(CONFIG_PAGE_SIZE_8KB) | ||
168 | # define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ1) | ||
169 | # elif defined(CONFIG_PAGE_SIZE_64KB) | ||
170 | # define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ2) | ||
171 | # endif | ||
172 | #else | ||
173 | # if defined(CONFIG_PAGE_SIZE_4KB) | ||
174 | # define _PAGE_FLAGS_HARD _PAGE_SZ0 | ||
175 | # elif defined(CONFIG_PAGE_SIZE_64KB) | ||
176 | # define _PAGE_FLAGS_HARD _PAGE_SZ1 | ||
177 | # endif | ||
178 | #endif | ||
119 | 179 | ||
120 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | 180 | #if defined(CONFIG_X2TLB) |
121 | #define _PAGE_SZHUGE (_PAGE_SZ1) | 181 | # if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) |
122 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) | 182 | # define _PAGE_SZHUGE (_PAGE_EXT_ESZ2) |
123 | #define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1) | 183 | # elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) |
184 | # define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2) | ||
185 | # elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) | ||
186 | # define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2) | ||
187 | # elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) | ||
188 | # define _PAGE_SZHUGE (_PAGE_EXT_ESZ3) | ||
189 | # elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) | ||
190 | # define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3) | ||
191 | # endif | ||
192 | #else | ||
193 | # if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
194 | # define _PAGE_SZHUGE (_PAGE_SZ1) | ||
195 | # elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) | ||
196 | # define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1) | ||
197 | # endif | ||
124 | #endif | 198 | #endif |
125 | 199 | ||
126 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | 200 | #define _PAGE_CHG_MASK \ |
127 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | 201 | (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY) |
128 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY) | ||
129 | 202 | ||
130 | #ifndef __ASSEMBLY__ | 203 | #ifndef __ASSEMBLY__ |
131 | 204 | ||
132 | #ifdef CONFIG_MMU | 205 | #if defined(CONFIG_X2TLB) /* SH-X2 TLB */ |
133 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD) | 206 | #define _PAGE_TABLE \ |
134 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD) | 207 | (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \ |
135 | #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | 208 | _PAGE_EXT(_PAGE_EXT_USER_READ | _PAGE_EXT_USER_WRITE)) |
136 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | 209 | |
137 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) | 210 | #define _KERNPG_TABLE \ |
211 | (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \ | ||
212 | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_KERN_WRITE)) | ||
213 | |||
214 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ | ||
215 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | ||
216 | |||
217 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
218 | _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ | ||
219 | _PAGE_EXT(_PAGE_EXT_USER_READ | \ | ||
220 | _PAGE_EXT_USER_WRITE)) | ||
221 | |||
222 | #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
223 | _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ | ||
224 | _PAGE_EXT(_PAGE_EXT_USER_EXEC | \ | ||
225 | _PAGE_EXT_USER_READ)) | ||
226 | |||
227 | #define PAGE_COPY PAGE_EXECREAD | ||
228 | |||
229 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
230 | _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ | ||
231 | _PAGE_EXT(_PAGE_EXT_USER_READ)) | ||
232 | |||
233 | #define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
234 | _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ | ||
235 | _PAGE_EXT(_PAGE_EXT_USER_WRITE)) | ||
236 | |||
237 | #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
238 | _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ | ||
239 | _PAGE_EXT(_PAGE_EXT_USER_WRITE | \ | ||
240 | _PAGE_EXT_USER_READ | \ | ||
241 | _PAGE_EXT_USER_EXEC)) | ||
242 | |||
243 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ | ||
244 | _PAGE_DIRTY | _PAGE_ACCESSED | \ | ||
245 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \ | ||
246 | _PAGE_EXT(_PAGE_EXT_KERN_READ | \ | ||
247 | _PAGE_EXT_KERN_WRITE | \ | ||
248 | _PAGE_EXT_KERN_EXEC)) | ||
249 | |||
250 | #define PAGE_KERNEL_NOCACHE \ | ||
251 | __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ | ||
252 | _PAGE_ACCESSED | _PAGE_HW_SHARED | \ | ||
253 | _PAGE_FLAGS_HARD | \ | ||
254 | _PAGE_EXT(_PAGE_EXT_KERN_READ | \ | ||
255 | _PAGE_EXT_KERN_WRITE | \ | ||
256 | _PAGE_EXT_KERN_EXEC)) | ||
257 | |||
258 | #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ | ||
259 | _PAGE_DIRTY | _PAGE_ACCESSED | \ | ||
260 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \ | ||
261 | _PAGE_EXT(_PAGE_EXT_KERN_READ | \ | ||
262 | _PAGE_EXT_KERN_EXEC)) | ||
263 | |||
264 | #define PAGE_KERNEL_PCC(slot, type) \ | ||
265 | __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ | ||
266 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ | ||
267 | _PAGE_EXT(_PAGE_EXT_KERN_READ | \ | ||
268 | _PAGE_EXT_KERN_WRITE | \ | ||
269 | _PAGE_EXT_KERN_EXEC) \ | ||
270 | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ | ||
271 | (type)) | ||
272 | |||
273 | #elif defined(CONFIG_MMU) /* SH-X TLB */ | ||
274 | #define _PAGE_TABLE \ | ||
275 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
276 | #define _KERNPG_TABLE \ | ||
277 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
278 | |||
279 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ | ||
280 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | ||
281 | |||
282 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ | ||
283 | _PAGE_CACHABLE | _PAGE_ACCESSED | \ | ||
284 | _PAGE_FLAGS_HARD) | ||
285 | |||
286 | #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \ | ||
287 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | ||
288 | |||
289 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \ | ||
290 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | ||
291 | |||
292 | #define PAGE_EXECREAD PAGE_READONLY | ||
293 | #define PAGE_RWX PAGE_SHARED | ||
294 | #define PAGE_WRITEONLY PAGE_SHARED | ||
295 | |||
296 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \ | ||
297 | _PAGE_DIRTY | _PAGE_ACCESSED | \ | ||
298 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) | ||
299 | |||
138 | #define PAGE_KERNEL_NOCACHE \ | 300 | #define PAGE_KERNEL_NOCACHE \ |
139 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) | 301 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ |
140 | #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) | 302 | _PAGE_ACCESSED | _PAGE_HW_SHARED | \ |
303 | _PAGE_FLAGS_HARD) | ||
304 | |||
305 | #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ | ||
306 | _PAGE_DIRTY | _PAGE_ACCESSED | \ | ||
307 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) | ||
308 | |||
141 | #define PAGE_KERNEL_PCC(slot, type) \ | 309 | #define PAGE_KERNEL_PCC(slot, type) \ |
142 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | (type)) | 310 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ |
311 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ | ||
312 | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ | ||
313 | (type)) | ||
143 | #else /* no mmu */ | 314 | #else /* no mmu */ |
144 | #define PAGE_NONE __pgprot(0) | 315 | #define PAGE_NONE __pgprot(0) |
145 | #define PAGE_SHARED __pgprot(0) | 316 | #define PAGE_SHARED __pgprot(0) |
146 | #define PAGE_COPY __pgprot(0) | 317 | #define PAGE_COPY __pgprot(0) |
318 | #define PAGE_EXECREAD __pgprot(0) | ||
319 | #define PAGE_RWX __pgprot(0) | ||
147 | #define PAGE_READONLY __pgprot(0) | 320 | #define PAGE_READONLY __pgprot(0) |
321 | #define PAGE_WRITEONLY __pgprot(0) | ||
148 | #define PAGE_KERNEL __pgprot(0) | 322 | #define PAGE_KERNEL __pgprot(0) |
149 | #define PAGE_KERNEL_NOCACHE __pgprot(0) | 323 | #define PAGE_KERNEL_NOCACHE __pgprot(0) |
150 | #define PAGE_KERNEL_RO __pgprot(0) | 324 | #define PAGE_KERNEL_RO __pgprot(0) |
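Putting the pieces together, the protection values defined earlier in this hunk are plain ORs of the bit definitions from the previous one. For example, with CONFIG_X2TLB and 4kB pages (so _PAGE_FLAGS_HARD is _PAGE_EXT(_PAGE_EXT_ESZ0)), PAGE_KERNEL works out as follows; the numbers are my own expansion of the macros, shown only as a cross-check:

    /* low word:  _PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY |
     *            _PAGE_ACCESSED | _PAGE_HW_SHARED
     *            = 0x100 | 0x008 | 0x004 | 0x400 | 0x002         = 0x0000050e
     * high word: _PAGE_EXT_ESZ0 | _PAGE_EXT_KERN_READ |
     *            _PAGE_EXT_KERN_WRITE | _PAGE_EXT_KERN_EXEC
     *            = 0x0010 | 0x2000 | 0x1000 | 0x0800             = 0x00003810
     *
     * pgprot_val(PAGE_KERNEL) == 0x000038100000050eULL
     */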
@@ -154,27 +328,32 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
154 | #endif /* __ASSEMBLY__ */ | 328 | #endif /* __ASSEMBLY__ */ |
155 | 329 | ||
156 | /* | 330 | /* |
157 | * As i386 and MIPS, SuperH can't do page protection for execute, and | 331 | * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page |
158 | * considers that the same as a read. Also, write | 332 | * protection for execute, and consider it the same as a read. Also, write |
159 | * read permissions. This is the closest we can get.. | 333 | * permission implies read permission. This is the closest we can get.. |
334 | * | ||
335 | * SH-X2 (SH7785) and later parts take this to the opposite end of the extreme, | ||
336 | * not only supporting separate execute, read, and write bits, but having | ||
337 | * completely separate permission bits for user and kernel space. | ||
160 | */ | 338 | */ |
339 | /*xwr*/ | ||
161 | #define __P000 PAGE_NONE | 340 | #define __P000 PAGE_NONE |
162 | #define __P001 PAGE_READONLY | 341 | #define __P001 PAGE_READONLY |
163 | #define __P010 PAGE_COPY | 342 | #define __P010 PAGE_COPY |
164 | #define __P011 PAGE_COPY | 343 | #define __P011 PAGE_COPY |
165 | #define __P100 PAGE_READONLY | 344 | #define __P100 PAGE_EXECREAD |
166 | #define __P101 PAGE_READONLY | 345 | #define __P101 PAGE_EXECREAD |
167 | #define __P110 PAGE_COPY | 346 | #define __P110 PAGE_COPY |
168 | #define __P111 PAGE_COPY | 347 | #define __P111 PAGE_COPY |
169 | 348 | ||
170 | #define __S000 PAGE_NONE | 349 | #define __S000 PAGE_NONE |
171 | #define __S001 PAGE_READONLY | 350 | #define __S001 PAGE_READONLY |
172 | #define __S010 PAGE_SHARED | 351 | #define __S010 PAGE_WRITEONLY |
173 | #define __S011 PAGE_SHARED | 352 | #define __S011 PAGE_SHARED |
174 | #define __S100 PAGE_READONLY | 353 | #define __S100 PAGE_EXECREAD |
175 | #define __S101 PAGE_READONLY | 354 | #define __S101 PAGE_EXECREAD |
176 | #define __S110 PAGE_SHARED | 355 | #define __S110 PAGE_RWX |
177 | #define __S111 PAGE_SHARED | 356 | #define __S111 PAGE_RWX |
178 | 357 | ||
179 | #ifndef __ASSEMBLY__ | 358 | #ifndef __ASSEMBLY__ |
180 | 359 | ||
@@ -183,7 +362,17 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
183 | * within a page table are directly modified. Thus, the following | 362 | * within a page table are directly modified. Thus, the following |
184 | * hook is made available. | 363 | * hook is made available. |
185 | */ | 364 | */ |
365 | #ifdef CONFIG_X2TLB | ||
366 | static inline void set_pte(pte_t *ptep, pte_t pte) | ||
367 | { | ||
368 | ptep->pte_high = pte.pte_high; | ||
369 | smp_wmb(); | ||
370 | ptep->pte_low = pte.pte_low; | ||
371 | } | ||
372 | #else | ||
186 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) | 373 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) |
374 | #endif | ||
375 | |||
187 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | 376 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) |
188 | 377 | ||
189 | /* | 378 | /* |
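The split set_pte() above is the one subtle part of the 64-bit PTE handling: the two words cannot be stored atomically, and the valid bit (_PAGE_PRESENT, 0x100) lives in pte_low, so the high word has to be visible before the entry is marked present. The same function again, as a sketch with explanatory comments only:

    static inline void set_pte(pte_t *ptep, pte_t pte)
    {
            /* Publish the extended protection/size bits first... */
            ptep->pte_high = pte.pte_high;
            /* ...order that store ahead of the low word... */
            smp_wmb();
            /* ...and only then write the word carrying _PAGE_PRESENT, so the
             * entry never appears valid with a stale high half. */
            ptep->pte_low = pte.pte_low;
    }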
@@ -192,13 +381,13 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
192 | */ | 381 | */ |
193 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) | 382 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) |
194 | 383 | ||
195 | #define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT))) | 384 | #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) |
196 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | 385 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) |
197 | #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | 386 | #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) |
198 | 387 | ||
199 | #define pte_none(x) (!pte_val(x)) | 388 | #define pte_none(x) (!pte_val(x)) |
200 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) | 389 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
201 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) | 390 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) |
202 | 391 | ||
203 | #define pmd_none(x) (!pmd_val(x)) | 392 | #define pmd_none(x) (!pmd_val(x)) |
204 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | 393 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) |
@@ -212,28 +401,52 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
212 | * The following only work if pte_present() is true. | 401 | * The following only work if pte_present() is true. |
213 | * Undefined behaviour if not.. | 402 | * Undefined behaviour if not.. |
214 | */ | 403 | */ |
215 | static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } | 404 | #define pte_not_present(pte) (!(pte_val(pte) & _PAGE_PRESENT)) |
216 | static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; } | 405 | #define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY) |
217 | static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; } | 406 | #define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) |
218 | static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; } | 407 | #define pte_file(pte) (pte_val(pte) & _PAGE_FILE) |
219 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | 408 | |
220 | static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; } | 409 | #ifdef CONFIG_X2TLB |
221 | static inline int pte_not_present(pte_t pte){ return !(pte_val(pte) & _PAGE_PRESENT); } | 410 | #define pte_read(pte) ((pte).pte_high & _PAGE_EXT_USER_READ) |
222 | 411 | #define pte_exec(pte) ((pte).pte_high & _PAGE_EXT_USER_EXEC) | |
223 | static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } | 412 | #define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) |
224 | static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } | 413 | #else |
225 | static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } | 414 | #define pte_read(pte) (pte_val(pte) & _PAGE_USER) |
226 | static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } | 415 | #define pte_exec(pte) (pte_val(pte) & _PAGE_USER) |
227 | static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } | 416 | #define pte_write(pte) (pte_val(pte) & _PAGE_RW) |
228 | static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } | ||
229 | static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } | ||
230 | static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } | ||
231 | static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } | ||
232 | static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } | ||
233 | #ifdef CONFIG_HUGETLB_PAGE | ||
234 | static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; } | ||
235 | #endif | 417 | #endif |
236 | 418 | ||
419 | #define PTE_BIT_FUNC(h,fn,op) \ | ||
420 | static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; } | ||
421 | |||
422 | #ifdef CONFIG_X2TLB | ||
423 | /* | ||
424 | * We cheat a bit in the SH-X2 TLB case. As the permission bits are | ||
425 | * individually toggled (and user permissions are entirely decoupled from | ||
426 | * kernel permissions), we attempt to couple them a bit more sanely here. | ||
427 | */ | ||
428 | PTE_BIT_FUNC(high, rdprotect, &= ~_PAGE_EXT_USER_READ); | ||
429 | PTE_BIT_FUNC(high, mkread, |= _PAGE_EXT_USER_READ | _PAGE_EXT_KERN_READ); | ||
430 | PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); | ||
431 | PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); | ||
432 | PTE_BIT_FUNC(high, exprotect, &= ~_PAGE_EXT_USER_EXEC); | ||
433 | PTE_BIT_FUNC(high, mkexec, |= _PAGE_EXT_USER_EXEC | _PAGE_EXT_KERN_EXEC); | ||
434 | PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); | ||
435 | #else | ||
436 | PTE_BIT_FUNC(low, rdprotect, &= ~_PAGE_USER); | ||
437 | PTE_BIT_FUNC(low, mkread, |= _PAGE_USER); | ||
438 | PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW); | ||
439 | PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW); | ||
440 | PTE_BIT_FUNC(low, exprotect, &= ~_PAGE_USER); | ||
441 | PTE_BIT_FUNC(low, mkexec, |= _PAGE_USER); | ||
442 | PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE); | ||
443 | #endif | ||
444 | |||
445 | PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY); | ||
446 | PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY); | ||
447 | PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED); | ||
448 | PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED); | ||
449 | |||
237 | /* | 450 | /* |
238 | * Macro and implementation to make a page protection as uncachable. | 451 | * Macro and implementation to make a page protection as uncachable. |
239 | */ | 452 | */ |
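PTE_BIT_FUNC() in the hunk above generates the pte_mk*/pte_*protect helpers that the old code spelled out by hand, parameterised by which word of the PTE they touch. As an illustration (my expansion, not part of the patch), PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY) produces:

    static inline pte_t pte_mkdirty(pte_t pte)
    {
            pte.pte_low |= _PAGE_DIRTY;     /* operate on the chosen word... */
            return pte;                     /* ...and hand back the modified pte */
    }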
@@ -258,7 +471,11 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
258 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 471 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) |
259 | 472 | ||
260 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 473 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
261 | { set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; } | 474 | { |
475 | set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | | ||
476 | pgprot_val(newprot))); | ||
477 | return pte; | ||
478 | } | ||
262 | 479 | ||
263 | #define pmd_page_vaddr(pmd) \ | 480 | #define pmd_page_vaddr(pmd) \ |
264 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | 481 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) |
@@ -283,8 +500,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
283 | #define pte_unmap(pte) do { } while (0) | 500 | #define pte_unmap(pte) do { } while (0) |
284 | #define pte_unmap_nested(pte) do { } while (0) | 501 | #define pte_unmap_nested(pte) do { } while (0) |
285 | 502 | ||
503 | #ifdef CONFIG_X2TLB | ||
504 | #define pte_ERROR(e) \ | ||
505 | printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \ | ||
506 | &(e), (e).pte_high, (e).pte_low) | ||
507 | #else | ||
286 | #define pte_ERROR(e) \ | 508 | #define pte_ERROR(e) \ |
287 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) | 509 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) |
510 | #endif | ||
511 | |||
288 | #define pgd_ERROR(e) \ | 512 | #define pgd_ERROR(e) \ |
289 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | 513 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) |
290 | 514 | ||
@@ -337,6 +561,9 @@ extern unsigned int kobjsize(const void *objp);
337 | extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | 561 | extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); |
338 | #endif | 562 | #endif |
339 | 563 | ||
564 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
565 | extern void paging_init(void); | ||
566 | |||
340 | #include <asm-generic/pgtable.h> | 567 | #include <asm-generic/pgtable.h> |
341 | 568 | ||
342 | #endif /* !__ASSEMBLY__ */ | 569 | #endif /* !__ASSEMBLY__ */ |