author     Paul Mundt <lethal@linux-sh.org>    2006-11-20 00:30:26 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2006-12-05 20:45:37 -0500
commit     21440cf04a64cd1b1209c12a6e1a3afba2a28709
tree       9af7a89c4c711b8433b3b1e23e2ba0c95f82fbf7  /arch/sh
parent     b552c7e8bceae8a04ae79ecee6fa369c1ba4f8e4
sh: Preliminary support for SH-X2 MMU.
This adds some preliminary support for the SH-X2 MMU, used by
newer SH-4A parts (particularly SH7785).
This MMU implements a 'compat' mode with SH-X MMUs and an
'extended' mode for SH-X2 extended features. Extended features
include additional page sizes (8kB, 4MB, 64MB), as well as the
addition of page execute permissions.
The extended mode attributes are placed in a second data array,
which requires us to switch to 64-bit PTEs when in X2 mode.
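A rough sketch of what that means for the PTE type (the field names below are illustrative assumptions, not definitions from this commit): with the extended attribute word present, pte_t grows to two 32-bit words.

/*
 * Sketch only: in X2 mode the extended attribute word (execute bits,
 * extra page-size bits) sits alongside the classic hardware PTE word.
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;	/* 64-bit PTE */
#define pte_val(x)  ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#else
typedef struct { unsigned long pte; } pte_t;			/* legacy 32-bit SH-X PTE */
#define pte_val(x)  ((x).pte)
#endif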
With the addition of the exec perms, we also overhaul the mmap
prots somewhat, now that it's possible to handle them more
intelligently.
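A minimal sketch of the prot-handling idea (the _PAGE_EXT()/_PAGE_EXT_EXEC names are assumptions for illustration, not necessarily the bits added by this series): with a real execute bit available, PROT_EXEC mappings no longer have to collapse onto a plain read-only pgprot.

#ifdef CONFIG_X2TLB
/* Sketch: execute permission lives in the extended (second) PTE word. */
#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_EXEC))
#else
/* Legacy SH-X has no execute bit, so exec mappings fall back to read-only. */
#define PAGE_EXECREAD	PAGE_READONLY
#endif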
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/mm/Kconfig   | 49
-rw-r--r--  arch/sh/mm/init.c    |  4
-rw-r--r--  arch/sh/mm/ioremap.c |  4
-rw-r--r--  arch/sh/mm/pg-sh4.c  | 12
4 files changed, 52 insertions(+), 17 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 27463e26a7b8..88e9663fc9fc 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -235,13 +235,22 @@ config MEMORY_SIZE
 
 config 32BIT
 	bool "Support 32-bit physical addressing through PMB"
-	depends on CPU_SH4A && MMU
+	depends on CPU_SH4A && MMU && (!X2TLB || BROKEN)
 	default y
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+config X2TLB
+	bool "Enable extended TLB mode"
+	depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL
+	help
+	  Selecting this option will enable the extended mode of the SH-X2
+	  TLB. For legacy SH-X behaviour and interoperability, say N. For
+	  all of the fun new features and a willingness to submit bug reports,
+	  say Y.
+
 config VSYSCALL
 	bool "Support vsyscall page"
 	depends on MMU
@@ -256,16 +265,52 @@ config VSYSCALL
 	  (the default value) say Y.
 
 choice
+	prompt "Kernel page size"
+	default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+	bool "4kB"
+	help
+	  This is the default page size used by all SuperH CPUs.
+
+config PAGE_SIZE_8KB
+	bool "8kB"
+	depends on EXPERIMENTAL && X2TLB
+	help
+	  This enables 8kB pages as supported by SH-X2 and later MMUs.
+
+config PAGE_SIZE_64KB
+	bool "64kB"
+	depends on EXPERIMENTAL && CPU_SH4
+	help
+	  This enables support for 64kB pages, possible on all SH-4
+	  CPUs and later. Highly experimental, not recommended.
+
+endchoice
+
+choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
 	default HUGETLB_PAGE_SIZE_64K
 
 config HUGETLB_PAGE_SIZE_64K
-	bool "64K"
+	bool "64kB"
+
+config HUGETLB_PAGE_SIZE_256K
+	bool "256kB"
+	depends on X2TLB
 
 config HUGETLB_PAGE_SIZE_1MB
 	bool "1MB"
 
+config HUGETLB_PAGE_SIZE_4MB
+	bool "4MB"
+	depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_64MB
+	bool "64MB"
+	depends on X2TLB
+
 endchoice
 
 source "mm/Kconfig"
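For context (not part of this diff, and where exactly it lands is an assumption), the new kernel page-size choice is the kind of symbol that gets consumed where PAGE_SHIFT is defined, along these lines:

/* Sketch: the Kconfig choice selects the base page shift. */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Bogus kernel page size?"
#endif
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))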
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7154d1ce9785..8b275166f400 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -93,7 +93,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud)) {
 		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+		set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
 		if (pmd != pmd_offset(pud, 0)) {
 			pud_ERROR(*pud);
 			return;
@@ -103,7 +103,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd)) {
 		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
+		set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
 		if (pte != pte_offset_kernel(pmd, 0)) {
 			pmd_ERROR(*pmd);
 			return;
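The _PAGE_TABLE spelling used above conventionally folds the old flag combination into a single macro; roughly (an assumed definition, shown only to make the equivalence visible):

/* Sketch: _PAGE_TABLE is the user-visible page-table flag set. */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)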
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a9fe80cfc233..11d54c149821 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -28,9 +28,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
 {
 	unsigned long end;
 	unsigned long pfn;
-	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
-				   _PAGE_DIRTY | _PAGE_ACCESSED |
-				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
+	pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 
 	address &= ~PMD_MASK;
 	end = address + size;
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 07371ed7a313..e973ac3b13be 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -37,10 +37,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -50,7 +46,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
 		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
@@ -77,10 +73,6 @@ void copy_user_page(void *to, void *from, unsigned long address,
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -90,7 +82,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
 		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
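Both the ioremap.c and pg-sh4.c hunks swap hand-built flag masks for the stock kernel pgprots; roughly (assumed composition, shown only to illustrate why the open-coded masks become redundant):

/* Sketch: the kernel pgprots already bundle the flags that were open-coded. */
#define PAGE_KERNEL		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \
					 _PAGE_DIRTY | _PAGE_ACCESSED | \
					 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
					 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
					 _PAGE_FLAGS_HARD)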