author     Russell King <rmk+kernel@arm.linux.org.uk>   2009-09-24 16:22:33 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2009-09-24 16:22:33 -0400
commit     baea7b946f00a291b166ccae7fcfed6c01530cc6 (patch)
tree       4aa275fbdbec9c7b9b4629e8bee2bbecd3c6a6af /arch/sh/mm
parent     ae19ffbadc1b2100285a5b5b3d0a4e0a11390904 (diff)
parent     94e0fb086fc5663c38bbc0fe86d698be8314f82f (diff)
Merge branch 'origin' into for-linus
Conflicts:
MAINTAINERS
Diffstat (limited to 'arch/sh/mm')
29 files changed, 1101 insertions, 1188 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 2795618e4f07..64dc1ad59801 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -82,7 +82,7 @@ config 32BIT | |||
82 | 82 | ||
83 | config PMB_ENABLE | 83 | config PMB_ENABLE |
84 | bool "Support 32-bit physical addressing through PMB" | 84 | bool "Support 32-bit physical addressing through PMB" |
85 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) | 85 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) |
86 | select 32BIT | 86 | select 32BIT |
87 | default y | 87 | default y |
88 | help | 88 | help |
@@ -97,7 +97,7 @@ choice | |||
97 | 97 | ||
98 | config PMB | 98 | config PMB |
99 | bool "PMB" | 99 | bool "PMB" |
100 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) | 100 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) |
101 | select 32BIT | 101 | select 32BIT |
102 | help | 102 | help |
103 | If you say Y here, physical addressing will be extended to | 103 | If you say Y here, physical addressing will be extended to |
@@ -106,7 +106,8 @@ config PMB | |||
106 | 106 | ||
107 | config PMB_FIXED | 107 | config PMB_FIXED |
108 | bool "fixed PMB" | 108 | bool "fixed PMB" |
109 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || \ | 109 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || \ |
110 | CPU_SUBTYPE_SH7780 || \ | ||
110 | CPU_SUBTYPE_SH7785) | 111 | CPU_SUBTYPE_SH7785) |
111 | select 32BIT | 112 | select 32BIT |
112 | help | 113 | help |
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 9f4bc3d90b1e..3759bf853293 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -1,5 +1,65 @@ | |||
1 | ifeq ($(CONFIG_SUPERH32),y) | 1 | # |
2 | include ${srctree}/arch/sh/mm/Makefile_32 | 2 | # Makefile for the Linux SuperH-specific parts of the memory manager. |
3 | else | 3 | # |
4 | include ${srctree}/arch/sh/mm/Makefile_64 | 4 | |
5 | obj-y := cache.o init.o consistent.o mmap.o | ||
6 | |||
7 | cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o | ||
8 | cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o | ||
9 | cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o | ||
10 | cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o | ||
11 | cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o | ||
12 | cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o | ||
13 | |||
14 | obj-y += $(cacheops-y) | ||
15 | |||
16 | mmu-y := nommu.o extable_32.o | ||
17 | mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \ | ||
18 | ioremap_$(BITS).o kmap.o tlbflush_$(BITS).o | ||
19 | |||
20 | obj-y += $(mmu-y) | ||
21 | obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o | ||
22 | |||
23 | ifdef CONFIG_DEBUG_FS | ||
24 | obj-$(CONFIG_CPU_SH4) += cache-debugfs.o | ||
5 | endif | 25 | endif |
26 | |||
27 | ifdef CONFIG_MMU | ||
28 | tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o | ||
29 | tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o | ||
30 | tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o | ||
31 | tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o | ||
32 | obj-y += $(tlb-y) | ||
33 | endif | ||
34 | |||
35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
36 | obj-$(CONFIG_PMB) += pmb.o | ||
37 | obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o | ||
38 | obj-$(CONFIG_NUMA) += numa.o | ||
39 | |||
40 | # Special flags for fault_64.o. This puts restrictions on the number of | ||
41 | # caller-save registers that the compiler can target when building this file. | ||
42 | # This is required because the code is called from a context in entry.S where | ||
43 | # very few registers have been saved in the exception handler (for speed | ||
44 | # reasons). | ||
45 | # The caller save registers that have been saved and which can be used are | ||
46 | # r2,r3,r4,r5 : argument passing | ||
47 | # r15, r18 : SP and LINK | ||
48 | # tr0-4 : allow all caller-save TR's. The compiler seems to be able to make | ||
49 | # use of them, so it's probably beneficial to performance to save them | ||
50 | # and have them available for it. | ||
51 | # | ||
52 | # The resources not listed below are callee save, i.e. the compiler is free to | ||
53 | # use any of them and will spill them to the stack itself. | ||
54 | |||
55 | CFLAGS_fault_64.o += -ffixed-r7 \ | ||
56 | -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \ | ||
57 | -ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \ | ||
58 | -ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \ | ||
59 | -ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \ | ||
60 | -ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \ | ||
61 | -ffixed-r41 -ffixed-r42 -ffixed-r43 \ | ||
62 | -ffixed-r60 -ffixed-r61 -ffixed-r62 \ | ||
63 | -fomit-frame-pointer | ||
64 | |||
65 | EXTRA_CFLAGS += -Werror | ||
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
deleted file mode 100644
index 986a1e055834..000000000000
--- a/arch/sh/mm/Makefile_32
+++ /dev/null
@@ -1,43 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the Linux SuperH-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init.o extable_32.o consistent.o mmap.o | ||
6 | |||
7 | ifndef CONFIG_CACHE_OFF | ||
8 | cache-$(CONFIG_CPU_SH2) := cache-sh2.o | ||
9 | cache-$(CONFIG_CPU_SH2A) := cache-sh2a.o | ||
10 | cache-$(CONFIG_CPU_SH3) := cache-sh3.o | ||
11 | cache-$(CONFIG_CPU_SH4) := cache-sh4.o | ||
12 | cache-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o | ||
13 | endif | ||
14 | |||
15 | obj-y += $(cache-y) | ||
16 | |||
17 | mmu-y := tlb-nommu.o pg-nommu.o | ||
18 | mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o | ||
19 | |||
20 | obj-y += $(mmu-y) | ||
21 | obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o | ||
22 | |||
23 | ifdef CONFIG_DEBUG_FS | ||
24 | obj-$(CONFIG_CPU_SH4) += cache-debugfs.o | ||
25 | endif | ||
26 | |||
27 | ifdef CONFIG_MMU | ||
28 | tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o | ||
29 | tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o | ||
30 | tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o | ||
31 | obj-y += $(tlb-y) | ||
32 | ifndef CONFIG_CACHE_OFF | ||
33 | obj-$(CONFIG_CPU_SH4) += pg-sh4.o | ||
34 | obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o | ||
35 | endif | ||
36 | endif | ||
37 | |||
38 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
39 | obj-$(CONFIG_PMB) += pmb.o | ||
40 | obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o | ||
41 | obj-$(CONFIG_NUMA) += numa.o | ||
42 | |||
43 | EXTRA_CFLAGS += -Werror | ||
diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64
deleted file mode 100644
index 2863ffb7006d..000000000000
--- a/arch/sh/mm/Makefile_64
+++ /dev/null
@@ -1,46 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the Linux SuperH-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init.o consistent.o mmap.o | ||
6 | |||
7 | mmu-y := tlb-nommu.o pg-nommu.o extable_32.o | ||
8 | mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \ | ||
9 | extable_64.o | ||
10 | |||
11 | ifndef CONFIG_CACHE_OFF | ||
12 | obj-y += cache-sh5.o | ||
13 | endif | ||
14 | |||
15 | obj-y += $(mmu-y) | ||
16 | obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o | ||
17 | |||
18 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
19 | obj-$(CONFIG_NUMA) += numa.o | ||
20 | |||
21 | EXTRA_CFLAGS += -Werror | ||
22 | |||
23 | # Special flags for fault_64.o. This puts restrictions on the number of | ||
24 | # caller-save registers that the compiler can target when building this file. | ||
25 | # This is required because the code is called from a context in entry.S where | ||
26 | # very few registers have been saved in the exception handler (for speed | ||
27 | # reasons). | ||
28 | # The caller save registers that have been saved and which can be used are | ||
29 | # r2,r3,r4,r5 : argument passing | ||
30 | # r15, r18 : SP and LINK | ||
31 | # tr0-4 : allow all caller-save TR's. The compiler seems to be able to make | ||
32 | # use of them, so it's probably beneficial to performance to save them | ||
33 | # and have them available for it. | ||
34 | # | ||
35 | # The resources not listed below are callee save, i.e. the compiler is free to | ||
36 | # use any of them and will spill them to the stack itself. | ||
37 | |||
38 | CFLAGS_fault_64.o += -ffixed-r7 \ | ||
39 | -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \ | ||
40 | -ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \ | ||
41 | -ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \ | ||
42 | -ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \ | ||
43 | -ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \ | ||
44 | -ffixed-r41 -ffixed-r42 -ffixed-r43 \ | ||
45 | -ffixed-r60 -ffixed-r61 -ffixed-r62 \ | ||
46 | -fomit-frame-pointer | ||
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index c4e80d2b764b..699a71f46327 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -16,7 +16,7 @@ | |||
16 | #include <asm/cacheflush.h> | 16 | #include <asm/cacheflush.h> |
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | void __flush_wback_region(void *start, int size) | 19 | static void sh2__flush_wback_region(void *start, int size) |
20 | { | 20 | { |
21 | unsigned long v; | 21 | unsigned long v; |
22 | unsigned long begin, end; | 22 | unsigned long begin, end; |
@@ -37,7 +37,7 @@ void __flush_wback_region(void *start, int size) | |||
37 | } | 37 | } |
38 | } | 38 | } |
39 | 39 | ||
40 | void __flush_purge_region(void *start, int size) | 40 | static void sh2__flush_purge_region(void *start, int size) |
41 | { | 41 | { |
42 | unsigned long v; | 42 | unsigned long v; |
43 | unsigned long begin, end; | 43 | unsigned long begin, end; |
@@ -51,7 +51,7 @@ void __flush_purge_region(void *start, int size) | |||
51 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); | 51 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); |
52 | } | 52 | } |
53 | 53 | ||
54 | void __flush_invalidate_region(void *start, int size) | 54 | static void sh2__flush_invalidate_region(void *start, int size) |
55 | { | 55 | { |
56 | #ifdef CONFIG_CACHE_WRITEBACK | 56 | #ifdef CONFIG_CACHE_WRITEBACK |
57 | /* | 57 | /* |
@@ -82,3 +82,10 @@ void __flush_invalidate_region(void *start, int size) | |||
82 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); | 82 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); |
83 | #endif | 83 | #endif |
84 | } | 84 | } |
85 | |||
86 | void __init sh2_cache_init(void) | ||
87 | { | ||
88 | __flush_wback_region = sh2__flush_wback_region; | ||
89 | __flush_purge_region = sh2__flush_purge_region; | ||
90 | __flush_invalidate_region = sh2__flush_invalidate_region; | ||
91 | } | ||
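The change above is the core of this series: the CPU-family flush routines become static to their own file and are installed into globally visible function pointers by an __init routine, so the unified Makefile can link several cacheops objects into one image and the right set is selected at boot. A minimal user-space sketch of that registration/dispatch pattern follows; it is illustrative only (the names and printf bodies are not kernel code).

#include <stdio.h>

/* Function pointers that a boot-time init routine fills in, mirroring how
 * sh2_cache_init() assigns __flush_wback_region and friends. */
static void (*flush_wback_region)(void *start, int size);
static void (*flush_purge_region)(void *start, int size);

/* CPU-specific implementations stay static to their own file. */
static void sh2_wback(void *start, int size)
{
	printf("sh2: write back %d bytes at %p\n", size, start);
}

static void sh2_purge(void *start, int size)
{
	printf("sh2: purge %d bytes at %p\n", size, start);
}

/* Analogue of sh2_cache_init(): choose the implementations once. */
static void cache_init(void)
{
	flush_wback_region = sh2_wback;
	flush_purge_region = sh2_purge;
}

int main(void)
{
	char buf[64];

	cache_init();
	flush_wback_region(buf, (int)sizeof(buf));	/* dispatches through the pointer */
	flush_purge_region(buf, (int)sizeof(buf));
	return 0;
}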
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 24d86a794065..975899d83564 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -15,7 +15,7 @@ | |||
15 | #include <asm/cacheflush.h> | 15 | #include <asm/cacheflush.h> |
16 | #include <asm/io.h> | 16 | #include <asm/io.h> |
17 | 17 | ||
18 | void __flush_wback_region(void *start, int size) | 18 | static void sh2a__flush_wback_region(void *start, int size) |
19 | { | 19 | { |
20 | unsigned long v; | 20 | unsigned long v; |
21 | unsigned long begin, end; | 21 | unsigned long begin, end; |
@@ -44,7 +44,7 @@ void __flush_wback_region(void *start, int size) | |||
44 | local_irq_restore(flags); | 44 | local_irq_restore(flags); |
45 | } | 45 | } |
46 | 46 | ||
47 | void __flush_purge_region(void *start, int size) | 47 | static void sh2a__flush_purge_region(void *start, int size) |
48 | { | 48 | { |
49 | unsigned long v; | 49 | unsigned long v; |
50 | unsigned long begin, end; | 50 | unsigned long begin, end; |
@@ -65,7 +65,7 @@ void __flush_purge_region(void *start, int size) | |||
65 | local_irq_restore(flags); | 65 | local_irq_restore(flags); |
66 | } | 66 | } |
67 | 67 | ||
68 | void __flush_invalidate_region(void *start, int size) | 68 | static void sh2a__flush_invalidate_region(void *start, int size) |
69 | { | 69 | { |
70 | unsigned long v; | 70 | unsigned long v; |
71 | unsigned long begin, end; | 71 | unsigned long begin, end; |
@@ -97,13 +97,15 @@ void __flush_invalidate_region(void *start, int size) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | /* WBack O-Cache and flush I-Cache */ | 99 | /* WBack O-Cache and flush I-Cache */ |
100 | void flush_icache_range(unsigned long start, unsigned long end) | 100 | static void sh2a_flush_icache_range(void *args) |
101 | { | 101 | { |
102 | struct flusher_data *data = args; | ||
103 | unsigned long start, end; | ||
102 | unsigned long v; | 104 | unsigned long v; |
103 | unsigned long flags; | 105 | unsigned long flags; |
104 | 106 | ||
105 | start = start & ~(L1_CACHE_BYTES-1); | 107 | start = data->addr1 & ~(L1_CACHE_BYTES-1); |
106 | end = (end + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); | 108 | end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); |
107 | 109 | ||
108 | local_irq_save(flags); | 110 | local_irq_save(flags); |
109 | jump_to_uncached(); | 111 | jump_to_uncached(); |
@@ -127,3 +129,12 @@ void flush_icache_range(unsigned long start, unsigned long end) | |||
127 | back_to_cached(); | 129 | back_to_cached(); |
128 | local_irq_restore(flags); | 130 | local_irq_restore(flags); |
129 | } | 131 | } |
132 | |||
133 | void __init sh2a_cache_init(void) | ||
134 | { | ||
135 | local_flush_icache_range = sh2a_flush_icache_range; | ||
136 | |||
137 | __flush_wback_region = sh2a__flush_wback_region; | ||
138 | __flush_purge_region = sh2a__flush_purge_region; | ||
139 | __flush_invalidate_region = sh2a__flush_invalidate_region; | ||
140 | } | ||
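Unlike the plain region flushes, the icache-range flusher now takes a single void * pointing at a struct flusher_data, so every cache operation shares one signature and can be invoked through one pointer (and handed to another CPU as a single argument on SMP). The sketch below models how a caller might pack addr1/addr2 and dispatch; only the field names addr1/addr2 come from the diff, the rest is invented for illustration and is not the kernel's definition.

#include <stdio.h>

/* Field names match what the converted flushers dereference; the real
 * structure lives in the SH cacheflush headers and also carries a vma. */
struct flusher_data {
	unsigned long addr1;	/* start of range */
	unsigned long addr2;	/* end of range   */
};

/* All flushers share one signature: void (*)(void *args). */
static void (*local_flush_icache_range)(void *args);

static void sh2a_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start = data->addr1 & ~63UL;		/* align down to a line */
	unsigned long end = (data->addr2 + 63UL) & ~63UL;	/* align up */

	printf("flush i-cache %#lx..%#lx\n", start, end);
}

/* A caller packs its arguments once and hands the struct to the pointer. */
static void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data = { .addr1 = start, .addr2 = end };

	local_flush_icache_range(&data);
}

int main(void)
{
	local_flush_icache_range = sh2a_flush_icache_range;
	flush_icache_range(0x1000, 0x1234);
	return 0;
}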
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index 6d1dbec08ad4..faef80c98134 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -32,7 +32,7 @@ | |||
32 | * SIZE: Size of the region. | 32 | * SIZE: Size of the region. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | void __flush_wback_region(void *start, int size) | 35 | static void sh3__flush_wback_region(void *start, int size) |
36 | { | 36 | { |
37 | unsigned long v, j; | 37 | unsigned long v, j; |
38 | unsigned long begin, end; | 38 | unsigned long begin, end; |
@@ -71,7 +71,7 @@ void __flush_wback_region(void *start, int size) | |||
71 | * START: Virtual Address (U0, P1, or P3) | 71 | * START: Virtual Address (U0, P1, or P3) |
72 | * SIZE: Size of the region. | 72 | * SIZE: Size of the region. |
73 | */ | 73 | */ |
74 | void __flush_purge_region(void *start, int size) | 74 | static void sh3__flush_purge_region(void *start, int size) |
75 | { | 75 | { |
76 | unsigned long v; | 76 | unsigned long v; |
77 | unsigned long begin, end; | 77 | unsigned long begin, end; |
@@ -90,11 +90,16 @@ void __flush_purge_region(void *start, int size) | |||
90 | } | 90 | } |
91 | } | 91 | } |
92 | 92 | ||
93 | /* | 93 | void __init sh3_cache_init(void) |
94 | * No write back please | 94 | { |
95 | * | 95 | __flush_wback_region = sh3__flush_wback_region; |
96 | * Except I don't think there's any way to avoid the writeback. So we | 96 | __flush_purge_region = sh3__flush_purge_region; |
97 | * just alias it to __flush_purge_region(). dwmw2. | 97 | |
98 | */ | 98 | /* |
99 | void __flush_invalidate_region(void *start, int size) | 99 | * No write back please |
100 | __attribute__((alias("__flush_purge_region"))); | 100 | * |
101 | * Except I don't think there's any way to avoid the writeback. | ||
102 | * So we just alias it to sh3__flush_purge_region(). dwmw2. | ||
103 | */ | ||
104 | __flush_invalidate_region = sh3__flush_purge_region; | ||
105 | } | ||
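Before this change __flush_invalidate_region was a link-time alias of __flush_purge_region via __attribute__((alias)); with runtime-installed operations, sh3_cache_init() simply points both pointers at the same function. A short GCC-specific, user-space sketch contrasting the two mechanisms (all names here are illustrative):

#include <stdio.h>

static void purge_region(void *start, int size)
{
	printf("purge %d bytes at %p\n", size, start);
}

/* Old style: a second symbol resolved to the same code at link time. */
static void invalidate_region_alias(void *start, int size)
	__attribute__((alias("purge_region")));

/* New style: two pointers initialised to the same implementation at
 * runtime, as sh3_cache_init() now does for __flush_invalidate_region. */
static void (*flush_purge_region)(void *, int) = purge_region;
static void (*flush_invalidate_region)(void *, int) = purge_region;

int main(void)
{
	char buf[32];

	invalidate_region_alias(buf, (int)sizeof(buf));
	flush_purge_region(buf, (int)sizeof(buf));
	flush_invalidate_region(buf, (int)sizeof(buf));
	return 0;
}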
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 5cfe08dbb59e..b2453bbef4cd 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/fs.h> | ||
17 | #include <asm/mmu_context.h> | 18 | #include <asm/mmu_context.h> |
18 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
19 | 20 | ||
@@ -25,13 +26,6 @@ | |||
25 | #define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ | 26 | #define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ |
26 | #define MAX_ICACHE_PAGES 32 | 27 | #define MAX_ICACHE_PAGES 32 |
27 | 28 | ||
28 | static void __flush_dcache_segment_1way(unsigned long start, | ||
29 | unsigned long extent); | ||
30 | static void __flush_dcache_segment_2way(unsigned long start, | ||
31 | unsigned long extent); | ||
32 | static void __flush_dcache_segment_4way(unsigned long start, | ||
33 | unsigned long extent); | ||
34 | |||
35 | static void __flush_cache_4096(unsigned long addr, unsigned long phys, | 29 | static void __flush_cache_4096(unsigned long addr, unsigned long phys, |
36 | unsigned long exec_offset); | 30 | unsigned long exec_offset); |
37 | 31 | ||
@@ -43,182 +37,56 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys, | |||
43 | static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = | 37 | static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = |
44 | (void (*)(unsigned long, unsigned long))0xdeadbeef; | 38 | (void (*)(unsigned long, unsigned long))0xdeadbeef; |
45 | 39 | ||
46 | static void compute_alias(struct cache_info *c) | 40 | /* |
41 | * Write back the range of D-cache, and purge the I-cache. | ||
42 | * | ||
43 | * Called from kernel/module.c:sys_init_module and routine for a.out format, | ||
44 | * signal handler code and kprobes code | ||
45 | */ | ||
46 | static void sh4_flush_icache_range(void *args) | ||
47 | { | 47 | { |
48 | c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1); | 48 | struct flusher_data *data = args; |
49 | c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0; | 49 | unsigned long start, end; |
50 | } | 50 | unsigned long flags, v; |
51 | int i; | ||
51 | 52 | ||
52 | static void __init emit_cache_params(void) | 53 | start = data->addr1; |
53 | { | 54 | end = data->addr2; |
54 | printk("PVR=%08x CVR=%08x PRR=%08x\n", | ||
55 | ctrl_inl(CCN_PVR), | ||
56 | ctrl_inl(CCN_CVR), | ||
57 | ctrl_inl(CCN_PRR)); | ||
58 | printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
59 | boot_cpu_data.icache.ways, | ||
60 | boot_cpu_data.icache.sets, | ||
61 | boot_cpu_data.icache.way_incr); | ||
62 | printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
63 | boot_cpu_data.icache.entry_mask, | ||
64 | boot_cpu_data.icache.alias_mask, | ||
65 | boot_cpu_data.icache.n_aliases); | ||
66 | printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
67 | boot_cpu_data.dcache.ways, | ||
68 | boot_cpu_data.dcache.sets, | ||
69 | boot_cpu_data.dcache.way_incr); | ||
70 | printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
71 | boot_cpu_data.dcache.entry_mask, | ||
72 | boot_cpu_data.dcache.alias_mask, | ||
73 | boot_cpu_data.dcache.n_aliases); | ||
74 | 55 | ||
75 | /* | 56 | /* If there are too many pages then just blow away the caches */ |
76 | * Emit Secondary Cache parameters if the CPU has a probed L2. | 57 | if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { |
77 | */ | 58 | local_flush_cache_all(NULL); |
78 | if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) { | 59 | return; |
79 | printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
80 | boot_cpu_data.scache.ways, | ||
81 | boot_cpu_data.scache.sets, | ||
82 | boot_cpu_data.scache.way_incr); | ||
83 | printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
84 | boot_cpu_data.scache.entry_mask, | ||
85 | boot_cpu_data.scache.alias_mask, | ||
86 | boot_cpu_data.scache.n_aliases); | ||
87 | } | 60 | } |
88 | 61 | ||
89 | if (!__flush_dcache_segment_fn) | 62 | /* |
90 | panic("unknown number of cache ways\n"); | 63 | * Selectively flush d-cache then invalidate the i-cache. |
91 | } | 64 | * This is inefficient, so only use this for small ranges. |
65 | */ | ||
66 | start &= ~(L1_CACHE_BYTES-1); | ||
67 | end += L1_CACHE_BYTES-1; | ||
68 | end &= ~(L1_CACHE_BYTES-1); | ||
92 | 69 | ||
93 | /* | 70 | local_irq_save(flags); |
94 | * SH-4 has virtually indexed and physically tagged cache. | 71 | jump_to_uncached(); |
95 | */ | ||
96 | void __init p3_cache_init(void) | ||
97 | { | ||
98 | compute_alias(&boot_cpu_data.icache); | ||
99 | compute_alias(&boot_cpu_data.dcache); | ||
100 | compute_alias(&boot_cpu_data.scache); | ||
101 | |||
102 | switch (boot_cpu_data.dcache.ways) { | ||
103 | case 1: | ||
104 | __flush_dcache_segment_fn = __flush_dcache_segment_1way; | ||
105 | break; | ||
106 | case 2: | ||
107 | __flush_dcache_segment_fn = __flush_dcache_segment_2way; | ||
108 | break; | ||
109 | case 4: | ||
110 | __flush_dcache_segment_fn = __flush_dcache_segment_4way; | ||
111 | break; | ||
112 | default: | ||
113 | __flush_dcache_segment_fn = NULL; | ||
114 | break; | ||
115 | } | ||
116 | 72 | ||
117 | emit_cache_params(); | 73 | for (v = start; v < end; v += L1_CACHE_BYTES) { |
118 | } | 74 | unsigned long icacheaddr; |
119 | 75 | ||
120 | /* | 76 | __ocbwb(v); |
121 | * Write back the dirty D-caches, but not invalidate them. | ||
122 | * | ||
123 | * START: Virtual Address (U0, P1, or P3) | ||
124 | * SIZE: Size of the region. | ||
125 | */ | ||
126 | void __flush_wback_region(void *start, int size) | ||
127 | { | ||
128 | unsigned long v; | ||
129 | unsigned long begin, end; | ||
130 | |||
131 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
132 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
133 | & ~(L1_CACHE_BYTES-1); | ||
134 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
135 | asm volatile("ocbwb %0" | ||
136 | : /* no output */ | ||
137 | : "m" (__m(v))); | ||
138 | } | ||
139 | } | ||
140 | 77 | ||
141 | /* | 78 | icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v & |
142 | * Write back the dirty D-caches and invalidate them. | 79 | cpu_data->icache.entry_mask); |
143 | * | ||
144 | * START: Virtual Address (U0, P1, or P3) | ||
145 | * SIZE: Size of the region. | ||
146 | */ | ||
147 | void __flush_purge_region(void *start, int size) | ||
148 | { | ||
149 | unsigned long v; | ||
150 | unsigned long begin, end; | ||
151 | |||
152 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
153 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
154 | & ~(L1_CACHE_BYTES-1); | ||
155 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
156 | asm volatile("ocbp %0" | ||
157 | : /* no output */ | ||
158 | : "m" (__m(v))); | ||
159 | } | ||
160 | } | ||
161 | 80 | ||
162 | /* | 81 | /* Clear i-cache line valid-bit */ |
163 | * No write back please | 82 | for (i = 0; i < cpu_data->icache.ways; i++) { |
164 | */ | 83 | __raw_writel(0, icacheaddr); |
165 | void __flush_invalidate_region(void *start, int size) | 84 | icacheaddr += cpu_data->icache.way_incr; |
166 | { | 85 | } |
167 | unsigned long v; | ||
168 | unsigned long begin, end; | ||
169 | |||
170 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
171 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
172 | & ~(L1_CACHE_BYTES-1); | ||
173 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
174 | asm volatile("ocbi %0" | ||
175 | : /* no output */ | ||
176 | : "m" (__m(v))); | ||
177 | } | 86 | } |
178 | } | ||
179 | |||
180 | /* | ||
181 | * Write back the range of D-cache, and purge the I-cache. | ||
182 | * | ||
183 | * Called from kernel/module.c:sys_init_module and routine for a.out format, | ||
184 | * signal handler code and kprobes code | ||
185 | */ | ||
186 | void flush_icache_range(unsigned long start, unsigned long end) | ||
187 | { | ||
188 | int icacheaddr; | ||
189 | unsigned long flags, v; | ||
190 | int i; | ||
191 | 87 | ||
192 | /* If there are too many pages then just blow the caches */ | 88 | back_to_cached(); |
193 | if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { | 89 | local_irq_restore(flags); |
194 | flush_cache_all(); | ||
195 | } else { | ||
196 | /* selectively flush d-cache then invalidate the i-cache */ | ||
197 | /* this is inefficient, so only use for small ranges */ | ||
198 | start &= ~(L1_CACHE_BYTES-1); | ||
199 | end += L1_CACHE_BYTES-1; | ||
200 | end &= ~(L1_CACHE_BYTES-1); | ||
201 | |||
202 | local_irq_save(flags); | ||
203 | jump_to_uncached(); | ||
204 | |||
205 | for (v = start; v < end; v+=L1_CACHE_BYTES) { | ||
206 | asm volatile("ocbwb %0" | ||
207 | : /* no output */ | ||
208 | : "m" (__m(v))); | ||
209 | |||
210 | icacheaddr = CACHE_IC_ADDRESS_ARRAY | ( | ||
211 | v & cpu_data->icache.entry_mask); | ||
212 | |||
213 | for (i = 0; i < cpu_data->icache.ways; | ||
214 | i++, icacheaddr += cpu_data->icache.way_incr) | ||
215 | /* Clear i-cache line valid-bit */ | ||
216 | ctrl_outl(0, icacheaddr); | ||
217 | } | ||
218 | |||
219 | back_to_cached(); | ||
220 | local_irq_restore(flags); | ||
221 | } | ||
222 | } | 90 | } |
223 | 91 | ||
224 | static inline void flush_cache_4096(unsigned long start, | 92 | static inline void flush_cache_4096(unsigned long start, |
@@ -244,9 +112,17 @@ static inline void flush_cache_4096(unsigned long start, | |||
244 | * Write back & invalidate the D-cache of the page. | 112 | * Write back & invalidate the D-cache of the page. |
245 | * (To avoid "alias" issues) | 113 | * (To avoid "alias" issues) |
246 | */ | 114 | */ |
247 | void flush_dcache_page(struct page *page) | 115 | static void sh4_flush_dcache_page(void *arg) |
248 | { | 116 | { |
249 | if (test_bit(PG_mapped, &page->flags)) { | 117 | struct page *page = arg; |
118 | #ifndef CONFIG_SMP | ||
119 | struct address_space *mapping = page_mapping(page); | ||
120 | |||
121 | if (mapping && !mapping_mapped(mapping)) | ||
122 | set_bit(PG_dcache_dirty, &page->flags); | ||
123 | else | ||
124 | #endif | ||
125 | { | ||
250 | unsigned long phys = PHYSADDR(page_address(page)); | 126 | unsigned long phys = PHYSADDR(page_address(page)); |
251 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; | 127 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; |
252 | int i, n; | 128 | int i, n; |
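The rewritten sh4_flush_dcache_page() defers the flush when the owning mapping has no user mappings: it only sets PG_dcache_dirty and leaves the actual writeback for later, when the page is genuinely mapped into user space. A rough user-space model of that deferral decision follows; only the flag name and the mapping_mapped() test come from the diff, everything else here is a simplified stand-in.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for struct page / address_space state. */
struct page {
	bool dcache_dirty;	/* models the PG_dcache_dirty bit            */
	bool has_user_mapping;	/* models mapping_mapped(page_mapping(page)) */
};

static void writeback_and_invalidate(struct page *page)
{
	printf("flush d-cache lines for page %p now\n", (void *)page);
}

/* Model of sh4_flush_dcache_page(): if no user mapping can see stale data
 * yet, mark the page dirty and defer; otherwise flush immediately.  The
 * deferred flush is assumed to happen when the page is later mapped in. */
static void flush_dcache_page(struct page *page)
{
	if (!page->has_user_mapping)
		page->dcache_dirty = true;	/* defer */
	else
		writeback_and_invalidate(page);
}

int main(void)
{
	struct page cold = { .has_user_mapping = false };
	struct page hot = { .has_user_mapping = true };

	flush_dcache_page(&cold);	/* only sets the dirty flag */
	flush_dcache_page(&hot);	/* flushes right away */
	printf("cold deferred: %d\n", cold.dcache_dirty);
	return 0;
}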
@@ -282,13 +158,13 @@ static void __uses_jump_to_uncached flush_icache_all(void) | |||
282 | local_irq_restore(flags); | 158 | local_irq_restore(flags); |
283 | } | 159 | } |
284 | 160 | ||
285 | void flush_dcache_all(void) | 161 | static inline void flush_dcache_all(void) |
286 | { | 162 | { |
287 | (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size); | 163 | (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size); |
288 | wmb(); | 164 | wmb(); |
289 | } | 165 | } |
290 | 166 | ||
291 | void flush_cache_all(void) | 167 | static void sh4_flush_cache_all(void *unused) |
292 | { | 168 | { |
293 | flush_dcache_all(); | 169 | flush_dcache_all(); |
294 | flush_icache_all(); | 170 | flush_icache_all(); |
@@ -380,8 +256,13 @@ loop_exit: | |||
380 | * | 256 | * |
381 | * Caller takes mm->mmap_sem. | 257 | * Caller takes mm->mmap_sem. |
382 | */ | 258 | */ |
383 | void flush_cache_mm(struct mm_struct *mm) | 259 | static void sh4_flush_cache_mm(void *arg) |
384 | { | 260 | { |
261 | struct mm_struct *mm = arg; | ||
262 | |||
263 | if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) | ||
264 | return; | ||
265 | |||
385 | /* | 266 | /* |
386 | * If cache is only 4k-per-way, there are never any 'aliases'. Since | 267 | * If cache is only 4k-per-way, there are never any 'aliases'. Since |
387 | * the cache is physically tagged, the data can just be left in there. | 268 | * the cache is physically tagged, the data can just be left in there. |
@@ -417,12 +298,21 @@ void flush_cache_mm(struct mm_struct *mm) | |||
417 | * ADDR: Virtual Address (U0 address) | 298 | * ADDR: Virtual Address (U0 address) |
418 | * PFN: Physical page number | 299 | * PFN: Physical page number |
419 | */ | 300 | */ |
420 | void flush_cache_page(struct vm_area_struct *vma, unsigned long address, | 301 | static void sh4_flush_cache_page(void *args) |
421 | unsigned long pfn) | ||
422 | { | 302 | { |
423 | unsigned long phys = pfn << PAGE_SHIFT; | 303 | struct flusher_data *data = args; |
304 | struct vm_area_struct *vma; | ||
305 | unsigned long address, pfn, phys; | ||
424 | unsigned int alias_mask; | 306 | unsigned int alias_mask; |
425 | 307 | ||
308 | vma = data->vma; | ||
309 | address = data->addr1; | ||
310 | pfn = data->addr2; | ||
311 | phys = pfn << PAGE_SHIFT; | ||
312 | |||
313 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) | ||
314 | return; | ||
315 | |||
426 | alias_mask = boot_cpu_data.dcache.alias_mask; | 316 | alias_mask = boot_cpu_data.dcache.alias_mask; |
427 | 317 | ||
428 | /* We only need to flush D-cache when we have alias */ | 318 | /* We only need to flush D-cache when we have alias */ |
@@ -462,9 +352,19 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address, | |||
462 | * Flushing the cache lines for U0 only isn't enough. | 352 | * Flushing the cache lines for U0 only isn't enough. |
463 | * We need to flush for P1 too, which may contain aliases. | 353 | * We need to flush for P1 too, which may contain aliases. |
464 | */ | 354 | */ |
465 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | 355 | static void sh4_flush_cache_range(void *args) |
466 | unsigned long end) | ||
467 | { | 356 | { |
357 | struct flusher_data *data = args; | ||
358 | struct vm_area_struct *vma; | ||
359 | unsigned long start, end; | ||
360 | |||
361 | vma = data->vma; | ||
362 | start = data->addr1; | ||
363 | end = data->addr2; | ||
364 | |||
365 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) | ||
366 | return; | ||
367 | |||
468 | /* | 368 | /* |
469 | * If cache is only 4k-per-way, there are never any 'aliases'. Since | 369 | * If cache is only 4k-per-way, there are never any 'aliases'. Since |
470 | * the cache is physically tagged, the data can just be left in there. | 370 | * the cache is physically tagged, the data can just be left in there. |
@@ -492,20 +392,6 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | |||
492 | } | 392 | } |
493 | } | 393 | } |
494 | 394 | ||
495 | /* | ||
496 | * flush_icache_user_range | ||
497 | * @vma: VMA of the process | ||
498 | * @page: page | ||
499 | * @addr: U0 address | ||
500 | * @len: length of the range (< page size) | ||
501 | */ | ||
502 | void flush_icache_user_range(struct vm_area_struct *vma, | ||
503 | struct page *page, unsigned long addr, int len) | ||
504 | { | ||
505 | flush_cache_page(vma, addr, page_to_pfn(page)); | ||
506 | mb(); | ||
507 | } | ||
508 | |||
509 | /** | 395 | /** |
510 | * __flush_cache_4096 | 396 | * __flush_cache_4096 |
511 | * | 397 | * |
@@ -581,7 +467,49 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys, | |||
581 | * Break the 1, 2 and 4 way variants of this out into separate functions to | 467 | * Break the 1, 2 and 4 way variants of this out into separate functions to |
582 | * avoid nearly all the overhead of having the conditional stuff in the function | 468 | * avoid nearly all the overhead of having the conditional stuff in the function |
583 | * bodies (+ the 1 and 2 way cases avoid saving any registers too). | 469 | * bodies (+ the 1 and 2 way cases avoid saving any registers too). |
470 | * | ||
471 | * We want to eliminate unnecessary bus transactions, so this code uses | ||
472 | * a non-obvious technique. | ||
473 | * | ||
474 | * Loop over a cache way sized block of memory, one cache line at a time. For each | ||
475 | * line, use movca.l to cause the current cache line contents to be written | ||
476 | * back, but without reading anything from main memory. However this has the | ||
477 | * side effect that the cache is now caching that memory location. So follow | ||
478 | * this with a cache invalidate to mark the cache line invalid. And do all | ||
479 | * this with interrupts disabled, to avoid the cache line being accidentally | ||
480 | * evicted while it is holding garbage. | ||
481 | * | ||
482 | * This also breaks in a number of circumstances: | ||
483 | * - if there are modifications to the region of memory just above | ||
484 | * empty_zero_page (for example because a breakpoint has been placed | ||
485 | * there), then these can be lost. | ||
486 | * | ||
487 | * This is because the memory address which the cache temporarily | ||
488 | * caches in the above description is empty_zero_page. So the | ||
489 | * movca.l hits the cache (it is assumed that it misses, or at least | ||
490 | * isn't dirty), modifies the line and then invalidates it, losing the | ||
491 | * required change. | ||
492 | * | ||
493 | * - If caches are disabled or configured in write-through mode, then | ||
494 | * the movca.l writes garbage directly into memory. | ||
584 | */ | 495 | */ |
496 | static void __flush_dcache_segment_writethrough(unsigned long start, | ||
497 | unsigned long extent_per_way) | ||
498 | { | ||
499 | unsigned long addr; | ||
500 | int i; | ||
501 | |||
502 | addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask); | ||
503 | |||
504 | while (extent_per_way) { | ||
505 | for (i = 0; i < cpu_data->dcache.ways; i++) | ||
506 | __raw_writel(0, addr + cpu_data->dcache.way_incr * i); | ||
507 | |||
508 | addr += cpu_data->dcache.linesz; | ||
509 | extent_per_way -= cpu_data->dcache.linesz; | ||
510 | } | ||
511 | } | ||
512 | |||
585 | static void __flush_dcache_segment_1way(unsigned long start, | 513 | static void __flush_dcache_segment_1way(unsigned long start, |
586 | unsigned long extent_per_way) | 514 | unsigned long extent_per_way) |
587 | { | 515 | { |
@@ -773,3 +701,47 @@ static void __flush_dcache_segment_4way(unsigned long start, | |||
773 | a3 += linesz; | 701 | a3 += linesz; |
774 | } while (a0 < a0e); | 702 | } while (a0 < a0e); |
775 | } | 703 | } |
704 | |||
705 | extern void __weak sh4__flush_region_init(void); | ||
706 | |||
707 | /* | ||
708 | * SH-4 has virtually indexed and physically tagged cache. | ||
709 | */ | ||
710 | void __init sh4_cache_init(void) | ||
711 | { | ||
712 | unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT); | ||
713 | |||
714 | printk("PVR=%08x CVR=%08x PRR=%08x\n", | ||
715 | ctrl_inl(CCN_PVR), | ||
716 | ctrl_inl(CCN_CVR), | ||
717 | ctrl_inl(CCN_PRR)); | ||
718 | |||
719 | if (wt_enabled) | ||
720 | __flush_dcache_segment_fn = __flush_dcache_segment_writethrough; | ||
721 | else { | ||
722 | switch (boot_cpu_data.dcache.ways) { | ||
723 | case 1: | ||
724 | __flush_dcache_segment_fn = __flush_dcache_segment_1way; | ||
725 | break; | ||
726 | case 2: | ||
727 | __flush_dcache_segment_fn = __flush_dcache_segment_2way; | ||
728 | break; | ||
729 | case 4: | ||
730 | __flush_dcache_segment_fn = __flush_dcache_segment_4way; | ||
731 | break; | ||
732 | default: | ||
733 | panic("unknown number of cache ways\n"); | ||
734 | break; | ||
735 | } | ||
736 | } | ||
737 | |||
738 | local_flush_icache_range = sh4_flush_icache_range; | ||
739 | local_flush_dcache_page = sh4_flush_dcache_page; | ||
740 | local_flush_cache_all = sh4_flush_cache_all; | ||
741 | local_flush_cache_mm = sh4_flush_cache_mm; | ||
742 | local_flush_cache_dup_mm = sh4_flush_cache_mm; | ||
743 | local_flush_cache_page = sh4_flush_cache_page; | ||
744 | local_flush_cache_range = sh4_flush_cache_range; | ||
745 | |||
746 | sh4__flush_region_init(); | ||
747 | } | ||
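sh4_cache_init() now checks whether the cache runs write-through (the CCR_CACHE_WT bit) and how many D-cache ways were probed, then picks the matching segment-flush routine once at boot, panicking on an unknown geometry. That selection logic is sketched below as self-contained user-space C; the structure and function names are illustrative stand-ins for boot_cpu_data.dcache and the __flush_dcache_segment_* variants.

#include <stdio.h>

/* Probed cache geometry, loosely modelled on boot_cpu_data.dcache. */
struct dcache_info {
	unsigned int ways;
	int write_through;	/* models the CCR_CACHE_WT check */
};

static void flush_segment_writethrough(unsigned long start, unsigned long extent)
{
	printf("WT flush: %#lx + %#lx\n", start, extent);
}

static void flush_segment_1way(unsigned long start, unsigned long extent)
{
	printf("1-way flush: %#lx + %#lx\n", start, extent);
}

static void flush_segment_2way(unsigned long start, unsigned long extent)
{
	printf("2-way flush: %#lx + %#lx\n", start, extent);
}

static void flush_segment_4way(unsigned long start, unsigned long extent)
{
	printf("4-way flush: %#lx + %#lx\n", start, extent);
}

static void (*flush_dcache_segment_fn)(unsigned long, unsigned long);

/* Mirrors the selection in sh4_cache_init(): write-through caches use the
 * address-array variant, write-back caches pick a routine matching the way
 * count, and anything else is a fatal configuration error. */
static int dcache_select(const struct dcache_info *dc)
{
	if (dc->write_through) {
		flush_dcache_segment_fn = flush_segment_writethrough;
		return 0;
	}
	switch (dc->ways) {
	case 1: flush_dcache_segment_fn = flush_segment_1way; return 0;
	case 2: flush_dcache_segment_fn = flush_segment_2way; return 0;
	case 4: flush_dcache_segment_fn = flush_segment_4way; return 0;
	default: return -1;	/* the kernel panics here */
	}
}

int main(void)
{
	struct dcache_info dc = { .ways = 2, .write_through = 0 };

	if (dcache_select(&dc) == 0)
		flush_dcache_segment_fn(0UL, 16384UL);
	return 0;
}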
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 86762092508c..467ff8e260f7 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -20,23 +20,11 @@ | |||
20 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
21 | #include <asm/mmu_context.h> | 21 | #include <asm/mmu_context.h> |
22 | 22 | ||
23 | extern void __weak sh4__flush_region_init(void); | ||
24 | |||
23 | /* Wired TLB entry for the D-cache */ | 25 | /* Wired TLB entry for the D-cache */ |
24 | static unsigned long long dtlb_cache_slot; | 26 | static unsigned long long dtlb_cache_slot; |
25 | 27 | ||
26 | void __init p3_cache_init(void) | ||
27 | { | ||
28 | /* Reserve a slot for dcache colouring in the DTLB */ | ||
29 | dtlb_cache_slot = sh64_get_wired_dtlb_entry(); | ||
30 | } | ||
31 | |||
32 | #ifdef CONFIG_DCACHE_DISABLED | ||
33 | #define sh64_dcache_purge_all() do { } while (0) | ||
34 | #define sh64_dcache_purge_coloured_phy_page(paddr, eaddr) do { } while (0) | ||
35 | #define sh64_dcache_purge_user_range(mm, start, end) do { } while (0) | ||
36 | #define sh64_dcache_purge_phy_page(paddr) do { } while (0) | ||
37 | #define sh64_dcache_purge_virt_page(mm, eaddr) do { } while (0) | ||
38 | #endif | ||
39 | |||
40 | /* | 28 | /* |
41 | * The following group of functions deal with mapping and unmapping a | 29 | * The following group of functions deal with mapping and unmapping a |
42 | * temporary page into a DTLB slot that has been set aside for exclusive | 30 | * temporary page into a DTLB slot that has been set aside for exclusive |
@@ -56,7 +44,6 @@ static inline void sh64_teardown_dtlb_cache_slot(void) | |||
56 | local_irq_enable(); | 44 | local_irq_enable(); |
57 | } | 45 | } |
58 | 46 | ||
59 | #ifndef CONFIG_ICACHE_DISABLED | ||
60 | static inline void sh64_icache_inv_all(void) | 47 | static inline void sh64_icache_inv_all(void) |
61 | { | 48 | { |
62 | unsigned long long addr, flag, data; | 49 | unsigned long long addr, flag, data; |
@@ -214,52 +201,6 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm, | |||
214 | } | 201 | } |
215 | } | 202 | } |
216 | 203 | ||
217 | /* | ||
218 | * Invalidate a small range of user context I-cache, not necessarily page | ||
219 | * (or even cache-line) aligned. | ||
220 | * | ||
221 | * Since this is used inside ptrace, the ASID in the mm context typically | ||
222 | * won't match current_asid. We'll have to switch ASID to do this. For | ||
223 | * safety, and given that the range will be small, do all this under cli. | ||
224 | * | ||
225 | * Note, there is a hazard that the ASID in mm->context is no longer | ||
226 | * actually associated with mm, i.e. if the mm->context has started a new | ||
227 | * cycle since mm was last active. However, this is just a performance | ||
228 | * issue: all that happens is that we invalidate lines belonging to | ||
229 | * another mm, so the owning process has to refill them when that mm goes | ||
230 | * live again. mm itself can't have any cache entries because there will | ||
231 | * have been a flush_cache_all when the new mm->context cycle started. | ||
232 | */ | ||
233 | static void sh64_icache_inv_user_small_range(struct mm_struct *mm, | ||
234 | unsigned long start, int len) | ||
235 | { | ||
236 | unsigned long long eaddr = start; | ||
237 | unsigned long long eaddr_end = start + len; | ||
238 | unsigned long current_asid, mm_asid; | ||
239 | unsigned long flags; | ||
240 | unsigned long long epage_start; | ||
241 | |||
242 | /* | ||
243 | * Align to start of cache line. Otherwise, suppose len==8 and | ||
244 | * start was at 32N+28 : the last 4 bytes wouldn't get invalidated. | ||
245 | */ | ||
246 | eaddr = L1_CACHE_ALIGN(start); | ||
247 | eaddr_end = start + len; | ||
248 | |||
249 | mm_asid = cpu_asid(smp_processor_id(), mm); | ||
250 | local_irq_save(flags); | ||
251 | current_asid = switch_and_save_asid(mm_asid); | ||
252 | |||
253 | epage_start = eaddr & PAGE_MASK; | ||
254 | |||
255 | while (eaddr < eaddr_end) { | ||
256 | __asm__ __volatile__("icbi %0, 0" : : "r" (eaddr)); | ||
257 | eaddr += L1_CACHE_BYTES; | ||
258 | } | ||
259 | switch_and_save_asid(current_asid); | ||
260 | local_irq_restore(flags); | ||
261 | } | ||
262 | |||
263 | static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end) | 204 | static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end) |
264 | { | 205 | { |
265 | /* The icbi instruction never raises ITLBMISS. i.e. if there's not a | 206 | /* The icbi instruction never raises ITLBMISS. i.e. if there's not a |
@@ -287,9 +228,7 @@ static void sh64_icache_inv_current_user_range(unsigned long start, unsigned lon | |||
287 | addr += L1_CACHE_BYTES; | 228 | addr += L1_CACHE_BYTES; |
288 | } | 229 | } |
289 | } | 230 | } |
290 | #endif /* !CONFIG_ICACHE_DISABLED */ | ||
291 | 231 | ||
292 | #ifndef CONFIG_DCACHE_DISABLED | ||
293 | /* Buffer used as the target of alloco instructions to purge data from cache | 232 | /* Buffer used as the target of alloco instructions to purge data from cache |
294 | sets by natural eviction. -- RPC */ | 233 | sets by natural eviction. -- RPC */ |
295 | #define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4)) | 234 | #define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4)) |
@@ -541,59 +480,10 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm, | |||
541 | } | 480 | } |
542 | 481 | ||
543 | /* | 482 | /* |
544 | * Purge the range of addresses from the D-cache. | ||
545 | * | ||
546 | * The addresses lie in the superpage mapping. There's no harm if we | ||
547 | * overpurge at either end - just a small performance loss. | ||
548 | */ | ||
549 | void __flush_purge_region(void *start, int size) | ||
550 | { | ||
551 | unsigned long long ullend, addr, aligned_start; | ||
552 | |||
553 | aligned_start = (unsigned long long)(signed long long)(signed long) start; | ||
554 | addr = L1_CACHE_ALIGN(aligned_start); | ||
555 | ullend = (unsigned long long) (signed long long) (signed long) start + size; | ||
556 | |||
557 | while (addr <= ullend) { | ||
558 | __asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr)); | ||
559 | addr += L1_CACHE_BYTES; | ||
560 | } | ||
561 | } | ||
562 | |||
563 | void __flush_wback_region(void *start, int size) | ||
564 | { | ||
565 | unsigned long long ullend, addr, aligned_start; | ||
566 | |||
567 | aligned_start = (unsigned long long)(signed long long)(signed long) start; | ||
568 | addr = L1_CACHE_ALIGN(aligned_start); | ||
569 | ullend = (unsigned long long) (signed long long) (signed long) start + size; | ||
570 | |||
571 | while (addr < ullend) { | ||
572 | __asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr)); | ||
573 | addr += L1_CACHE_BYTES; | ||
574 | } | ||
575 | } | ||
576 | |||
577 | void __flush_invalidate_region(void *start, int size) | ||
578 | { | ||
579 | unsigned long long ullend, addr, aligned_start; | ||
580 | |||
581 | aligned_start = (unsigned long long)(signed long long)(signed long) start; | ||
582 | addr = L1_CACHE_ALIGN(aligned_start); | ||
583 | ullend = (unsigned long long) (signed long long) (signed long) start + size; | ||
584 | |||
585 | while (addr < ullend) { | ||
586 | __asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr)); | ||
587 | addr += L1_CACHE_BYTES; | ||
588 | } | ||
589 | } | ||
590 | #endif /* !CONFIG_DCACHE_DISABLED */ | ||
591 | |||
592 | /* | ||
593 | * Invalidate the entire contents of both caches, after writing back to | 483 | * Invalidate the entire contents of both caches, after writing back to |
594 | * memory any dirty data from the D-cache. | 484 | * memory any dirty data from the D-cache. |
595 | */ | 485 | */ |
596 | void flush_cache_all(void) | 486 | static void sh5_flush_cache_all(void *unused) |
597 | { | 487 | { |
598 | sh64_dcache_purge_all(); | 488 | sh64_dcache_purge_all(); |
599 | sh64_icache_inv_all(); | 489 | sh64_icache_inv_all(); |
@@ -620,7 +510,7 @@ void flush_cache_all(void) | |||
620 | * I-cache. This is similar to the lack of action needed in | 510 | * I-cache. This is similar to the lack of action needed in |
621 | * flush_tlb_mm - see fault.c. | 511 | * flush_tlb_mm - see fault.c. |
622 | */ | 512 | */ |
623 | void flush_cache_mm(struct mm_struct *mm) | 513 | static void sh5_flush_cache_mm(void *unused) |
624 | { | 514 | { |
625 | sh64_dcache_purge_all(); | 515 | sh64_dcache_purge_all(); |
626 | } | 516 | } |
@@ -632,13 +522,18 @@ void flush_cache_mm(struct mm_struct *mm) | |||
632 | * | 522 | * |
633 | * Note, 'end' is 1 byte beyond the end of the range to flush. | 523 | * Note, 'end' is 1 byte beyond the end of the range to flush. |
634 | */ | 524 | */ |
635 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | 525 | static void sh5_flush_cache_range(void *args) |
636 | unsigned long end) | ||
637 | { | 526 | { |
638 | struct mm_struct *mm = vma->vm_mm; | 527 | struct flusher_data *data = args; |
528 | struct vm_area_struct *vma; | ||
529 | unsigned long start, end; | ||
530 | |||
531 | vma = data->vma; | ||
532 | start = data->addr1; | ||
533 | end = data->addr2; | ||
639 | 534 | ||
640 | sh64_dcache_purge_user_range(mm, start, end); | 535 | sh64_dcache_purge_user_range(vma->vm_mm, start, end); |
641 | sh64_icache_inv_user_page_range(mm, start, end); | 536 | sh64_icache_inv_user_page_range(vma->vm_mm, start, end); |
642 | } | 537 | } |
643 | 538 | ||
644 | /* | 539 | /* |
@@ -650,16 +545,23 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | |||
650 | * | 545 | * |
651 | * Note, this is called with pte lock held. | 546 | * Note, this is called with pte lock held. |
652 | */ | 547 | */ |
653 | void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, | 548 | static void sh5_flush_cache_page(void *args) |
654 | unsigned long pfn) | ||
655 | { | 549 | { |
550 | struct flusher_data *data = args; | ||
551 | struct vm_area_struct *vma; | ||
552 | unsigned long eaddr, pfn; | ||
553 | |||
554 | vma = data->vma; | ||
555 | eaddr = data->addr1; | ||
556 | pfn = data->addr2; | ||
557 | |||
656 | sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT); | 558 | sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT); |
657 | 559 | ||
658 | if (vma->vm_flags & VM_EXEC) | 560 | if (vma->vm_flags & VM_EXEC) |
659 | sh64_icache_inv_user_page(vma, eaddr); | 561 | sh64_icache_inv_user_page(vma, eaddr); |
660 | } | 562 | } |
661 | 563 | ||
662 | void flush_dcache_page(struct page *page) | 564 | static void sh5_flush_dcache_page(void *page) |
663 | { | 565 | { |
664 | sh64_dcache_purge_phy_page(page_to_phys(page)); | 566 | sh64_dcache_purge_phy_page(page_to_phys(page)); |
665 | wmb(); | 567 | wmb(); |
@@ -673,162 +575,47 @@ void flush_dcache_page(struct page *page) | |||
673 | * mapping, therefore it's guaranteed that there no cache entries for | 575 | * mapping, therefore it's guaranteed that there no cache entries for |
674 | * the range in cache sets of the wrong colour. | 576 | * the range in cache sets of the wrong colour. |
675 | */ | 577 | */ |
676 | void flush_icache_range(unsigned long start, unsigned long end) | 578 | static void sh5_flush_icache_range(void *args) |
677 | { | 579 | { |
580 | struct flusher_data *data = args; | ||
581 | unsigned long start, end; | ||
582 | |||
583 | start = data->addr1; | ||
584 | end = data->addr2; | ||
585 | |||
678 | __flush_purge_region((void *)start, end); | 586 | __flush_purge_region((void *)start, end); |
679 | wmb(); | 587 | wmb(); |
680 | sh64_icache_inv_kernel_range(start, end); | 588 | sh64_icache_inv_kernel_range(start, end); |
681 | } | 589 | } |
682 | 590 | ||
683 | /* | 591 | /* |
684 | * Flush the range of user (defined by vma->vm_mm) address space starting | ||
685 | * at 'addr' for 'len' bytes from the cache. The range does not straddle | ||
686 | * a page boundary, the unique physical page containing the range is | ||
687 | * 'page'. This seems to be used mainly for invalidating an address | ||
688 | * range following a poke into the program text through the ptrace() call | ||
689 | * from another process (e.g. for BRK instruction insertion). | ||
690 | */ | ||
691 | void flush_icache_user_range(struct vm_area_struct *vma, | ||
692 | struct page *page, unsigned long addr, int len) | ||
693 | { | ||
694 | |||
695 | sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr); | ||
696 | mb(); | ||
697 | |||
698 | if (vma->vm_flags & VM_EXEC) | ||
699 | sh64_icache_inv_user_small_range(vma->vm_mm, addr, len); | ||
700 | } | ||
701 | |||
702 | /* | ||
703 | * For the address range [start,end), write back the data from the | 592 | * For the address range [start,end), write back the data from the |
704 | * D-cache and invalidate the corresponding region of the I-cache for the | 593 | * D-cache and invalidate the corresponding region of the I-cache for the |
705 | * current process. Used to flush signal trampolines on the stack to | 594 | * current process. Used to flush signal trampolines on the stack to |
706 | * make them executable. | 595 | * make them executable. |
707 | */ | 596 | */ |
708 | void flush_cache_sigtramp(unsigned long vaddr) | 597 | static void sh5_flush_cache_sigtramp(void *vaddr) |
709 | { | 598 | { |
710 | unsigned long end = vaddr + L1_CACHE_BYTES; | 599 | unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES; |
711 | 600 | ||
712 | __flush_wback_region((void *)vaddr, L1_CACHE_BYTES); | 601 | __flush_wback_region(vaddr, L1_CACHE_BYTES); |
713 | wmb(); | 602 | wmb(); |
714 | sh64_icache_inv_current_user_range(vaddr, end); | 603 | sh64_icache_inv_current_user_range((unsigned long)vaddr, end); |
715 | } | 604 | } |
716 | 605 | ||
717 | #ifdef CONFIG_MMU | 606 | void __init sh5_cache_init(void) |
718 | /* | ||
719 | * These *MUST* lie in an area of virtual address space that's otherwise | ||
720 | * unused. | ||
721 | */ | ||
722 | #define UNIQUE_EADDR_START 0xe0000000UL | ||
723 | #define UNIQUE_EADDR_END 0xe8000000UL | ||
724 | |||
725 | /* | ||
726 | * Given a physical address paddr, and a user virtual address user_eaddr | ||
727 | * which will eventually be mapped to it, create a one-off kernel-private | ||
728 | * eaddr mapped to the same paddr. This is used for creating special | ||
729 | * destination pages for copy_user_page and clear_user_page. | ||
730 | */ | ||
731 | static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, | ||
732 | unsigned long paddr) | ||
733 | { | ||
734 | static unsigned long current_pointer = UNIQUE_EADDR_START; | ||
735 | unsigned long coloured_pointer; | ||
736 | |||
737 | if (current_pointer == UNIQUE_EADDR_END) { | ||
738 | sh64_dcache_purge_all(); | ||
739 | current_pointer = UNIQUE_EADDR_START; | ||
740 | } | ||
741 | |||
742 | coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | | ||
743 | (user_eaddr & CACHE_OC_SYN_MASK); | ||
744 | sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr); | ||
745 | |||
746 | current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS); | ||
747 | |||
748 | return coloured_pointer; | ||
749 | } | ||
750 | |||
751 | static void sh64_copy_user_page_coloured(void *to, void *from, | ||
752 | unsigned long address) | ||
753 | { | 607 | { |
754 | void *coloured_to; | 608 | local_flush_cache_all = sh5_flush_cache_all; |
609 | local_flush_cache_mm = sh5_flush_cache_mm; | ||
610 | local_flush_cache_dup_mm = sh5_flush_cache_mm; | ||
611 | local_flush_cache_page = sh5_flush_cache_page; | ||
612 | local_flush_cache_range = sh5_flush_cache_range; | ||
613 | local_flush_dcache_page = sh5_flush_dcache_page; | ||
614 | local_flush_icache_range = sh5_flush_icache_range; | ||
615 | local_flush_cache_sigtramp = sh5_flush_cache_sigtramp; | ||
755 | 616 | ||
756 | /* | 617 | /* Reserve a slot for dcache colouring in the DTLB */ |
757 | * Discard any existing cache entries of the wrong colour. These are | 618 | dtlb_cache_slot = sh64_get_wired_dtlb_entry(); |
758 | * present quite often, if the kernel has recently used the page | ||
759 | * internally, then given it up, then it's been allocated to the user. | ||
760 | */ | ||
761 | sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to); | ||
762 | |||
763 | coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to)); | ||
764 | copy_page(from, coloured_to); | ||
765 | |||
766 | sh64_teardown_dtlb_cache_slot(); | ||
767 | } | ||
768 | |||
769 | static void sh64_clear_user_page_coloured(void *to, unsigned long address) | ||
770 | { | ||
771 | void *coloured_to; | ||
772 | |||
773 | /* | ||
774 | * Discard any existing kernel-originated lines of the wrong | ||
775 | * colour (as above) | ||
776 | */ | ||
777 | sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to); | ||
778 | |||
779 | coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to)); | ||
780 | clear_page(coloured_to); | ||
781 | |||
782 | sh64_teardown_dtlb_cache_slot(); | ||
783 | } | ||
784 | |||
785 | /* | ||
786 | * 'from' and 'to' are kernel virtual addresses (within the superpage | ||
787 | * mapping of the physical RAM). 'address' is the user virtual address | ||
788 | * where the copy 'to' will be mapped after. This allows a custom | ||
789 | * mapping to be used to ensure that the new copy is placed in the | ||
790 | * right cache sets for the user to see it without having to bounce it | ||
791 | * out via memory. Note however : the call to flush_page_to_ram in | ||
792 | * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one | ||
793 | * very important case! | ||
794 | * | ||
795 | * TBD : can we guarantee that on every call, any cache entries for | ||
796 | * 'from' are in the same colour sets as 'address' also? i.e. is this | ||
797 | * always used just to deal with COW? (I suspect not). | ||
798 | * | ||
799 | * There are two possibilities here for when the page 'from' was last accessed: | ||
800 | * - by the kernel : this is OK, no purge required. | ||
801 | * - by the/a user (e.g. for break_COW) : need to purge. | ||
802 | * | ||
803 | * If the potential user mapping at 'address' is the same colour as | ||
804 | * 'from' there is no need to purge any cache lines from the 'from' | ||
805 | * page mapped into cache sets of colour 'address'. (The copy will be | ||
806 | * accessing the page through 'from'). | ||
807 | */ | ||
808 | void copy_user_page(void *to, void *from, unsigned long address, | ||
809 | struct page *page) | ||
810 | { | ||
811 | if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) | ||
812 | sh64_dcache_purge_coloured_phy_page(__pa(from), address); | ||
813 | |||
814 | if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) | ||
815 | copy_page(to, from); | ||
816 | else | ||
817 | sh64_copy_user_page_coloured(to, from, address); | ||
818 | } | ||
819 | 619 | ||
820 | /* | 620 | sh4__flush_region_init(); |
821 | * 'to' is a kernel virtual address (within the superpage mapping of the | ||
822 | * physical RAM). 'address' is the user virtual address where the 'to' | ||
823 | * page will be mapped after. This allows a custom mapping to be used to | ||
824 | * ensure that the new copy is placed in the right cache sets for the | ||
825 | * user to see it without having to bounce it out via memory. | ||
826 | */ | ||
827 | void clear_user_page(void *to, unsigned long address, struct page *page) | ||
828 | { | ||
829 | if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) | ||
830 | clear_page(to); | ||
831 | else | ||
832 | sh64_clear_user_page_coloured(to, address); | ||
833 | } | 621 | } |
834 | #endif | ||
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 22dacc778823..2cadee2037ac 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/mman.h> | 13 | #include <linux/mman.h> |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/fs.h> | ||
15 | #include <linux/threads.h> | 16 | #include <linux/threads.h> |
16 | #include <asm/addrspace.h> | 17 | #include <asm/addrspace.h> |
17 | #include <asm/page.h> | 18 | #include <asm/page.h> |
@@ -63,15 +64,21 @@ static inline void cache_wback_all(void) | |||
63 | * | 64 | * |
64 | * Called from kernel/module.c:sys_init_module and routine for a.out format. | 65 | * Called from kernel/module.c:sys_init_module and routine for a.out format. |
65 | */ | 66 | */ |
66 | void flush_icache_range(unsigned long start, unsigned long end) | 67 | static void sh7705_flush_icache_range(void *args) |
67 | { | 68 | { |
69 | struct flusher_data *data = args; | ||
70 | unsigned long start, end; | ||
71 | |||
72 | start = data->addr1; | ||
73 | end = data->addr2; | ||
74 | |||
68 | __flush_wback_region((void *)start, end - start); | 75 | __flush_wback_region((void *)start, end - start); |
69 | } | 76 | } |
70 | 77 | ||
71 | /* | 78 | /* |
72 | * Writeback&Invalidate the D-cache of the page | 79 | * Writeback&Invalidate the D-cache of the page |
73 | */ | 80 | */ |
74 | static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys) | 81 | static void __flush_dcache_page(unsigned long phys) |
75 | { | 82 | { |
76 | unsigned long ways, waysize, addrstart; | 83 | unsigned long ways, waysize, addrstart; |
77 | unsigned long flags; | 84 | unsigned long flags; |
@@ -126,13 +133,18 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys) | |||
126 | * Write back & invalidate the D-cache of the page. | 133 | * Write back & invalidate the D-cache of the page. |
127 | * (To avoid "alias" issues) | 134 | * (To avoid "alias" issues) |
128 | */ | 135 | */ |
129 | void flush_dcache_page(struct page *page) | 136 | static void sh7705_flush_dcache_page(void *arg) |
130 | { | 137 | { |
131 | if (test_bit(PG_mapped, &page->flags)) | 138 | struct page *page = arg; |
139 | struct address_space *mapping = page_mapping(page); | ||
140 | |||
141 | if (mapping && !mapping_mapped(mapping)) | ||
142 | set_bit(PG_dcache_dirty, &page->flags); | ||
143 | else | ||
132 | __flush_dcache_page(PHYSADDR(page_address(page))); | 144 | __flush_dcache_page(PHYSADDR(page_address(page))); |
133 | } | 145 | } |
134 | 146 | ||
135 | void __uses_jump_to_uncached flush_cache_all(void) | 147 | static void sh7705_flush_cache_all(void *args) |
136 | { | 148 | { |
137 | unsigned long flags; | 149 | unsigned long flags; |
138 | 150 | ||
@@ -144,44 +156,16 @@ void __uses_jump_to_uncached flush_cache_all(void) | |||
144 | local_irq_restore(flags); | 156 | local_irq_restore(flags); |
145 | } | 157 | } |
146 | 158 | ||
147 | void flush_cache_mm(struct mm_struct *mm) | ||
148 | { | ||
149 | /* Is there any good way? */ | ||
150 | /* XXX: possibly call flush_cache_range for each vm area */ | ||
151 | flush_cache_all(); | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * Write back and invalidate D-caches. | ||
156 | * | ||
157 | * START, END: Virtual Address (U0 address) | ||
158 | * | ||
159 | * NOTE: We need to flush the _physical_ page entry. | ||
160 | * Flushing the cache lines for U0 only isn't enough. | ||
161 | * We need to flush for P1 too, which may contain aliases. | ||
162 | */ | ||
163 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | ||
164 | unsigned long end) | ||
165 | { | ||
166 | |||
167 | /* | ||
168 | * We could call flush_cache_page for the pages of these range, | ||
169 | * but it's not efficient (scan the caches all the time...). | ||
170 | * | ||
171 | * We can't use A-bit magic, as there's the case we don't have | ||
172 | * valid entry on TLB. | ||
173 | */ | ||
174 | flush_cache_all(); | ||
175 | } | ||
176 | |||
177 | /* | 159 | /* |
178 | * Write back and invalidate I/D-caches for the page. | 160 | * Write back and invalidate I/D-caches for the page. |
179 | * | 161 | * |
180 | * ADDRESS: Virtual Address (U0 address) | 162 | * ADDRESS: Virtual Address (U0 address) |
181 | */ | 163 | */ |
182 | void flush_cache_page(struct vm_area_struct *vma, unsigned long address, | 164 | static void sh7705_flush_cache_page(void *args) |
183 | unsigned long pfn) | ||
184 | { | 165 | { |
166 | struct flusher_data *data = args; | ||
167 | unsigned long pfn = data->addr2; | ||
168 | |||
185 | __flush_dcache_page(pfn << PAGE_SHIFT); | 169 | __flush_dcache_page(pfn << PAGE_SHIFT); |
186 | } | 170 | } |
187 | 171 | ||
@@ -193,7 +177,19 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address, | |||
193 | * Not entirely sure why this is necessary on SH3 with 32K cache but | 177 | * Not entirely sure why this is necessary on SH3 with 32K cache but |
194 | * without it we get occasional "Memory fault" when loading a program. | 178 | * without it we get occasional "Memory fault" when loading a program. |
195 | */ | 179 | */ |
196 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) | 180 | static void sh7705_flush_icache_page(void *page) |
197 | { | 181 | { |
198 | __flush_purge_region(page_address(page), PAGE_SIZE); | 182 | __flush_purge_region(page_address(page), PAGE_SIZE); |
199 | } | 183 | } |
184 | |||
185 | void __init sh7705_cache_init(void) | ||
186 | { | ||
187 | local_flush_icache_range = sh7705_flush_icache_range; | ||
188 | local_flush_dcache_page = sh7705_flush_dcache_page; | ||
189 | local_flush_cache_all = sh7705_flush_cache_all; | ||
190 | local_flush_cache_mm = sh7705_flush_cache_all; | ||
191 | local_flush_cache_dup_mm = sh7705_flush_cache_all; | ||
192 | local_flush_cache_range = sh7705_flush_cache_all; | ||
193 | local_flush_cache_page = sh7705_flush_cache_page; | ||
194 | local_flush_icache_page = sh7705_flush_icache_page; | ||
195 | } | ||
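
sh7705_cache_init() above relies on the new dispatch scheme: every cache handler takes a single void * argument, so CPU-specific routines can all be parked behind common local_flush_* function pointers and swapped in at init time. A self-contained sketch of that registration pattern follows; all names are illustrative, not the kernel's.

    #include <stdio.h>

    struct flusher_data {
            unsigned long addr1;
            unsigned long addr2;
    };

    static void cache_noop(void *args)
    {
            (void)args;                     /* default: nothing to flush */
    }

    static void demo_flush_icache_range(void *args)
    {
            struct flusher_data *data = args;

            printf("flush icache %#lx-%#lx\n", data->addr1, data->addr2);
    }

    /* Default to a no-op; a CPU-specific init routine overrides it. */
    static void (*local_flush_icache_range_demo)(void *args) = cache_noop;

    static void demo_cache_init(void)
    {
            local_flush_icache_range_demo = demo_flush_icache_range;
    }

    int main(void)
    {
            struct flusher_data data = { .addr1 = 0x1000, .addr2 = 0x2000 };

            demo_cache_init();
            local_flush_icache_range_demo(&data);
            return 0;
    }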
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c new file mode 100644 index 000000000000..35c37b7f717a --- /dev/null +++ b/arch/sh/mm/cache.c | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/cache.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | ||
5 | * Copyright (C) 2002 - 2009 Paul Mundt | ||
6 | * | ||
7 | * Released under the terms of the GNU GPL v2.0. | ||
8 | */ | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mutex.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/highmem.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | #include <asm/cacheflush.h> | ||
18 | |||
19 | void (*local_flush_cache_all)(void *args) = cache_noop; | ||
20 | void (*local_flush_cache_mm)(void *args) = cache_noop; | ||
21 | void (*local_flush_cache_dup_mm)(void *args) = cache_noop; | ||
22 | void (*local_flush_cache_page)(void *args) = cache_noop; | ||
23 | void (*local_flush_cache_range)(void *args) = cache_noop; | ||
24 | void (*local_flush_dcache_page)(void *args) = cache_noop; | ||
25 | void (*local_flush_icache_range)(void *args) = cache_noop; | ||
26 | void (*local_flush_icache_page)(void *args) = cache_noop; | ||
27 | void (*local_flush_cache_sigtramp)(void *args) = cache_noop; | ||
28 | |||
29 | void (*__flush_wback_region)(void *start, int size); | ||
30 | void (*__flush_purge_region)(void *start, int size); | ||
31 | void (*__flush_invalidate_region)(void *start, int size); | ||
32 | |||
33 | static inline void noop__flush_region(void *start, int size) | ||
34 | { | ||
35 | } | ||
36 | |||
37 | static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info, | ||
38 | int wait) | ||
39 | { | ||
40 | preempt_disable(); | ||
41 | smp_call_function(func, info, wait); | ||
42 | func(info); | ||
43 | preempt_enable(); | ||
44 | } | ||
45 | |||
46 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | ||
47 | unsigned long vaddr, void *dst, const void *src, | ||
48 | unsigned long len) | ||
49 | { | ||
50 | if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && | ||
51 | !test_bit(PG_dcache_dirty, &page->flags)) { | ||
52 | void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); | ||
53 | memcpy(vto, src, len); | ||
54 | kunmap_coherent(vto); | ||
55 | } else { | ||
56 | memcpy(dst, src, len); | ||
57 | if (boot_cpu_data.dcache.n_aliases) | ||
58 | set_bit(PG_dcache_dirty, &page->flags); | ||
59 | } | ||
60 | |||
61 | if (vma->vm_flags & VM_EXEC) | ||
62 | flush_cache_page(vma, vaddr, page_to_pfn(page)); | ||
63 | } | ||
64 | |||
65 | void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | ||
66 | unsigned long vaddr, void *dst, const void *src, | ||
67 | unsigned long len) | ||
68 | { | ||
69 | if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && | ||
70 | !test_bit(PG_dcache_dirty, &page->flags)) { | ||
71 | void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); | ||
72 | memcpy(dst, vfrom, len); | ||
73 | kunmap_coherent(vfrom); | ||
74 | } else { | ||
75 | memcpy(dst, src, len); | ||
76 | if (boot_cpu_data.dcache.n_aliases) | ||
77 | set_bit(PG_dcache_dirty, &page->flags); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | void copy_user_highpage(struct page *to, struct page *from, | ||
82 | unsigned long vaddr, struct vm_area_struct *vma) | ||
83 | { | ||
84 | void *vfrom, *vto; | ||
85 | |||
86 | vto = kmap_atomic(to, KM_USER1); | ||
87 | |||
88 | if (boot_cpu_data.dcache.n_aliases && page_mapped(from) && | ||
89 | !test_bit(PG_dcache_dirty, &from->flags)) { | ||
90 | vfrom = kmap_coherent(from, vaddr); | ||
91 | copy_page(vto, vfrom); | ||
92 | kunmap_coherent(vfrom); | ||
93 | } else { | ||
94 | vfrom = kmap_atomic(from, KM_USER0); | ||
95 | copy_page(vto, vfrom); | ||
96 | kunmap_atomic(vfrom, KM_USER0); | ||
97 | } | ||
98 | |||
99 | if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) | ||
100 | __flush_purge_region(vto, PAGE_SIZE); | ||
101 | |||
102 | kunmap_atomic(vto, KM_USER1); | ||
103 | /* Make sure this page is cleared on other CPU's too before using it */ | ||
104 | smp_wmb(); | ||
105 | } | ||
106 | EXPORT_SYMBOL(copy_user_highpage); | ||
107 | |||
108 | void clear_user_highpage(struct page *page, unsigned long vaddr) | ||
109 | { | ||
110 | void *kaddr = kmap_atomic(page, KM_USER0); | ||
111 | |||
112 | clear_page(kaddr); | ||
113 | |||
114 | if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) | ||
115 | __flush_purge_region(kaddr, PAGE_SIZE); | ||
116 | |||
117 | kunmap_atomic(kaddr, KM_USER0); | ||
118 | } | ||
119 | EXPORT_SYMBOL(clear_user_highpage); | ||
120 | |||
121 | void __update_cache(struct vm_area_struct *vma, | ||
122 | unsigned long address, pte_t pte) | ||
123 | { | ||
124 | struct page *page; | ||
125 | unsigned long pfn = pte_pfn(pte); | ||
126 | |||
127 | if (!boot_cpu_data.dcache.n_aliases) | ||
128 | return; | ||
129 | |||
130 | page = pfn_to_page(pfn); | ||
131 | if (pfn_valid(pfn) && page_mapping(page)) { | ||
132 | int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); | ||
133 | if (dirty) { | ||
134 | unsigned long addr = (unsigned long)page_address(page); | ||
135 | |||
136 | if (pages_do_alias(addr, address & PAGE_MASK)) | ||
137 | __flush_purge_region((void *)addr, PAGE_SIZE); | ||
138 | } | ||
139 | } | ||
140 | } | ||
141 | |||
142 | void __flush_anon_page(struct page *page, unsigned long vmaddr) | ||
143 | { | ||
144 | unsigned long addr = (unsigned long) page_address(page); | ||
145 | |||
146 | if (pages_do_alias(addr, vmaddr)) { | ||
147 | if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && | ||
148 | !test_bit(PG_dcache_dirty, &page->flags)) { | ||
149 | void *kaddr; | ||
150 | |||
151 | kaddr = kmap_coherent(page, vmaddr); | ||
152 | /* XXX.. For now kunmap_coherent() does a purge */ | ||
153 | /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */ | ||
154 | kunmap_coherent(kaddr); | ||
155 | } else | ||
156 | __flush_purge_region((void *)addr, PAGE_SIZE); | ||
157 | } | ||
158 | } | ||
159 | |||
160 | void flush_cache_all(void) | ||
161 | { | ||
162 | cacheop_on_each_cpu(local_flush_cache_all, NULL, 1); | ||
163 | } | ||
164 | |||
165 | void flush_cache_mm(struct mm_struct *mm) | ||
166 | { | ||
167 | cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); | ||
168 | } | ||
169 | |||
170 | void flush_cache_dup_mm(struct mm_struct *mm) | ||
171 | { | ||
172 | cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); | ||
173 | } | ||
174 | |||
175 | void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, | ||
176 | unsigned long pfn) | ||
177 | { | ||
178 | struct flusher_data data; | ||
179 | |||
180 | data.vma = vma; | ||
181 | data.addr1 = addr; | ||
182 | data.addr2 = pfn; | ||
183 | |||
184 | cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1); | ||
185 | } | ||
186 | |||
187 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | ||
188 | unsigned long end) | ||
189 | { | ||
190 | struct flusher_data data; | ||
191 | |||
192 | data.vma = vma; | ||
193 | data.addr1 = start; | ||
194 | data.addr2 = end; | ||
195 | |||
196 | cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1); | ||
197 | } | ||
198 | |||
199 | void flush_dcache_page(struct page *page) | ||
200 | { | ||
201 | cacheop_on_each_cpu(local_flush_dcache_page, page, 1); | ||
202 | } | ||
203 | |||
204 | void flush_icache_range(unsigned long start, unsigned long end) | ||
205 | { | ||
206 | struct flusher_data data; | ||
207 | |||
208 | data.vma = NULL; | ||
209 | data.addr1 = start; | ||
210 | data.addr2 = end; | ||
211 | |||
212 | cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1); | ||
213 | } | ||
214 | |||
215 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) | ||
216 | { | ||
217 | /* Nothing uses the VMA, so just pass the struct page along */ | ||
218 | cacheop_on_each_cpu(local_flush_icache_page, page, 1); | ||
219 | } | ||
220 | |||
221 | void flush_cache_sigtramp(unsigned long address) | ||
222 | { | ||
223 | cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1); | ||
224 | } | ||
225 | |||
226 | static void compute_alias(struct cache_info *c) | ||
227 | { | ||
228 | c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1); | ||
229 | c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0; | ||
230 | } | ||
231 | |||
232 | static void __init emit_cache_params(void) | ||
233 | { | ||
234 | printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
235 | boot_cpu_data.icache.ways, | ||
236 | boot_cpu_data.icache.sets, | ||
237 | boot_cpu_data.icache.way_incr); | ||
238 | printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
239 | boot_cpu_data.icache.entry_mask, | ||
240 | boot_cpu_data.icache.alias_mask, | ||
241 | boot_cpu_data.icache.n_aliases); | ||
242 | printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
243 | boot_cpu_data.dcache.ways, | ||
244 | boot_cpu_data.dcache.sets, | ||
245 | boot_cpu_data.dcache.way_incr); | ||
246 | printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
247 | boot_cpu_data.dcache.entry_mask, | ||
248 | boot_cpu_data.dcache.alias_mask, | ||
249 | boot_cpu_data.dcache.n_aliases); | ||
250 | |||
251 | /* | ||
252 | * Emit Secondary Cache parameters if the CPU has a probed L2. | ||
253 | */ | ||
254 | if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) { | ||
255 | printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
256 | boot_cpu_data.scache.ways, | ||
257 | boot_cpu_data.scache.sets, | ||
258 | boot_cpu_data.scache.way_incr); | ||
259 | printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
260 | boot_cpu_data.scache.entry_mask, | ||
261 | boot_cpu_data.scache.alias_mask, | ||
262 | boot_cpu_data.scache.n_aliases); | ||
263 | } | ||
264 | } | ||
265 | |||
266 | void __init cpu_cache_init(void) | ||
267 | { | ||
268 | compute_alias(&boot_cpu_data.icache); | ||
269 | compute_alias(&boot_cpu_data.dcache); | ||
270 | compute_alias(&boot_cpu_data.scache); | ||
271 | |||
272 | __flush_wback_region = noop__flush_region; | ||
273 | __flush_purge_region = noop__flush_region; | ||
274 | __flush_invalidate_region = noop__flush_region; | ||
275 | |||
276 | if (boot_cpu_data.family == CPU_FAMILY_SH2) { | ||
277 | extern void __weak sh2_cache_init(void); | ||
278 | |||
279 | sh2_cache_init(); | ||
280 | } | ||
281 | |||
282 | if (boot_cpu_data.family == CPU_FAMILY_SH2A) { | ||
283 | extern void __weak sh2a_cache_init(void); | ||
284 | |||
285 | sh2a_cache_init(); | ||
286 | } | ||
287 | |||
288 | if (boot_cpu_data.family == CPU_FAMILY_SH3) { | ||
289 | extern void __weak sh3_cache_init(void); | ||
290 | |||
291 | sh3_cache_init(); | ||
292 | |||
293 | if ((boot_cpu_data.type == CPU_SH7705) && | ||
294 | (boot_cpu_data.dcache.sets == 512)) { | ||
295 | extern void __weak sh7705_cache_init(void); | ||
296 | |||
297 | sh7705_cache_init(); | ||
298 | } | ||
299 | } | ||
300 | |||
301 | if ((boot_cpu_data.family == CPU_FAMILY_SH4) || | ||
302 | (boot_cpu_data.family == CPU_FAMILY_SH4A) || | ||
303 | (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) { | ||
304 | extern void __weak sh4_cache_init(void); | ||
305 | |||
306 | sh4_cache_init(); | ||
307 | } | ||
308 | |||
309 | if (boot_cpu_data.family == CPU_FAMILY_SH5) { | ||
310 | extern void __weak sh5_cache_init(void); | ||
311 | |||
312 | sh5_cache_init(); | ||
313 | } | ||
314 | |||
315 | emit_cache_params(); | ||
316 | } | ||
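
compute_alias() above is the arithmetic the rest of the common cache code keys off: index bits that lie above the page offset become "colours", and n_aliases tells the aliasing paths whether they have any work to do. A standalone sketch with an assumed geometry (256 sets, 32-byte lines, 4 KiB pages), for illustration only:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)

    struct cache_info_demo {
            unsigned int sets;
            unsigned int entry_shift;   /* log2 of the line size */
            unsigned long alias_mask;
            unsigned int n_aliases;
    };

    static void compute_alias_demo(struct cache_info_demo *c)
    {
            /* Index bits above the page offset are the "colour" bits. */
            c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
            c->n_aliases  = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
    }

    int main(void)
    {
            struct cache_info_demo dcache = { .sets = 256, .entry_shift = 5 };

            compute_alias_demo(&dcache);
            printf("alias_mask=%#lx n_aliases=%u\n",
                   dcache.alias_mask, dcache.n_aliases);   /* 0x1000, 2 */
            return 0;
    }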
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 71925946f1e1..47530104e0ad 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Page fault handler for SH with an MMU. | 2 | * Page fault handler for SH with an MMU. |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Niibe Yutaka | 4 | * Copyright (C) 1999 Niibe Yutaka |
5 | * Copyright (C) 2003 - 2008 Paul Mundt | 5 | * Copyright (C) 2003 - 2009 Paul Mundt |
6 | * | 6 | * |
7 | * Based on linux/arch/i386/mm/fault.c: | 7 | * Based on linux/arch/i386/mm/fault.c: |
8 | * Copyright (C) 1995 Linus Torvalds | 8 | * Copyright (C) 1995 Linus Torvalds |
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
16 | #include <linux/hardirq.h> | 16 | #include <linux/hardirq.h> |
17 | #include <linux/kprobes.h> | 17 | #include <linux/kprobes.h> |
18 | #include <linux/perf_counter.h> | 18 | #include <linux/perf_event.h> |
19 | #include <asm/io_trapped.h> | 19 | #include <asm/io_trapped.h> |
20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
21 | #include <asm/mmu_context.h> | 21 | #include <asm/mmu_context.h> |
@@ -25,18 +25,91 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) | |||
25 | { | 25 | { |
26 | int ret = 0; | 26 | int ret = 0; |
27 | 27 | ||
28 | #ifdef CONFIG_KPROBES | 28 | if (kprobes_built_in() && !user_mode(regs)) { |
29 | if (!user_mode(regs)) { | ||
30 | preempt_disable(); | 29 | preempt_disable(); |
31 | if (kprobe_running() && kprobe_fault_handler(regs, trap)) | 30 | if (kprobe_running() && kprobe_fault_handler(regs, trap)) |
32 | ret = 1; | 31 | ret = 1; |
33 | preempt_enable(); | 32 | preempt_enable(); |
34 | } | 33 | } |
35 | #endif | ||
36 | 34 | ||
37 | return ret; | 35 | return ret; |
38 | } | 36 | } |
39 | 37 | ||
38 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | ||
39 | { | ||
40 | unsigned index = pgd_index(address); | ||
41 | pgd_t *pgd_k; | ||
42 | pud_t *pud, *pud_k; | ||
43 | pmd_t *pmd, *pmd_k; | ||
44 | |||
45 | pgd += index; | ||
46 | pgd_k = init_mm.pgd + index; | ||
47 | |||
48 | if (!pgd_present(*pgd_k)) | ||
49 | return NULL; | ||
50 | |||
51 | pud = pud_offset(pgd, address); | ||
52 | pud_k = pud_offset(pgd_k, address); | ||
53 | if (!pud_present(*pud_k)) | ||
54 | return NULL; | ||
55 | |||
56 | pmd = pmd_offset(pud, address); | ||
57 | pmd_k = pmd_offset(pud_k, address); | ||
58 | if (!pmd_present(*pmd_k)) | ||
59 | return NULL; | ||
60 | |||
61 | if (!pmd_present(*pmd)) | ||
62 | set_pmd(pmd, *pmd_k); | ||
63 | else { | ||
64 | /* | ||
65 | * The page tables are fully synchronised so there must | ||
66 | * be another reason for the fault. Return NULL here to | ||
67 | * signal that we have not taken care of the fault. | ||
68 | */ | ||
69 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); | ||
70 | return NULL; | ||
71 | } | ||
72 | |||
73 | return pmd_k; | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Handle a fault on the vmalloc or module mapping area | ||
78 | */ | ||
79 | static noinline int vmalloc_fault(unsigned long address) | ||
80 | { | ||
81 | pgd_t *pgd_k; | ||
82 | pmd_t *pmd_k; | ||
83 | pte_t *pte_k; | ||
84 | |||
85 | /* Make sure we are in vmalloc/module/P3 area: */ | ||
86 | if (!(address >= VMALLOC_START && address < P3_ADDR_MAX)) | ||
87 | return -1; | ||
88 | |||
89 | /* | ||
90 | * Synchronize this task's top level page-table | ||
91 | * with the 'reference' page table. | ||
92 | * | ||
93 | * Do _not_ use "current" here. We might be inside | ||
94 | * an interrupt in the middle of a task switch.. | ||
95 | */ | ||
96 | pgd_k = get_TTB(); | ||
97 | pmd_k = vmalloc_sync_one(pgd_k, address); | ||
98 | if (!pmd_k) | ||
99 | return -1; | ||
100 | |||
101 | pte_k = pte_offset_kernel(pmd_k, address); | ||
102 | if (!pte_present(*pte_k)) | ||
103 | return -1; | ||
104 | |||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static int fault_in_kernel_space(unsigned long address) | ||
109 | { | ||
110 | return address >= TASK_SIZE; | ||
111 | } | ||
112 | |||
40 | /* | 113 | /* |
41 | * This routine handles page faults. It determines the address, | 114 | * This routine handles page faults. It determines the address, |
42 | * and the problem, and then passes it off to one of the appropriate | 115 | * and the problem, and then passes it off to one of the appropriate |
@@ -46,6 +119,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
46 | unsigned long writeaccess, | 119 | unsigned long writeaccess, |
47 | unsigned long address) | 120 | unsigned long address) |
48 | { | 121 | { |
122 | unsigned long vec; | ||
49 | struct task_struct *tsk; | 123 | struct task_struct *tsk; |
50 | struct mm_struct *mm; | 124 | struct mm_struct *mm; |
51 | struct vm_area_struct * vma; | 125 | struct vm_area_struct * vma; |
@@ -53,70 +127,41 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
53 | int fault; | 127 | int fault; |
54 | siginfo_t info; | 128 | siginfo_t info; |
55 | 129 | ||
56 | /* | ||
57 | * We don't bother with any notifier callbacks here, as they are | ||
58 | * all handled through the __do_page_fault() fast-path. | ||
59 | */ | ||
60 | |||
61 | tsk = current; | 130 | tsk = current; |
131 | mm = tsk->mm; | ||
62 | si_code = SEGV_MAPERR; | 132 | si_code = SEGV_MAPERR; |
133 | vec = lookup_exception_vector(); | ||
63 | 134 | ||
64 | if (unlikely(address >= TASK_SIZE)) { | 135 | /* |
65 | /* | 136 | * We fault-in kernel-space virtual memory on-demand. The |
66 | * Synchronize this task's top level page-table | 137 | * 'reference' page table is init_mm.pgd. |
67 | * with the 'reference' page table. | 138 | * |
68 | * | 139 | * NOTE! We MUST NOT take any locks for this case. We may |
69 | * Do _not_ use "tsk" here. We might be inside | 140 | * be in an interrupt or a critical region, and should |
70 | * an interrupt in the middle of a task switch.. | 141 | * only copy the information from the master page table, |
71 | */ | 142 | * nothing more. |
72 | int offset = pgd_index(address); | 143 | */ |
73 | pgd_t *pgd, *pgd_k; | 144 | if (unlikely(fault_in_kernel_space(address))) { |
74 | pud_t *pud, *pud_k; | 145 | if (vmalloc_fault(address) >= 0) |
75 | pmd_t *pmd, *pmd_k; | ||
76 | |||
77 | pgd = get_TTB() + offset; | ||
78 | pgd_k = swapper_pg_dir + offset; | ||
79 | |||
80 | if (!pgd_present(*pgd)) { | ||
81 | if (!pgd_present(*pgd_k)) | ||
82 | goto bad_area_nosemaphore; | ||
83 | set_pgd(pgd, *pgd_k); | ||
84 | return; | 146 | return; |
85 | } | 147 | if (notify_page_fault(regs, vec)) |
86 | |||
87 | pud = pud_offset(pgd, address); | ||
88 | pud_k = pud_offset(pgd_k, address); | ||
89 | |||
90 | if (!pud_present(*pud)) { | ||
91 | if (!pud_present(*pud_k)) | ||
92 | goto bad_area_nosemaphore; | ||
93 | set_pud(pud, *pud_k); | ||
94 | return; | 148 | return; |
95 | } | ||
96 | 149 | ||
97 | pmd = pmd_offset(pud, address); | 150 | goto bad_area_nosemaphore; |
98 | pmd_k = pmd_offset(pud_k, address); | ||
99 | if (pmd_present(*pmd) || !pmd_present(*pmd_k)) | ||
100 | goto bad_area_nosemaphore; | ||
101 | set_pmd(pmd, *pmd_k); | ||
102 | |||
103 | return; | ||
104 | } | 151 | } |
105 | 152 | ||
106 | mm = tsk->mm; | 153 | if (unlikely(notify_page_fault(regs, vec))) |
107 | |||
108 | if (unlikely(notify_page_fault(regs, lookup_exception_vector()))) | ||
109 | return; | 154 | return; |
110 | 155 | ||
111 | /* Only enable interrupts if they were on before the fault */ | 156 | /* Only enable interrupts if they were on before the fault */ |
112 | if ((regs->sr & SR_IMASK) != SR_IMASK) | 157 | if ((regs->sr & SR_IMASK) != SR_IMASK) |
113 | local_irq_enable(); | 158 | local_irq_enable(); |
114 | 159 | ||
115 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 160 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); |
116 | 161 | ||
117 | /* | 162 | /* |
118 | * If we're in an interrupt or have no user | 163 | * If we're in an interrupt, have no user context or are running |
119 | * context, we must not take the fault.. | 164 | * in an atomic region then we must not take the fault: |
120 | */ | 165 | */ |
121 | if (in_atomic() || !mm) | 166 | if (in_atomic() || !mm) |
122 | goto no_context; | 167 | goto no_context; |
@@ -132,10 +177,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
132 | goto bad_area; | 177 | goto bad_area; |
133 | if (expand_stack(vma, address)) | 178 | if (expand_stack(vma, address)) |
134 | goto bad_area; | 179 | goto bad_area; |
135 | /* | 180 | |
136 | * Ok, we have a good vm_area for this memory access, so | 181 | /* |
137 | * we can handle it.. | 182 | * Ok, we have a good vm_area for this memory access, so |
138 | */ | 183 | * we can handle it.. |
184 | */ | ||
139 | good_area: | 185 | good_area: |
140 | si_code = SEGV_ACCERR; | 186 | si_code = SEGV_ACCERR; |
141 | if (writeaccess) { | 187 | if (writeaccess) { |
@@ -162,21 +208,21 @@ survive: | |||
162 | } | 208 | } |
163 | if (fault & VM_FAULT_MAJOR) { | 209 | if (fault & VM_FAULT_MAJOR) { |
164 | tsk->maj_flt++; | 210 | tsk->maj_flt++; |
165 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 211 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
166 | regs, address); | 212 | regs, address); |
167 | } else { | 213 | } else { |
168 | tsk->min_flt++; | 214 | tsk->min_flt++; |
169 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 215 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, |
170 | regs, address); | 216 | regs, address); |
171 | } | 217 | } |
172 | 218 | ||
173 | up_read(&mm->mmap_sem); | 219 | up_read(&mm->mmap_sem); |
174 | return; | 220 | return; |
175 | 221 | ||
176 | /* | 222 | /* |
177 | * Something tried to access memory that isn't in our memory map.. | 223 | * Something tried to access memory that isn't in our memory map.. |
178 | * Fix it, but check if it's kernel or user first.. | 224 | * Fix it, but check if it's kernel or user first.. |
179 | */ | 225 | */ |
180 | bad_area: | 226 | bad_area: |
181 | up_read(&mm->mmap_sem); | 227 | up_read(&mm->mmap_sem); |
182 | 228 | ||
@@ -272,16 +318,15 @@ do_sigbus: | |||
272 | /* | 318 | /* |
273 | * Called with interrupts disabled. | 319 | * Called with interrupts disabled. |
274 | */ | 320 | */ |
275 | asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, | 321 | asmlinkage int __kprobes |
276 | unsigned long writeaccess, | 322 | handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, |
277 | unsigned long address) | 323 | unsigned long address) |
278 | { | 324 | { |
279 | pgd_t *pgd; | 325 | pgd_t *pgd; |
280 | pud_t *pud; | 326 | pud_t *pud; |
281 | pmd_t *pmd; | 327 | pmd_t *pmd; |
282 | pte_t *pte; | 328 | pte_t *pte; |
283 | pte_t entry; | 329 | pte_t entry; |
284 | int ret = 1; | ||
285 | 330 | ||
286 | /* | 331 | /* |
287 | * We don't take page faults for P1, P2, and parts of P4, these | 332 | * We don't take page faults for P1, P2, and parts of P4, these |
@@ -292,40 +337,41 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, | |||
292 | pgd = pgd_offset_k(address); | 337 | pgd = pgd_offset_k(address); |
293 | } else { | 338 | } else { |
294 | if (unlikely(address >= TASK_SIZE || !current->mm)) | 339 | if (unlikely(address >= TASK_SIZE || !current->mm)) |
295 | goto out; | 340 | return 1; |
296 | 341 | ||
297 | pgd = pgd_offset(current->mm, address); | 342 | pgd = pgd_offset(current->mm, address); |
298 | } | 343 | } |
299 | 344 | ||
300 | pud = pud_offset(pgd, address); | 345 | pud = pud_offset(pgd, address); |
301 | if (pud_none_or_clear_bad(pud)) | 346 | if (pud_none_or_clear_bad(pud)) |
302 | goto out; | 347 | return 1; |
303 | pmd = pmd_offset(pud, address); | 348 | pmd = pmd_offset(pud, address); |
304 | if (pmd_none_or_clear_bad(pmd)) | 349 | if (pmd_none_or_clear_bad(pmd)) |
305 | goto out; | 350 | return 1; |
306 | pte = pte_offset_kernel(pmd, address); | 351 | pte = pte_offset_kernel(pmd, address); |
307 | entry = *pte; | 352 | entry = *pte; |
308 | if (unlikely(pte_none(entry) || pte_not_present(entry))) | 353 | if (unlikely(pte_none(entry) || pte_not_present(entry))) |
309 | goto out; | 354 | return 1; |
310 | if (unlikely(writeaccess && !pte_write(entry))) | 355 | if (unlikely(writeaccess && !pte_write(entry))) |
311 | goto out; | 356 | return 1; |
312 | 357 | ||
313 | if (writeaccess) | 358 | if (writeaccess) |
314 | entry = pte_mkdirty(entry); | 359 | entry = pte_mkdirty(entry); |
315 | entry = pte_mkyoung(entry); | 360 | entry = pte_mkyoung(entry); |
316 | 361 | ||
362 | set_pte(pte, entry); | ||
363 | |||
317 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP) | 364 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP) |
318 | /* | 365 | /* |
319 | * ITLB is not affected by "ldtlb" instruction. | 366 | * SH-4 does not set MMUCR.RC to the corresponding TLB entry in |
320 | * So, we need to flush the entry by ourselves. | 367 | * the case of an initial page write exception, so we need to |
368 | * flush it in order to avoid potential TLB entry duplication. | ||
321 | */ | 369 | */ |
322 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); | 370 | if (writeaccess == 2) |
371 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); | ||
323 | #endif | 372 | #endif |
324 | 373 | ||
325 | set_pte(pte, entry); | ||
326 | update_mmu_cache(NULL, address, entry); | 374 | update_mmu_cache(NULL, address, entry); |
327 | 375 | ||
328 | ret = 0; | 376 | return 0; |
329 | out: | ||
330 | return ret; | ||
331 | } | 377 | } |
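
The new vmalloc_fault()/vmalloc_sync_one() pair above lazily copies the missing top-level entry from the init_mm reference table into the faulting task's table, and only reports a genuine fault when the reference table has no entry either. A toy sketch of that idea, with flat arrays standing in for real multi-level page tables (names and sizes are purely illustrative):

    #include <stdio.h>

    #define PTRS_PER_PGD_DEMO 16

    static unsigned long reference_pgd[PTRS_PER_PGD_DEMO];  /* plays init_mm.pgd */
    static unsigned long current_pgd[PTRS_PER_PGD_DEMO];    /* plays the task's pgd */

    /* Returns 0 if the entry could be synchronised, -1 otherwise. */
    static int vmalloc_sync_one_demo(unsigned int index)
    {
            if (!reference_pgd[index])
                    return -1;          /* genuinely unmapped: a real fault */
            if (!current_pgd[index])
                    current_pgd[index] = reference_pgd[index];
            return 0;
    }

    int main(void)
    {
            reference_pgd[3] = 0xdeadbeefUL;  /* pretend a vmalloc mapping exists */

            printf("sync idx 3: %d (entry %#lx)\n",
                   vmalloc_sync_one_demo(3), current_pgd[3]);
            printf("sync idx 7: %d\n", vmalloc_sync_one_demo(7));
            return 0;
    }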
diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/fault_64.c index bd63b961b2a9..2b356cec2489 100644 --- a/arch/sh/mm/fault_64.c +++ b/arch/sh/mm/fault_64.c | |||
@@ -56,16 +56,7 @@ inline void __do_tlb_refill(unsigned long address, | |||
56 | /* | 56 | /* |
57 | * Set PTEH register | 57 | * Set PTEH register |
58 | */ | 58 | */ |
59 | pteh = address & MMU_VPN_MASK; | 59 | pteh = neff_sign_extend(address & MMU_VPN_MASK); |
60 | |||
61 | /* Sign extend based on neff. */ | ||
62 | #if (NEFF == 32) | ||
63 | /* Faster sign extension */ | ||
64 | pteh = (unsigned long long)(signed long long)(signed long)pteh; | ||
65 | #else | ||
66 | /* General case */ | ||
67 | pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh; | ||
68 | #endif | ||
69 | 60 | ||
70 | /* Set the ASID. */ | 61 | /* Set the ASID. */ |
71 | pteh |= get_asid() << PTEH_ASID_SHIFT; | 62 | pteh |= get_asid() << PTEH_ASID_SHIFT; |
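
neff_sign_extend() replaces the open-coded #if block removed above: effective addresses on SH-5 are NEFF bits wide and must be sign-extended into the full register before being written to PTEH. A small sketch of the general-case formula, assuming NEFF = 32 for the demonstration:

    #include <stdio.h>

    #define NEFF        32
    #define NEFF_SIGN   (1ULL << (NEFF - 1))
    #define NEFF_MASK   (~0ULL << NEFF)

    static unsigned long long neff_sign_extend_demo(unsigned long long val)
    {
            /* If the top effective-address bit is set, fill the bits above it. */
            return (val & NEFF_SIGN) ? (val | NEFF_MASK) : val;
    }

    int main(void)
    {
            printf("%#llx -> %#llx\n", 0x80001000ULL,
                   neff_sign_extend_demo(0x80001000ULL)); /* 0xffffffff80001000 */
            printf("%#llx -> %#llx\n", 0x00401000ULL,
                   neff_sign_extend_demo(0x00401000ULL)); /* unchanged */
            return 0;
    }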
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c new file mode 100644 index 000000000000..cef402678f42 --- /dev/null +++ b/arch/sh/mm/flush-sh4.c | |||
@@ -0,0 +1,108 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <asm/mmu_context.h> | ||
3 | #include <asm/cacheflush.h> | ||
4 | |||
5 | /* | ||
6 | * Write back the dirty D-caches, but not invalidate them. | ||
7 | * | ||
8 | * START: Virtual Address (U0, P1, or P3) | ||
9 | * SIZE: Size of the region. | ||
10 | */ | ||
11 | static void sh4__flush_wback_region(void *start, int size) | ||
12 | { | ||
13 | reg_size_t aligned_start, v, cnt, end; | ||
14 | |||
15 | aligned_start = register_align(start); | ||
16 | v = aligned_start & ~(L1_CACHE_BYTES-1); | ||
17 | end = (aligned_start + size + L1_CACHE_BYTES-1) | ||
18 | & ~(L1_CACHE_BYTES-1); | ||
19 | cnt = (end - v) / L1_CACHE_BYTES; | ||
20 | |||
21 | while (cnt >= 8) { | ||
22 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
23 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
24 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
25 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
26 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
27 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
28 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
29 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
30 | cnt -= 8; | ||
31 | } | ||
32 | |||
33 | while (cnt) { | ||
34 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
35 | cnt--; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Write back the dirty D-caches and invalidate them. | ||
41 | * | ||
42 | * START: Virtual Address (U0, P1, or P3) | ||
43 | * SIZE: Size of the region. | ||
44 | */ | ||
45 | static void sh4__flush_purge_region(void *start, int size) | ||
46 | { | ||
47 | reg_size_t aligned_start, v, cnt, end; | ||
48 | |||
49 | aligned_start = register_align(start); | ||
50 | v = aligned_start & ~(L1_CACHE_BYTES-1); | ||
51 | end = (aligned_start + size + L1_CACHE_BYTES-1) | ||
52 | & ~(L1_CACHE_BYTES-1); | ||
53 | cnt = (end - v) / L1_CACHE_BYTES; | ||
54 | |||
55 | while (cnt >= 8) { | ||
56 | __ocbp(v); v += L1_CACHE_BYTES; | ||
57 | __ocbp(v); v += L1_CACHE_BYTES; | ||
58 | __ocbp(v); v += L1_CACHE_BYTES; | ||
59 | __ocbp(v); v += L1_CACHE_BYTES; | ||
60 | __ocbp(v); v += L1_CACHE_BYTES; | ||
61 | __ocbp(v); v += L1_CACHE_BYTES; | ||
62 | __ocbp(v); v += L1_CACHE_BYTES; | ||
63 | __ocbp(v); v += L1_CACHE_BYTES; | ||
64 | cnt -= 8; | ||
65 | } | ||
66 | while (cnt) { | ||
67 | __ocbp(v); v += L1_CACHE_BYTES; | ||
68 | cnt--; | ||
69 | } | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * No write back please | ||
74 | */ | ||
75 | static void sh4__flush_invalidate_region(void *start, int size) | ||
76 | { | ||
77 | reg_size_t aligned_start, v, cnt, end; | ||
78 | |||
79 | aligned_start = register_align(start); | ||
80 | v = aligned_start & ~(L1_CACHE_BYTES-1); | ||
81 | end = (aligned_start + size + L1_CACHE_BYTES-1) | ||
82 | & ~(L1_CACHE_BYTES-1); | ||
83 | cnt = (end - v) / L1_CACHE_BYTES; | ||
84 | |||
85 | while (cnt >= 8) { | ||
86 | __ocbi(v); v += L1_CACHE_BYTES; | ||
87 | __ocbi(v); v += L1_CACHE_BYTES; | ||
88 | __ocbi(v); v += L1_CACHE_BYTES; | ||
89 | __ocbi(v); v += L1_CACHE_BYTES; | ||
90 | __ocbi(v); v += L1_CACHE_BYTES; | ||
91 | __ocbi(v); v += L1_CACHE_BYTES; | ||
92 | __ocbi(v); v += L1_CACHE_BYTES; | ||
93 | __ocbi(v); v += L1_CACHE_BYTES; | ||
94 | cnt -= 8; | ||
95 | } | ||
96 | |||
97 | while (cnt) { | ||
98 | __ocbi(v); v += L1_CACHE_BYTES; | ||
99 | cnt--; | ||
100 | } | ||
101 | } | ||
102 | |||
103 | void __init sh4__flush_region_init(void) | ||
104 | { | ||
105 | __flush_wback_region = sh4__flush_wback_region; | ||
106 | __flush_invalidate_region = sh4__flush_invalidate_region; | ||
107 | __flush_purge_region = sh4__flush_purge_region; | ||
108 | } | ||
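
All three sh4__flush_*_region() routines above share one shape: round the region out to whole cache lines, then issue the cache operation eight lines at a time with a scalar tail. A compilable sketch of that pattern, with a counter standing in for the ocbwb/ocbp/ocbi instructions (which only exist on SH-4); the line size is an assumption for illustration:

    #include <stdio.h>

    #define L1_CACHE_BYTES_DEMO 32UL

    static unsigned long lines_touched;

    static void cacheop_demo(unsigned long v)
    {
            (void)v;                    /* stands in for ocbwb/ocbp/ocbi */
            lines_touched++;
    }

    static void flush_region_demo(void *start, int size)
    {
            unsigned long v, end, cnt;

            /* Round the start down and the end up to cache-line boundaries. */
            v   = (unsigned long)start & ~(L1_CACHE_BYTES_DEMO - 1);
            end = ((unsigned long)start + size + L1_CACHE_BYTES_DEMO - 1)
                  & ~(L1_CACHE_BYTES_DEMO - 1);
            cnt = (end - v) / L1_CACHE_BYTES_DEMO;

            while (cnt >= 8) {          /* unrolled body, eight lines per pass */
                    cacheop_demo(v); v += L1_CACHE_BYTES_DEMO;
                    cacheop_demo(v); v += L1_CACHE_BYTES_DEMO;
                    cacheop_demo(v); v += L1_CACHE_BYTES_DEMO;
                    cacheop_demo(v); v += L1_CACHE_BYTES_DEMO;
                    cacheop_demo(v); v += L1_CACHE_BYTES_DEMO;
                    cacheop_demo(v); v += L1_CACHE_BYTES_DEMO;
                    cacheop_demo(v); v += L1_CACHE_BYTES_DEMO;
                    cacheop_demo(v); v += L1_CACHE_BYTES_DEMO;
                    cnt -= 8;
            }
            while (cnt) {               /* scalar tail */
                    cacheop_demo(v); v += L1_CACHE_BYTES_DEMO;
                    cnt--;
            }
    }

    int main(void)
    {
            char buf[1000];

            flush_region_demo(buf + 5, 100);
            printf("lines touched: %lu\n", lines_touched);
            return 0;
    }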
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index fe532aeaa16d..8173e38afd38 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -106,27 +106,31 @@ void __init page_table_range_init(unsigned long start, unsigned long end, | |||
106 | pgd_t *pgd; | 106 | pgd_t *pgd; |
107 | pud_t *pud; | 107 | pud_t *pud; |
108 | pmd_t *pmd; | 108 | pmd_t *pmd; |
109 | int pgd_idx; | 109 | pte_t *pte; |
110 | int i, j, k; | ||
110 | unsigned long vaddr; | 111 | unsigned long vaddr; |
111 | 112 | ||
112 | vaddr = start & PMD_MASK; | 113 | vaddr = start; |
113 | end = (end + PMD_SIZE - 1) & PMD_MASK; | 114 | i = __pgd_offset(vaddr); |
114 | pgd_idx = pgd_index(vaddr); | 115 | j = __pud_offset(vaddr); |
115 | pgd = pgd_base + pgd_idx; | 116 | k = __pmd_offset(vaddr); |
116 | 117 | pgd = pgd_base + i; | |
117 | for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { | 118 | |
118 | BUG_ON(pgd_none(*pgd)); | 119 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { |
119 | pud = pud_offset(pgd, 0); | 120 | pud = (pud_t *)pgd; |
120 | BUG_ON(pud_none(*pud)); | 121 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { |
121 | pmd = pmd_offset(pud, 0); | 122 | pmd = (pmd_t *)pud; |
122 | 123 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { | |
123 | if (!pmd_present(*pmd)) { | 124 | if (pmd_none(*pmd)) { |
124 | pte_t *pte_table; | 125 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); |
125 | pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); | 126 | pmd_populate_kernel(&init_mm, pmd, pte); |
126 | pmd_populate_kernel(&init_mm, pmd, pte_table); | 127 | BUG_ON(pte != pte_offset_kernel(pmd, 0)); |
128 | } | ||
129 | vaddr += PMD_SIZE; | ||
130 | } | ||
131 | k = 0; | ||
127 | } | 132 | } |
128 | 133 | j = 0; | |
129 | vaddr += PMD_SIZE; | ||
130 | } | 134 | } |
131 | } | 135 | } |
132 | #endif /* CONFIG_MMU */ | 136 | #endif /* CONFIG_MMU */ |
@@ -137,7 +141,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end, | |||
137 | void __init paging_init(void) | 141 | void __init paging_init(void) |
138 | { | 142 | { |
139 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 143 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
140 | unsigned long vaddr; | 144 | unsigned long vaddr, end; |
141 | int nid; | 145 | int nid; |
142 | 146 | ||
143 | /* We don't need to map the kernel through the TLB, as | 147 | /* We don't need to map the kernel through the TLB, as |
@@ -155,7 +159,8 @@ void __init paging_init(void) | |||
155 | * pte's will be filled in by __set_fixmap(). | 159 | * pte's will be filled in by __set_fixmap(). |
156 | */ | 160 | */ |
157 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | 161 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; |
158 | page_table_range_init(vaddr, 0, swapper_pg_dir); | 162 | end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; |
163 | page_table_range_init(vaddr, end, swapper_pg_dir); | ||
159 | 164 | ||
160 | kmap_coherent_init(); | 165 | kmap_coherent_init(); |
161 | 166 | ||
@@ -181,8 +186,6 @@ void __init paging_init(void) | |||
181 | set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); | 186 | set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); |
182 | } | 187 | } |
183 | 188 | ||
184 | static struct kcore_list kcore_mem, kcore_vmalloc; | ||
185 | |||
186 | void __init mem_init(void) | 189 | void __init mem_init(void) |
187 | { | 190 | { |
188 | int codesize, datasize, initsize; | 191 | int codesize, datasize, initsize; |
@@ -210,6 +213,9 @@ void __init mem_init(void) | |||
210 | high_memory = node_high_memory; | 213 | high_memory = node_high_memory; |
211 | } | 214 | } |
212 | 215 | ||
216 | /* Set this up early, so we can take care of the zero page */ | ||
217 | cpu_cache_init(); | ||
218 | |||
213 | /* clear the zero-page */ | 219 | /* clear the zero-page */ |
214 | memset(empty_zero_page, 0, PAGE_SIZE); | 220 | memset(empty_zero_page, 0, PAGE_SIZE); |
215 | __flush_wback_region(empty_zero_page, PAGE_SIZE); | 221 | __flush_wback_region(empty_zero_page, PAGE_SIZE); |
@@ -218,20 +224,14 @@ void __init mem_init(void) | |||
218 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | 224 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; |
219 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | 225 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; |
220 | 226 | ||
221 | kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); | ||
222 | kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, | ||
223 | VMALLOC_END - VMALLOC_START); | ||
224 | |||
225 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " | 227 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " |
226 | "%dk data, %dk init)\n", | 228 | "%dk data, %dk init)\n", |
227 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | 229 | nr_free_pages() << (PAGE_SHIFT-10), |
228 | num_physpages << (PAGE_SHIFT-10), | 230 | num_physpages << (PAGE_SHIFT-10), |
229 | codesize >> 10, | 231 | codesize >> 10, |
230 | datasize >> 10, | 232 | datasize >> 10, |
231 | initsize >> 10); | 233 | initsize >> 10); |
232 | 234 | ||
233 | p3_cache_init(); | ||
234 | |||
235 | /* Initialize the vDSO */ | 235 | /* Initialize the vDSO */ |
236 | vsyscall_init(); | 236 | vsyscall_init(); |
237 | } | 237 | } |
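
The rewritten page_table_range_init() above walks the tables by explicit pgd/pud/pmd indices derived from the virtual address, instead of bumping vaddr by PMD_SIZE against a single pgd index. A tiny sketch of that index decomposition, using assumed shift values (a folded two-level, 4 KiB-page layout) rather than the real sh ones:

    #include <stdio.h>

    #define PAGE_SHIFT_DEMO 12
    #define PMD_SHIFT_DEMO  22                  /* one pmd maps 4 MiB here */
    #define PTRS_PER_PTE    (1 << (PMD_SHIFT_DEMO - PAGE_SHIFT_DEMO))

    int main(void)
    {
            unsigned long vaddr = 0x7feff000UL; /* hypothetical fixmap address */

            /* pud/pmd are folded into the pgd index in this toy layout. */
            unsigned int pgd_idx = vaddr >> PMD_SHIFT_DEMO;
            unsigned int pte_idx = (vaddr >> PAGE_SHIFT_DEMO) & (PTRS_PER_PTE - 1);

            printf("vaddr %#lx -> pgd %u, pte %u\n", vaddr, pgd_idx, pte_idx);
            return 0;
    }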
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c index da2f4186f2cd..c3250614e3ae 100644 --- a/arch/sh/mm/ioremap_32.c +++ b/arch/sh/mm/ioremap_32.c | |||
@@ -57,14 +57,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, | |||
57 | if (is_pci_memory_fixed_range(phys_addr, size)) | 57 | if (is_pci_memory_fixed_range(phys_addr, size)) |
58 | return (void __iomem *)phys_addr; | 58 | return (void __iomem *)phys_addr; |
59 | 59 | ||
60 | #if !defined(CONFIG_PMB_FIXED) | ||
61 | /* | ||
62 | * Don't allow anybody to remap normal RAM that we're using.. | ||
63 | */ | ||
64 | if (phys_addr < virt_to_phys(high_memory)) | ||
65 | return NULL; | ||
66 | #endif | ||
67 | |||
68 | /* | 60 | /* |
69 | * Mappings have to be page-aligned | 61 | * Mappings have to be page-aligned |
70 | */ | 62 | */ |
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c index 828c8597219d..b16843d02b76 100644 --- a/arch/sh/mm/ioremap_64.c +++ b/arch/sh/mm/ioremap_64.c | |||
@@ -94,7 +94,6 @@ static struct resource *shmedia_find_resource(struct resource *root, | |||
94 | static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size, | 94 | static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size, |
95 | const char *name, unsigned long flags) | 95 | const char *name, unsigned long flags) |
96 | { | 96 | { |
97 | static int printed_full; | ||
98 | struct xresource *xres; | 97 | struct xresource *xres; |
99 | struct resource *res; | 98 | struct resource *res; |
100 | char *tack; | 99 | char *tack; |
@@ -108,11 +107,8 @@ static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size, | |||
108 | tack = xres->xname; | 107 | tack = xres->xname; |
109 | res = &xres->xres; | 108 | res = &xres->xres; |
110 | } else { | 109 | } else { |
111 | if (!printed_full) { | 110 | printk_once(KERN_NOTICE "%s: done with statics, " |
112 | printk(KERN_NOTICE "%s: done with statics, " | ||
113 | "switching to kmalloc\n", __func__); | 111 | "switching to kmalloc\n", __func__); |
114 | printed_full = 1; | ||
115 | } | ||
116 | tlen = strlen(name); | 112 | tlen = strlen(name); |
117 | tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL); | 113 | tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL); |
118 | if (!tack) | 114 | if (!tack) |
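
The ioremap_64 change above swaps an open-coded static flag for printk_once(). A plain C stand-in for that "emit once" idiom, for illustration only (this is not the kernel macro itself):

    #include <stdio.h>

    #define demo_printk_once(...)                   \
            do {                                    \
                    static int printed;             \
                    if (!printed) {                 \
                            printed = 1;            \
                            printf(__VA_ARGS__);    \
                    }                               \
            } while (0)

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    demo_printk_once("%s: done with statics, switching to kmalloc\n",
                                     __func__);     /* prints exactly once */
            return 0;
    }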
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c new file mode 100644 index 000000000000..16e01b5fed04 --- /dev/null +++ b/arch/sh/mm/kmap.c | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/kmap.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | ||
5 | * Copyright (C) 2002 - 2009 Paul Mundt | ||
6 | * | ||
7 | * Released under the terms of the GNU GPL v2.0. | ||
8 | */ | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mutex.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/highmem.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | #include <asm/cacheflush.h> | ||
17 | |||
18 | #define kmap_get_fixmap_pte(vaddr) \ | ||
19 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) | ||
20 | |||
21 | static pte_t *kmap_coherent_pte; | ||
22 | |||
23 | void __init kmap_coherent_init(void) | ||
24 | { | ||
25 | unsigned long vaddr; | ||
26 | |||
27 | /* cache the first coherent kmap pte */ | ||
28 | vaddr = __fix_to_virt(FIX_CMAP_BEGIN); | ||
29 | kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); | ||
30 | } | ||
31 | |||
32 | void *kmap_coherent(struct page *page, unsigned long addr) | ||
33 | { | ||
34 | enum fixed_addresses idx; | ||
35 | unsigned long vaddr; | ||
36 | |||
37 | BUG_ON(test_bit(PG_dcache_dirty, &page->flags)); | ||
38 | |||
39 | pagefault_disable(); | ||
40 | |||
41 | idx = FIX_CMAP_END - | ||
42 | ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT); | ||
43 | vaddr = __fix_to_virt(idx); | ||
44 | |||
45 | BUG_ON(!pte_none(*(kmap_coherent_pte - idx))); | ||
46 | set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL)); | ||
47 | |||
48 | return (void *)vaddr; | ||
49 | } | ||
50 | |||
51 | void kunmap_coherent(void *kvaddr) | ||
52 | { | ||
53 | if (kvaddr >= (void *)FIXADDR_START) { | ||
54 | unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK; | ||
55 | enum fixed_addresses idx = __virt_to_fix(vaddr); | ||
56 | |||
57 | /* XXX.. Kill this later, here for sanity at the moment.. */ | ||
58 | __flush_purge_region((void *)vaddr, PAGE_SIZE); | ||
59 | |||
60 | pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx); | ||
61 | local_flush_tlb_one(get_asid(), vaddr); | ||
62 | } | ||
63 | |||
64 | pagefault_enable(); | ||
65 | } | ||
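
kmap_coherent() above chooses its fixmap slot from the user address's colour bits, so the temporary kernel mapping lands in the same cache sets as the user mapping it shadows. A sketch of that slot selection with an assumed one-bit alias mask and hypothetical slot numbers:

    #include <stdio.h>

    #define PAGE_SHIFT_DEMO   12
    #define ALIAS_MASK_DEMO   0x1000UL  /* one colour bit above the page offset */
    #define FIX_CMAP_END_DEMO 8         /* hypothetical last coherent-map slot */

    static unsigned int cmap_slot(unsigned long uaddr)
    {
            /* Colour of the user address picks the matching reserved slot. */
            return FIX_CMAP_END_DEMO -
                   ((uaddr & ALIAS_MASK_DEMO) >> PAGE_SHIFT_DEMO);
    }

    int main(void)
    {
            printf("uaddr 0x00400000 -> slot %u\n", cmap_slot(0x00400000UL)); /* 8 */
            printf("uaddr 0x00401000 -> slot %u\n", cmap_slot(0x00401000UL)); /* 7 */
            return 0;
    }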
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 1b5fdfb4e0c2..d2984fa42d3d 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c | |||
@@ -14,10 +14,10 @@ | |||
14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
15 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
16 | 16 | ||
17 | #ifdef CONFIG_MMU | ||
18 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ | 17 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ |
19 | EXPORT_SYMBOL(shm_align_mask); | 18 | EXPORT_SYMBOL(shm_align_mask); |
20 | 19 | ||
20 | #ifdef CONFIG_MMU | ||
21 | /* | 21 | /* |
22 | * To avoid cache aliases, we map the shared page with same color. | 22 | * To avoid cache aliases, we map the shared page with same color. |
23 | */ | 23 | */ |
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/nommu.c index 71c742b5aee3..ac16c05917ef 100644 --- a/arch/sh/mm/tlb-nommu.c +++ b/arch/sh/mm/nommu.c | |||
@@ -1,20 +1,41 @@ | |||
1 | /* | 1 | /* |
2 | * arch/sh/mm/tlb-nommu.c | 2 | * arch/sh/mm/nommu.c |
3 | * | 3 | * |
4 | * TLB Operations for MMUless SH. | 4 | * Various helper routines and stubs for MMUless SH. |
5 | * | 5 | * |
6 | * Copyright (C) 2002 Paul Mundt | 6 | * Copyright (C) 2002 - 2009 Paul Mundt |
7 | * | 7 | * |
8 | * Released under the terms of the GNU GPL v2.0. | 8 | * Released under the terms of the GNU GPL v2.0. |
9 | */ | 9 | */ |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | ||
12 | #include <linux/string.h> | ||
11 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
12 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
13 | #include <asm/tlbflush.h> | 15 | #include <asm/tlbflush.h> |
16 | #include <asm/page.h> | ||
17 | #include <asm/uaccess.h> | ||
14 | 18 | ||
15 | /* | 19 | /* |
16 | * Nothing too terribly exciting here .. | 20 | * Nothing too terribly exciting here .. |
17 | */ | 21 | */ |
22 | void copy_page(void *to, void *from) | ||
23 | { | ||
24 | memcpy(to, from, PAGE_SIZE); | ||
25 | } | ||
26 | |||
27 | __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) | ||
28 | { | ||
29 | memcpy(to, from, n); | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | __kernel_size_t __clear_user(void *to, __kernel_size_t n) | ||
34 | { | ||
35 | memset(to, 0, n); | ||
36 | return 0; | ||
37 | } | ||
38 | |||
18 | void local_flush_tlb_all(void) | 39 | void local_flush_tlb_all(void) |
19 | { | 40 | { |
20 | BUG(); | 41 | BUG(); |
@@ -46,8 +67,21 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
46 | BUG(); | 67 | BUG(); |
47 | } | 68 | } |
48 | 69 | ||
49 | void update_mmu_cache(struct vm_area_struct * vma, | 70 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
50 | unsigned long address, pte_t pte) | 71 | { |
72 | } | ||
73 | |||
74 | void __init kmap_coherent_init(void) | ||
75 | { | ||
76 | } | ||
77 | |||
78 | void *kmap_coherent(struct page *page, unsigned long addr) | ||
79 | { | ||
80 | BUG(); | ||
81 | return NULL; | ||
82 | } | ||
83 | |||
84 | void kunmap_coherent(void *kvaddr) | ||
51 | { | 85 | { |
52 | BUG(); | 86 | BUG(); |
53 | } | 87 | } |
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 095d93bec7cd..9b784fdb947c 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/bootmem.h> | 11 | #include <linux/bootmem.h> |
12 | #include <linux/lmb.h> | ||
12 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
13 | #include <linux/numa.h> | 14 | #include <linux/numa.h> |
14 | #include <linux/pfn.h> | 15 | #include <linux/pfn.h> |
@@ -26,6 +27,15 @@ EXPORT_SYMBOL_GPL(node_data); | |||
26 | void __init setup_memory(void) | 27 | void __init setup_memory(void) |
27 | { | 28 | { |
28 | unsigned long free_pfn = PFN_UP(__pa(_end)); | 29 | unsigned long free_pfn = PFN_UP(__pa(_end)); |
30 | u64 base = min_low_pfn << PAGE_SHIFT; | ||
31 | u64 size = (max_low_pfn << PAGE_SHIFT) - min_low_pfn; | ||
32 | |||
33 | lmb_add(base, size); | ||
34 | |||
35 | /* Reserve the LMB regions used by the kernel, initrd, etc.. */ | ||
36 | lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, | ||
37 | (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) - | ||
38 | (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); | ||
29 | 39 | ||
30 | /* | 40 | /* |
31 | * Node 0 sets up its pgdat at the first available pfn, | 41 | * Node 0 sets up its pgdat at the first available pfn, |
@@ -45,24 +55,23 @@ void __init setup_memory(void) | |||
45 | 55 | ||
46 | void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) | 56 | void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) |
47 | { | 57 | { |
48 | unsigned long bootmap_pages, bootmap_start, bootmap_size; | 58 | unsigned long bootmap_pages; |
49 | unsigned long start_pfn, free_pfn, end_pfn; | 59 | unsigned long start_pfn, end_pfn; |
60 | unsigned long bootmem_paddr; | ||
50 | 61 | ||
51 | /* Don't allow bogus node assignment */ | 62 | /* Don't allow bogus node assignment */ |
52 | BUG_ON(nid > MAX_NUMNODES || nid == 0); | 63 | BUG_ON(nid > MAX_NUMNODES || nid == 0); |
53 | 64 | ||
54 | /* | 65 | start_pfn = start >> PAGE_SHIFT; |
55 | * The free pfn starts at the beginning of the range, and is | ||
56 | * advanced as necessary for pgdat and node map allocations. | ||
57 | */ | ||
58 | free_pfn = start_pfn = start >> PAGE_SHIFT; | ||
59 | end_pfn = end >> PAGE_SHIFT; | 66 | end_pfn = end >> PAGE_SHIFT; |
60 | 67 | ||
68 | lmb_add(start, end - start); | ||
69 | |||
61 | __add_active_range(nid, start_pfn, end_pfn); | 70 | __add_active_range(nid, start_pfn, end_pfn); |
62 | 71 | ||
63 | /* Node-local pgdat */ | 72 | /* Node-local pgdat */ |
64 | NODE_DATA(nid) = pfn_to_kaddr(free_pfn); | 73 | NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data), |
65 | free_pfn += PFN_UP(sizeof(struct pglist_data)); | 74 | SMP_CACHE_BYTES, end_pfn)); |
66 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); | 75 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); |
67 | 76 | ||
68 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; | 77 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; |
@@ -71,16 +80,17 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) | |||
71 | 80 | ||
72 | /* Node-local bootmap */ | 81 | /* Node-local bootmap */ |
73 | bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); | 82 | bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); |
74 | bootmap_start = (unsigned long)pfn_to_kaddr(free_pfn); | 83 | bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT, |
75 | bootmap_size = init_bootmem_node(NODE_DATA(nid), free_pfn, start_pfn, | 84 | PAGE_SIZE, end_pfn); |
76 | end_pfn); | 85 | init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, |
86 | start_pfn, end_pfn); | ||
77 | 87 | ||
78 | free_bootmem_with_active_regions(nid, end_pfn); | 88 | free_bootmem_with_active_regions(nid, end_pfn); |
79 | 89 | ||
80 | /* Reserve the pgdat and bootmap space with the bootmem allocator */ | 90 | /* Reserve the pgdat and bootmap space with the bootmem allocator */ |
81 | reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT, | 91 | reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT, |
82 | sizeof(struct pglist_data), BOOTMEM_DEFAULT); | 92 | sizeof(struct pglist_data), BOOTMEM_DEFAULT); |
83 | reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT, | 93 | reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr, |
84 | bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); | 94 | bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); |
85 | 95 | ||
86 | /* It's up */ | 96 | /* It's up */ |
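
The NUMA setup above now pulls the node pgdat and bootmap out of LMB with lmb_alloc_base(size, align, limit) instead of hand-advancing a free pfn. The toy stand-in below only demonstrates the "aligned allocation below a ceiling" contract; the real LMB allocator manages multiple regions and reservations, and all names and addresses here are assumptions.

    #include <stdio.h>

    static unsigned long region_base = 0x08000000UL;   /* hypothetical RAM base */
    static unsigned long alloc_top   = 0x0c000000UL;   /* hypothetical ceiling  */

    static unsigned long demo_alloc_base(unsigned long size, unsigned long align,
                                         unsigned long max_addr)
    {
            unsigned long top  = (alloc_top < max_addr) ? alloc_top : max_addr;
            unsigned long addr = (top - size) & ~(align - 1);

            if (addr < region_base)
                    return 0;           /* out of memory below the limit */

            alloc_top = addr;           /* remember what was handed out */
            return addr;
    }

    int main(void)
    {
            unsigned long pgdat   = demo_alloc_base(0x1000, 64, 0x0c000000UL);
            unsigned long bootmap = demo_alloc_base(0x4000, 0x1000, 0x0c000000UL);

            printf("pgdat at %#lx, bootmap at %#lx\n", pgdat, bootmap);
            return 0;
    }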
diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c deleted file mode 100644 index 91ed4e695ff7..000000000000 --- a/arch/sh/mm/pg-nommu.c +++ /dev/null | |||
@@ -1,38 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/pg-nommu.c | ||
3 | * | ||
4 | * clear_page()/copy_page() implementation for MMUless SH. | ||
5 | * | ||
6 | * Copyright (C) 2003 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/uaccess.h> | ||
17 | |||
18 | void copy_page(void *to, void *from) | ||
19 | { | ||
20 | memcpy(to, from, PAGE_SIZE); | ||
21 | } | ||
22 | |||
23 | void clear_page(void *to) | ||
24 | { | ||
25 | memset(to, 0, PAGE_SIZE); | ||
26 | } | ||
27 | |||
28 | __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) | ||
29 | { | ||
30 | memcpy(to, from, n); | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | __kernel_size_t __clear_user(void *to, __kernel_size_t n) | ||
35 | { | ||
36 | memset(to, 0, n); | ||
37 | return 0; | ||
38 | } | ||
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c deleted file mode 100644 index 2fe14da1f839..000000000000 --- a/arch/sh/mm/pg-sh4.c +++ /dev/null | |||
@@ -1,146 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/pg-sh4.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | ||
5 | * Copyright (C) 2002 - 2007 Paul Mundt | ||
6 | * | ||
7 | * Released under the terms of the GNU GPL v2.0. | ||
8 | */ | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mutex.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/highmem.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | #include <asm/cacheflush.h> | ||
17 | |||
18 | #define CACHE_ALIAS (current_cpu_data.dcache.alias_mask) | ||
19 | |||
20 | #define kmap_get_fixmap_pte(vaddr) \ | ||
21 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) | ||
22 | |||
23 | static pte_t *kmap_coherent_pte; | ||
24 | |||
25 | void __init kmap_coherent_init(void) | ||
26 | { | ||
27 | unsigned long vaddr; | ||
28 | |||
29 | /* cache the first coherent kmap pte */ | ||
30 | vaddr = __fix_to_virt(FIX_CMAP_BEGIN); | ||
31 | kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); | ||
32 | } | ||
33 | |||
34 | static inline void *kmap_coherent(struct page *page, unsigned long addr) | ||
35 | { | ||
36 | enum fixed_addresses idx; | ||
37 | unsigned long vaddr, flags; | ||
38 | pte_t pte; | ||
39 | |||
40 | inc_preempt_count(); | ||
41 | |||
42 | idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT; | ||
43 | vaddr = __fix_to_virt(FIX_CMAP_END - idx); | ||
44 | pte = mk_pte(page, PAGE_KERNEL); | ||
45 | |||
46 | local_irq_save(flags); | ||
47 | flush_tlb_one(get_asid(), vaddr); | ||
48 | local_irq_restore(flags); | ||
49 | |||
50 | update_mmu_cache(NULL, vaddr, pte); | ||
51 | |||
52 | set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); | ||
53 | |||
54 | return (void *)vaddr; | ||
55 | } | ||
56 | |||
57 | static inline void kunmap_coherent(struct page *page) | ||
58 | { | ||
59 | dec_preempt_count(); | ||
60 | preempt_check_resched(); | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * clear_user_page | ||
65 | * @to: P1 address | ||
66 | * @address: U0 address to be mapped | ||
67 | * @page: page (virt_to_page(to)) | ||
68 | */ | ||
69 | void clear_user_page(void *to, unsigned long address, struct page *page) | ||
70 | { | ||
71 | __set_bit(PG_mapped, &page->flags); | ||
72 | |||
73 | clear_page(to); | ||
74 | if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS)) | ||
75 | __flush_wback_region(to, PAGE_SIZE); | ||
76 | } | ||
77 | |||
78 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | ||
79 | unsigned long vaddr, void *dst, const void *src, | ||
80 | unsigned long len) | ||
81 | { | ||
82 | void *vto; | ||
83 | |||
84 | __set_bit(PG_mapped, &page->flags); | ||
85 | |||
86 | vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); | ||
87 | memcpy(vto, src, len); | ||
88 | kunmap_coherent(vto); | ||
89 | |||
90 | if (vma->vm_flags & VM_EXEC) | ||
91 | flush_cache_page(vma, vaddr, page_to_pfn(page)); | ||
92 | } | ||
93 | |||
94 | void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | ||
95 | unsigned long vaddr, void *dst, const void *src, | ||
96 | unsigned long len) | ||
97 | { | ||
98 | void *vfrom; | ||
99 | |||
100 | __set_bit(PG_mapped, &page->flags); | ||
101 | |||
102 | vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); | ||
103 | memcpy(dst, vfrom, len); | ||
104 | kunmap_coherent(vfrom); | ||
105 | } | ||
106 | |||
107 | void copy_user_highpage(struct page *to, struct page *from, | ||
108 | unsigned long vaddr, struct vm_area_struct *vma) | ||
109 | { | ||
110 | void *vfrom, *vto; | ||
111 | |||
112 | __set_bit(PG_mapped, &to->flags); | ||
113 | |||
114 | vto = kmap_atomic(to, KM_USER1); | ||
115 | vfrom = kmap_coherent(from, vaddr); | ||
116 | copy_page(vto, vfrom); | ||
117 | kunmap_coherent(vfrom); | ||
118 | |||
119 | if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS)) | ||
120 | __flush_wback_region(vto, PAGE_SIZE); | ||
121 | |||
122 | kunmap_atomic(vto, KM_USER1); | ||
123 | /* Make sure this page is cleared on other CPUs too before using it */ | ||
124 | smp_wmb(); | ||
125 | } | ||
126 | EXPORT_SYMBOL(copy_user_highpage); | ||
127 | |||
128 | /* | ||
129 | * For SH-4, we have our own implementation for ptep_get_and_clear | ||
130 | */ | ||
131 | pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
132 | { | ||
133 | pte_t pte = *ptep; | ||
134 | |||
135 | pte_clear(mm, addr, ptep); | ||
136 | if (!pte_not_present(pte)) { | ||
137 | unsigned long pfn = pte_pfn(pte); | ||
138 | if (pfn_valid(pfn)) { | ||
139 | struct page *page = pfn_to_page(pfn); | ||
140 | struct address_space *mapping = page_mapping(page); | ||
141 | if (!mapping || !mapping_writably_mapped(mapping)) | ||
142 | __clear_bit(PG_mapped, &page->flags); | ||
143 | } | ||
144 | } | ||
145 | return pte; | ||
146 | } | ||
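
Editor's note: the removed pg-sh4.c avoids D-cache aliasing by giving each cache colour its own fixmap slot, so the temporary kernel mapping set up by kmap_coherent() shares the colour of the user address it shadows. A rough worked example of that colour computation follows; the cache geometry and names here are assumptions for illustration, not taken from this diff.

/*
 * Illustrative sketch only -- the names and numbers are assumptions.
 * With, say, a 16KB direct-mapped D-cache and 4KB pages, the alias mask
 * would be 0x3000, giving four cache colours; the removed kmap_coherent()
 * picks the fixmap slot matching the colour of the user address so the
 * temporary mapping cannot alias it.
 */
#define EXAMPLE_DCACHE_ALIAS_MASK	0x3000UL	/* hypothetical value */

static unsigned long example_cmap_colour(unsigned long uaddr)
{
	/* Same computation as "idx" in the removed kmap_coherent(). */
	return (uaddr & EXAMPLE_DCACHE_ALIAS_MASK) >> PAGE_SHIFT;	/* 0..3 */
}
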
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c deleted file mode 100644 index eaf25147194c..000000000000 --- a/arch/sh/mm/pg-sh7705.c +++ /dev/null | |||
@@ -1,138 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/pg-sh7705.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000 Niibe Yutaka | ||
5 | * Copyright (C) 2004 Alex Song | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/mman.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/threads.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <asm/addrspace.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/pgtable.h> | ||
21 | #include <asm/processor.h> | ||
22 | #include <asm/cache.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/pgalloc.h> | ||
26 | #include <asm/mmu_context.h> | ||
27 | #include <asm/cacheflush.h> | ||
28 | |||
29 | static inline void __flush_purge_virtual_region(void *p1, void *virt, int size) | ||
30 | { | ||
31 | unsigned long v; | ||
32 | unsigned long begin, end; | ||
33 | unsigned long p1_begin; | ||
34 | |||
35 | |||
36 | begin = L1_CACHE_ALIGN((unsigned long)virt); | ||
37 | end = L1_CACHE_ALIGN((unsigned long)virt + size); | ||
38 | |||
39 | p1_begin = (unsigned long)p1 & ~(L1_CACHE_BYTES - 1); | ||
40 | |||
41 | /* do this the slow way as we may not have TLB entries | ||
42 | * for virt yet. */ | ||
43 | for (v = begin; v < end; v += L1_CACHE_BYTES) { | ||
44 | unsigned long p; | ||
45 | unsigned long ways, addr; | ||
46 | |||
47 | p = __pa(p1_begin); | ||
48 | |||
49 | ways = current_cpu_data.dcache.ways; | ||
50 | addr = CACHE_OC_ADDRESS_ARRAY; | ||
51 | |||
52 | do { | ||
53 | unsigned long data; | ||
54 | |||
55 | addr |= (v & current_cpu_data.dcache.entry_mask); | ||
56 | |||
57 | data = ctrl_inl(addr); | ||
58 | if ((data & CACHE_PHYSADDR_MASK) == | ||
59 | (p & CACHE_PHYSADDR_MASK)) { | ||
60 | data &= ~(SH_CACHE_UPDATED|SH_CACHE_VALID); | ||
61 | ctrl_outl(data, addr); | ||
62 | } | ||
63 | |||
64 | addr += current_cpu_data.dcache.way_incr; | ||
65 | } while (--ways); | ||
66 | |||
67 | p1_begin += L1_CACHE_BYTES; | ||
68 | } | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * clear_user_page | ||
73 | * @to: P1 address | ||
74 | * @address: U0 address to be mapped | ||
75 | */ | ||
76 | void clear_user_page(void *to, unsigned long address, struct page *pg) | ||
77 | { | ||
78 | struct page *page = virt_to_page(to); | ||
79 | |||
80 | __set_bit(PG_mapped, &page->flags); | ||
81 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { | ||
82 | clear_page(to); | ||
83 | __flush_wback_region(to, PAGE_SIZE); | ||
84 | } else { | ||
85 | __flush_purge_virtual_region(to, | ||
86 | (void *)(address & 0xfffff000), | ||
87 | PAGE_SIZE); | ||
88 | clear_page(to); | ||
89 | __flush_wback_region(to, PAGE_SIZE); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * copy_user_page | ||
95 | * @to: P1 address | ||
96 | * @from: P1 address | ||
97 | * @address: U0 address to be mapped | ||
98 | */ | ||
99 | void copy_user_page(void *to, void *from, unsigned long address, struct page *pg) | ||
100 | { | ||
101 | struct page *page = virt_to_page(to); | ||
102 | |||
103 | |||
104 | __set_bit(PG_mapped, &page->flags); | ||
105 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { | ||
106 | copy_page(to, from); | ||
107 | __flush_wback_region(to, PAGE_SIZE); | ||
108 | } else { | ||
109 | __flush_purge_virtual_region(to, | ||
110 | (void *)(address & 0xfffff000), | ||
111 | PAGE_SIZE); | ||
112 | copy_page(to, from); | ||
113 | __flush_wback_region(to, PAGE_SIZE); | ||
114 | } | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * For SH7705, we have our own implementation for ptep_get_and_clear | ||
119 | * Copied from pg-sh4.c | ||
120 | */ | ||
121 | pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
122 | { | ||
123 | pte_t pte = *ptep; | ||
124 | |||
125 | pte_clear(mm, addr, ptep); | ||
126 | if (!pte_not_present(pte)) { | ||
127 | unsigned long pfn = pte_pfn(pte); | ||
128 | if (pfn_valid(pfn)) { | ||
129 | struct page *page = pfn_to_page(pfn); | ||
130 | struct address_space *mapping = page_mapping(page); | ||
131 | if (!mapping || !mapping_writably_mapped(mapping)) | ||
132 | __clear_bit(PG_mapped, &page->flags); | ||
133 | } | ||
134 | } | ||
135 | |||
136 | return pte; | ||
137 | } | ||
138 | |||
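
Editor's note: both removed files guard clear_user_page()/copy_user_page() with the same colour test: the extra purge/write-back is only needed when the kernel P1 alias and the user mapping land in different cache colours. A minimal restatement of that test, with a hypothetical helper name and the logic taken from the removed code:

/*
 * Hypothetical helper; CACHE_ALIAS is the D-cache alias mask used by the
 * files above.  Returns non-zero when the two addresses map to different
 * cache colours and therefore need the slow flushing path.
 */
static inline int colours_differ(unsigned long kaddr, unsigned long uaddr)
{
	return ((kaddr ^ uaddr) & CACHE_ALIAS) != 0;
}
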
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c index 2aab3ea934d7..409b7c2b4b9d 100644 --- a/arch/sh/mm/tlb-pteaex.c +++ b/arch/sh/mm/tlb-pteaex.c | |||
@@ -16,34 +16,16 @@ | |||
16 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | 18 | ||
19 | void update_mmu_cache(struct vm_area_struct * vma, | 19 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
20 | unsigned long address, pte_t pte) | ||
21 | { | 20 | { |
22 | unsigned long flags; | 21 | unsigned long flags, pteval, vpn; |
23 | unsigned long pteval; | ||
24 | unsigned long vpn; | ||
25 | 22 | ||
26 | /* Ptrace may call this routine. */ | 23 | /* |
24 | * Handle debugger faulting in for the debuggee. | ||
25 | */ | ||
27 | if (vma && current->active_mm != vma->vm_mm) | 26 | if (vma && current->active_mm != vma->vm_mm) |
28 | return; | 27 | return; |
29 | 28 | ||
30 | #ifndef CONFIG_CACHE_OFF | ||
31 | { | ||
32 | unsigned long pfn = pte_pfn(pte); | ||
33 | |||
34 | if (pfn_valid(pfn)) { | ||
35 | struct page *page = pfn_to_page(pfn); | ||
36 | |||
37 | if (!test_bit(PG_mapped, &page->flags)) { | ||
38 | unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; | ||
39 | __flush_wback_region((void *)P1SEGADDR(phys), | ||
40 | PAGE_SIZE); | ||
41 | __set_bit(PG_mapped, &page->flags); | ||
42 | } | ||
43 | } | ||
44 | } | ||
45 | #endif | ||
46 | |||
47 | local_irq_save(flags); | 29 | local_irq_save(flags); |
48 | 30 | ||
49 | /* Set PTEH register */ | 31 | /* Set PTEH register */ |
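
Editor's note: the pattern here and in tlb-sh3.c and tlb-sh4.c below is the same: the per-CPU update_mmu_cache() bodies become __update_tlb() and lose their open-coded PG_mapped flushing, which moves into the common cache code. Presumably update_mmu_cache() is now a thin generic wrapper; a sketch under that assumption (not text from this diff):

/* Sketch of the assumed common wrapper after this refactor. */
static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	__update_cache(vma, address, pte);	/* alias/PG_mapped-style handling */
	__update_tlb(vma, address, pte);	/* CPU-specific TLB preload */
}
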
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c index 17cb7c3adf22..ace8e6d2f59d 100644 --- a/arch/sh/mm/tlb-sh3.c +++ b/arch/sh/mm/tlb-sh3.c | |||
@@ -27,32 +27,16 @@ | |||
27 | #include <asm/mmu_context.h> | 27 | #include <asm/mmu_context.h> |
28 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
29 | 29 | ||
30 | void update_mmu_cache(struct vm_area_struct * vma, | 30 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
31 | unsigned long address, pte_t pte) | ||
32 | { | 31 | { |
33 | unsigned long flags; | 32 | unsigned long flags, pteval, vpn; |
34 | unsigned long pteval; | ||
35 | unsigned long vpn; | ||
36 | 33 | ||
37 | /* Ptrace may call this routine. */ | 34 | /* |
35 | * Handle debugger faulting in for the debuggee. | ||
36 | */ | ||
38 | if (vma && current->active_mm != vma->vm_mm) | 37 | if (vma && current->active_mm != vma->vm_mm) |
39 | return; | 38 | return; |
40 | 39 | ||
41 | #if defined(CONFIG_SH7705_CACHE_32KB) | ||
42 | { | ||
43 | struct page *page = pte_page(pte); | ||
44 | unsigned long pfn = pte_pfn(pte); | ||
45 | |||
46 | if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) { | ||
47 | unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; | ||
48 | |||
49 | __flush_wback_region((void *)P1SEGADDR(phys), | ||
50 | PAGE_SIZE); | ||
51 | __set_bit(PG_mapped, &page->flags); | ||
52 | } | ||
53 | } | ||
54 | #endif | ||
55 | |||
56 | local_irq_save(flags); | 40 | local_irq_save(flags); |
57 | 41 | ||
58 | /* Set PTEH register */ | 42 | /* Set PTEH register */ |
@@ -93,4 +77,3 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) | |||
93 | for (i = 0; i < ways; i++) | 77 | for (i = 0; i < ways; i++) |
94 | ctrl_outl(data, addr + (i << 8)); | 78 | ctrl_outl(data, addr + (i << 8)); |
95 | } | 79 | } |
96 | |||
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c index f0c7b7397fa6..8cf550e2570f 100644 --- a/arch/sh/mm/tlb-sh4.c +++ b/arch/sh/mm/tlb-sh4.c | |||
@@ -15,34 +15,16 @@ | |||
15 | #include <asm/mmu_context.h> | 15 | #include <asm/mmu_context.h> |
16 | #include <asm/cacheflush.h> | 16 | #include <asm/cacheflush.h> |
17 | 17 | ||
18 | void update_mmu_cache(struct vm_area_struct * vma, | 18 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
19 | unsigned long address, pte_t pte) | ||
20 | { | 19 | { |
21 | unsigned long flags; | 20 | unsigned long flags, pteval, vpn; |
22 | unsigned long pteval; | ||
23 | unsigned long vpn; | ||
24 | 21 | ||
25 | /* Ptrace may call this routine. */ | 22 | /* |
23 | * Handle debugger faulting in for the debuggee. | ||
24 | */ | ||
26 | if (vma && current->active_mm != vma->vm_mm) | 25 | if (vma && current->active_mm != vma->vm_mm) |
27 | return; | 26 | return; |
28 | 27 | ||
29 | #ifndef CONFIG_CACHE_OFF | ||
30 | { | ||
31 | unsigned long pfn = pte_pfn(pte); | ||
32 | |||
33 | if (pfn_valid(pfn)) { | ||
34 | struct page *page = pfn_to_page(pfn); | ||
35 | |||
36 | if (!test_bit(PG_mapped, &page->flags)) { | ||
37 | unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; | ||
38 | __flush_wback_region((void *)P1SEGADDR(phys), | ||
39 | PAGE_SIZE); | ||
40 | __set_bit(PG_mapped, &page->flags); | ||
41 | } | ||
42 | } | ||
43 | } | ||
44 | #endif | ||
45 | |||
46 | local_irq_save(flags); | 28 | local_irq_save(flags); |
47 | 29 | ||
48 | /* Set PTEH register */ | 30 | /* Set PTEH register */ |
@@ -61,9 +43,12 @@ void update_mmu_cache(struct vm_area_struct * vma, | |||
61 | */ | 43 | */ |
62 | ctrl_outl(pte.pte_high, MMU_PTEA); | 44 | ctrl_outl(pte.pte_high, MMU_PTEA); |
63 | #else | 45 | #else |
64 | if (cpu_data->flags & CPU_HAS_PTEA) | 46 | if (cpu_data->flags & CPU_HAS_PTEA) { |
65 | /* TODO: make this look less hacky */ | 47 | /* The upper 3 bits and bit 0 of pteval contain
66 | ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA); | 48 | * the PTEA timing control and space attribute bits |
49 | */ | ||
50 | ctrl_outl(copy_ptea_attributes(pteval), MMU_PTEA); | ||
51 | } | ||
67 | #endif | 52 | #endif |
68 | 53 | ||
69 | /* Set PTEL register */ | 54 | /* Set PTEL register */ |
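
Editor's note: the open-coded PTEA expression is replaced by copy_ptea_attributes(), whose definition is not part of this hunk. Judging from the expression it replaces, an equivalent helper would look roughly like this (an assumption, shown for clarity):

/*
 * Assumed shape of copy_ptea_attributes(), equivalent to the removed
 * expression: bits 31:29 of the PTE value become PTEA bits 3:1 and bit 0
 * is carried over unchanged.
 */
static inline unsigned long copy_ptea_attributes(unsigned long pteval)
{
	return ((pteval >> 28) & 0xe) | (pteval & 0x1);
}
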
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c index dae131243bcc..fdb64e41ec50 100644 --- a/arch/sh/mm/tlb-sh5.c +++ b/arch/sh/mm/tlb-sh5.c | |||
@@ -117,26 +117,15 @@ int sh64_put_wired_dtlb_entry(unsigned long long entry) | |||
117 | * Load up a virtual<->physical translation for @eaddr<->@paddr in the | 117 | * Load up a virtual<->physical translation for @eaddr<->@paddr in the |
118 | * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry). | 118 | * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry). |
119 | */ | 119 | */ |
120 | inline void sh64_setup_tlb_slot(unsigned long long config_addr, | 120 | void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, |
121 | unsigned long eaddr, | 121 | unsigned long asid, unsigned long paddr) |
122 | unsigned long asid, | ||
123 | unsigned long paddr) | ||
124 | { | 122 | { |
125 | unsigned long long pteh, ptel; | 123 | unsigned long long pteh, ptel; |
126 | 124 | ||
127 | /* Sign extension */ | 125 | pteh = neff_sign_extend(eaddr); |
128 | #if (NEFF == 32) | ||
129 | pteh = (unsigned long long)(signed long long)(signed long) eaddr; | ||
130 | #else | ||
131 | #error "Can't sign extend more than 32 bits yet" | ||
132 | #endif | ||
133 | pteh &= PAGE_MASK; | 126 | pteh &= PAGE_MASK; |
134 | pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID; | 127 | pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID; |
135 | #if (NEFF == 32) | 128 | ptel = neff_sign_extend(paddr); |
136 | ptel = (unsigned long long)(signed long long)(signed long) paddr; | ||
137 | #else | ||
138 | #error "Can't sign extend more than 32 bits yet" | ||
139 | #endif | ||
140 | ptel &= PAGE_MASK; | 129 | ptel &= PAGE_MASK; |
141 | ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE); | 130 | ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE); |
142 | 131 | ||
@@ -152,5 +141,5 @@ inline void sh64_setup_tlb_slot(unsigned long long config_addr, | |||
152 | * | 141 | * |
153 | * Teardown any existing mapping in the TLB slot @config_addr. | 142 | * Teardown any existing mapping in the TLB slot @config_addr. |
154 | */ | 143 | */ |
155 | inline void sh64_teardown_tlb_slot(unsigned long long config_addr) | 144 | void sh64_teardown_tlb_slot(unsigned long long config_addr) |
156 | __attribute__ ((alias("__flush_tlb_slot"))); | 145 | __attribute__ ((alias("__flush_tlb_slot"))); |
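
Editor's note: both sign-extension sites in the sh64 code (here and in tlbflush_64.c below) now go through neff_sign_extend(), which is defined outside this diff. Based on the two expressions it replaces, a plausible form is (assumption):

/*
 * Assumed helper: extend an NEFF-bit effective/physical address to 64 bits.
 * NEFF_SIGN and NEFF_MASK are the constants the removed code in
 * tlbflush_64.c already used.
 */
static inline unsigned long long neff_sign_extend(unsigned long long val)
{
	return (val & NEFF_SIGN) ? (val | NEFF_MASK) : val;
}
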
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index 3ce40ea34824..de0b0e881823 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/mman.h> | 20 | #include <linux/mman.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/perf_counter.h> | 23 | #include <linux/perf_event.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <asm/system.h> | 25 | #include <asm/system.h> |
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | |||
116 | /* Not an IO address, so reenable interrupts */ | 116 | /* Not an IO address, so reenable interrupts */ |
117 | local_irq_enable(); | 117 | local_irq_enable(); |
118 | 118 | ||
119 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 119 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); |
120 | 120 | ||
121 | /* | 121 | /* |
122 | * If we're in an interrupt or have no user | 122 | * If we're in an interrupt or have no user |
@@ -201,11 +201,11 @@ survive: | |||
201 | 201 | ||
202 | if (fault & VM_FAULT_MAJOR) { | 202 | if (fault & VM_FAULT_MAJOR) { |
203 | tsk->maj_flt++; | 203 | tsk->maj_flt++; |
204 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 204 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
205 | regs, address); | 205 | regs, address); |
206 | } else { | 206 | } else { |
207 | tsk->min_flt++; | 207 | tsk->min_flt++; |
208 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 208 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, |
209 | regs, address); | 209 | regs, address); |
210 | } | 210 | } |
211 | 211 | ||
@@ -329,22 +329,6 @@ do_sigbus: | |||
329 | goto no_context; | 329 | goto no_context; |
330 | } | 330 | } |
331 | 331 | ||
332 | void update_mmu_cache(struct vm_area_struct * vma, | ||
333 | unsigned long address, pte_t pte) | ||
334 | { | ||
335 | /* | ||
336 | * This appears to get called once for every pte entry that gets | ||
337 | * established => I don't think it's efficient to try refilling the | ||
338 | * TLBs with the pages - some may not get accessed even. Also, for | ||
339 | * executable pages, it is impossible to determine reliably here which | ||
340 | * TLB they should be mapped into (or both even). | ||
341 | * | ||
342 | * So, just do nothing here and handle faults on demand. In the | ||
343 | * TLBMISS handling case, the refill is now done anyway after the pte | ||
344 | * has been fixed up, so that deals with most useful cases. | ||
345 | */ | ||
346 | } | ||
347 | |||
348 | void local_flush_tlb_one(unsigned long asid, unsigned long page) | 332 | void local_flush_tlb_one(unsigned long asid, unsigned long page) |
349 | { | 333 | { |
350 | unsigned long long match, pteh=0, lpage; | 334 | unsigned long long match, pteh=0, lpage; |
@@ -353,7 +337,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) | |||
353 | /* | 337 | /* |
354 | * Sign-extend based on neff. | 338 | * Sign-extend based on neff. |
355 | */ | 339 | */ |
356 | lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page; | 340 | lpage = neff_sign_extend(page); |
357 | match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID; | 341 | match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID; |
358 | match |= lpage; | 342 | match |= lpage; |
359 | 343 | ||
@@ -482,3 +466,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
482 | /* FIXME: Optimize this later.. */ | 466 | /* FIXME: Optimize this later.. */ |
483 | flush_tlb_all(); | 467 | flush_tlb_all(); |
484 | } | 468 | } |
469 | |||
470 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | ||
471 | { | ||
472 | } | ||