diff options
author | Phil Edworthy <phil.edworthy@renesas.com> | 2012-01-09 11:08:47 -0500 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2012-01-11 23:11:02 -0500 |
commit | c1537b4863da620f12f5b42ece61bf65314148ed (patch) | |
tree | fb1dddd002d8207cb84c1c8fde1f58335449db18 /arch/sh/mm/cache-sh2a.c | |
parent | e343a895a9f342f239c5e3c5ffc6c0b1707e6244 (diff) |
sh: sh2a: Improve cache flush/invalidate functions
The cache functions lock out interrupts for long periods; this patch
reduces the impact when operating on large address ranges. In such
cases it will:
- Invalidate the entire cache rather than individual addresses.
- Do nothing when flushing the operand cache in write-through mode.
- When flushing the operand cache in write-back mode, index the
search for matching addresses on the cache entries instead of the
addresses to flush
Note: sh2a__flush_purge_region was only invalidating the operand
cache, this adds flush.
Signed-off-by: Phil Edworthy <phil.edworthy@renesas.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/cache-sh2a.c')
-rw-r--r-- | arch/sh/mm/cache-sh2a.c | 123 |
1 files changed, 84 insertions, 39 deletions
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c index 1f51225426a2..ae08cbbfa569 100644 --- a/arch/sh/mm/cache-sh2a.c +++ b/arch/sh/mm/cache-sh2a.c | |||
@@ -15,35 +15,78 @@ | |||
15 | #include <asm/cacheflush.h> | 15 | #include <asm/cacheflush.h> |
16 | #include <asm/io.h> | 16 | #include <asm/io.h> |
17 | 17 | ||
18 | /* | ||
19 | * The maximum number of pages we support up to when doing ranged dcache | ||
20 | * flushing. Anything exceeding this will simply flush the dcache in its | ||
21 | * entirety. | ||
22 | */ | ||
23 | #define MAX_OCACHE_PAGES 32 | ||
24 | #define MAX_ICACHE_PAGES 32 | ||
25 | |||
26 | static void sh2a_flush_oc_line(unsigned long v, int way) | ||
27 | { | ||
28 | unsigned long addr = (v & 0x000007f0) | (way << 11); | ||
29 | unsigned long data; | ||
30 | |||
31 | data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr); | ||
32 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { | ||
33 | data &= ~SH_CACHE_UPDATED; | ||
34 | __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr); | ||
35 | } | ||
36 | } | ||
37 | |||
38 | static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v) | ||
39 | { | ||
40 | /* Set associative bit to hit all ways */ | ||
41 | unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC; | ||
42 | __raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr); | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * Write back the dirty D-caches, but not invalidate them. | ||
47 | */ | ||
18 | static void sh2a__flush_wback_region(void *start, int size) | 48 | static void sh2a__flush_wback_region(void *start, int size) |
19 | { | 49 | { |
50 | #ifdef CONFIG_CACHE_WRITEBACK | ||
20 | unsigned long v; | 51 | unsigned long v; |
21 | unsigned long begin, end; | 52 | unsigned long begin, end; |
22 | unsigned long flags; | 53 | unsigned long flags; |
54 | int nr_ways; | ||
23 | 55 | ||
24 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | 56 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); |
25 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | 57 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) |
26 | & ~(L1_CACHE_BYTES-1); | 58 | & ~(L1_CACHE_BYTES-1); |
59 | nr_ways = current_cpu_data.dcache.ways; | ||
27 | 60 | ||
28 | local_irq_save(flags); | 61 | local_irq_save(flags); |
29 | jump_to_uncached(); | 62 | jump_to_uncached(); |
30 | 63 | ||
31 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | 64 | /* If there are too many pages then flush the entire cache */ |
32 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0); | 65 | if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) { |
66 | begin = CACHE_OC_ADDRESS_ARRAY; | ||
67 | end = begin + (nr_ways * current_cpu_data.dcache.way_size); | ||
68 | |||
69 | for (v = begin; v < end; v += L1_CACHE_BYTES) { | ||
70 | unsigned long data = __raw_readl(v); | ||
71 | if (data & SH_CACHE_UPDATED) | ||
72 | __raw_writel(data & ~SH_CACHE_UPDATED, v); | ||
73 | } | ||
74 | } else { | ||
33 | int way; | 75 | int way; |
34 | for (way = 0; way < 4; way++) { | 76 | for (way = 0; way < nr_ways; way++) { |
35 | unsigned long data = __raw_readl(addr | (way << 11)); | 77 | for (v = begin; v < end; v += L1_CACHE_BYTES) |
36 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { | 78 | sh2a_flush_oc_line(v, way); |
37 | data &= ~SH_CACHE_UPDATED; | ||
38 | __raw_writel(data, addr | (way << 11)); | ||
39 | } | ||
40 | } | 79 | } |
41 | } | 80 | } |
42 | 81 | ||
43 | back_to_cached(); | 82 | back_to_cached(); |
44 | local_irq_restore(flags); | 83 | local_irq_restore(flags); |
84 | #endif | ||
45 | } | 85 | } |
46 | 86 | ||
87 | /* | ||
88 | * Write back the dirty D-caches and invalidate them. | ||
89 | */ | ||
47 | static void sh2a__flush_purge_region(void *start, int size) | 90 | static void sh2a__flush_purge_region(void *start, int size) |
48 | { | 91 | { |
49 | unsigned long v; | 92 | unsigned long v; |
@@ -58,13 +101,22 @@ static void sh2a__flush_purge_region(void *start, int size) | |||
58 | jump_to_uncached(); | 101 | jump_to_uncached(); |
59 | 102 | ||
60 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | 103 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { |
61 | __raw_writel((v & CACHE_PHYSADDR_MASK), | 104 | #ifdef CONFIG_CACHE_WRITEBACK |
62 | CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | 105 | int way; |
106 | int nr_ways = current_cpu_data.dcache.ways; | ||
107 | for (way = 0; way < nr_ways; way++) | ||
108 | sh2a_flush_oc_line(v, way); | ||
109 | #endif | ||
110 | sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v); | ||
63 | } | 111 | } |
112 | |||
64 | back_to_cached(); | 113 | back_to_cached(); |
65 | local_irq_restore(flags); | 114 | local_irq_restore(flags); |
66 | } | 115 | } |
67 | 116 | ||
117 | /* | ||
118 | * Invalidate the D-caches, but no write back please | ||
119 | */ | ||
68 | static void sh2a__flush_invalidate_region(void *start, int size) | 120 | static void sh2a__flush_invalidate_region(void *start, int size) |
69 | { | 121 | { |
70 | unsigned long v; | 122 | unsigned long v; |
@@ -74,29 +126,25 @@ static void sh2a__flush_invalidate_region(void *start, int size) | |||
74 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | 126 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); |
75 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | 127 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) |
76 | & ~(L1_CACHE_BYTES-1); | 128 | & ~(L1_CACHE_BYTES-1); |
129 | |||
77 | local_irq_save(flags); | 130 | local_irq_save(flags); |
78 | jump_to_uncached(); | 131 | jump_to_uncached(); |
79 | 132 | ||
80 | #ifdef CONFIG_CACHE_WRITEBACK | 133 | /* If there are too many pages then just blow the cache */ |
81 | __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR); | 134 | if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) { |
82 | /* I-cache invalidate */ | 135 | __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR); |
83 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | 136 | } else { |
84 | __raw_writel((v & CACHE_PHYSADDR_MASK), | 137 | for (v = begin; v < end; v += L1_CACHE_BYTES) |
85 | CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | 138 | sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v); |
86 | } | ||
87 | #else | ||
88 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
89 | __raw_writel((v & CACHE_PHYSADDR_MASK), | ||
90 | CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | ||
91 | __raw_writel((v & CACHE_PHYSADDR_MASK), | ||
92 | CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | ||
93 | } | 139 | } |
94 | #endif | 140 | |
95 | back_to_cached(); | 141 | back_to_cached(); |
96 | local_irq_restore(flags); | 142 | local_irq_restore(flags); |
97 | } | 143 | } |
98 | 144 | ||
99 | /* WBack O-Cache and flush I-Cache */ | 145 | /* |
146 | * Write back the range of D-cache, and purge the I-cache. | ||
147 | */ | ||
100 | static void sh2a_flush_icache_range(void *args) | 148 | static void sh2a_flush_icache_range(void *args) |
101 | { | 149 | { |
102 | struct flusher_data *data = args; | 150 | struct flusher_data *data = args; |
@@ -107,23 +155,20 @@ static void sh2a_flush_icache_range(void *args) | |||
107 | start = data->addr1 & ~(L1_CACHE_BYTES-1); | 155 | start = data->addr1 & ~(L1_CACHE_BYTES-1); |
108 | end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); | 156 | end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); |
109 | 157 | ||
158 | #ifdef CONFIG_CACHE_WRITEBACK | ||
159 | sh2a__flush_wback_region((void *)start, end-start); | ||
160 | #endif | ||
161 | |||
110 | local_irq_save(flags); | 162 | local_irq_save(flags); |
111 | jump_to_uncached(); | 163 | jump_to_uncached(); |
112 | 164 | ||
113 | for (v = start; v < end; v+=L1_CACHE_BYTES) { | 165 | /* I-Cache invalidate */ |
114 | unsigned long addr = (v & 0x000007f0); | 166 | /* If there are too many pages then just blow the cache */ |
115 | int way; | 167 | if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { |
116 | /* O-Cache writeback */ | 168 | __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR); |
117 | for (way = 0; way < 4; way++) { | 169 | } else { |
118 | unsigned long data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11)); | 170 | for (v = start; v < end; v += L1_CACHE_BYTES) |
119 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { | 171 | sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v); |
120 | data &= ~SH_CACHE_UPDATED; | ||
121 | __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11)); | ||
122 | } | ||
123 | } | ||
124 | /* I-Cache invalidate */ | ||
125 | __raw_writel(addr, | ||
126 | CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008); | ||
127 | } | 172 | } |
128 | 173 | ||
129 | back_to_cached(); | 174 | back_to_cached(); |