path: root/arch/metag
author		James Hogan <james.hogan@imgtec.com>	2012-10-09 05:54:17 -0400
committer	James Hogan <james.hogan@imgtec.com>	2013-03-02 15:09:19 -0500
commit		99ef7c2ac1e3b01f532bfdebbe92e9960e95bebc (patch)
tree		fb34d1df645d7eee43fe2441fd4938b0da7bad6f /arch/metag
parent		027f891f7640144d4b7b15113f3ae9af2c8b095d (diff)
metag: Cache/TLB handling
Add cache and TLB handling code for metag, including the required callbacks used by MM switches and DMA operations. Caches can be partitioned between the hardware threads and the global space; however, this is usually configured by the bootloader, so Linux doesn't make any changes to this configuration. TLBs aren't configurable, so they only need to be flushed.

On Meta1 the L1 cache was VIVT, which required a full flush on MM switch. Meta2 has a VIPT L1 cache, so it doesn't require the full flush on MM switch. Meta2 can also have a write-back L2 with hardware prefetch, which requires some special handling. Support is optional, and the L2 can be detected and initialised by Linux.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Diffstat (limited to 'arch/metag')
-rw-r--r--  arch/metag/include/asm/cache.h       |  23
-rw-r--r--  arch/metag/include/asm/cacheflush.h  | 250
-rw-r--r--  arch/metag/include/asm/l2cache.h     | 258
-rw-r--r--  arch/metag/include/asm/tlb.h         |  36
-rw-r--r--  arch/metag/include/asm/tlbflush.h    |  77
-rw-r--r--  arch/metag/mm/cache.c                | 441
-rw-r--r--  arch/metag/mm/l2cache.c              | 192
7 files changed, 1277 insertions(+), 0 deletions(-)
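
As an illustration of how the DMA callbacks mentioned in the commit message are expected to use the region helpers added in cacheflush.h below, here is a minimal sketch. The function name and the use of enum dma_data_direction are assumptions for the example only, not part of this patch:

	#include <linux/types.h>
	#include <linux/dma-direction.h>
	#include <asm/cacheflush.h>

	/* Illustrative only: pick a d-cache region op per DMA direction. */
	static void example_dma_sync_for_device(void *vaddr, size_t size,
						enum dma_data_direction dir)
	{
		if (dir == DMA_TO_DEVICE)
			/* Device will read the buffer: push dirty lines out. */
			writeback_dcache_region(vaddr, size);
		else if (dir == DMA_FROM_DEVICE)
			/* Device will write the buffer: drop stale CPU-cached lines. */
			invalidate_dcache_region(vaddr, size);
		else
			/* Bidirectional: write back and invalidate. */
			flush_dcache_region(vaddr, size);
	}
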
diff --git a/arch/metag/include/asm/cache.h b/arch/metag/include/asm/cache.h
new file mode 100644
index 000000000000..a43b650cfdc0
--- /dev/null
+++ b/arch/metag/include/asm/cache.h
@@ -0,0 +1,23 @@
1#ifndef __ASM_METAG_CACHE_H
2#define __ASM_METAG_CACHE_H
3
4/* L1 cache line size (64 bytes) */
5#define L1_CACHE_SHIFT 6
6#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7
8/* Meta requires large data items to be 8 byte aligned. */
9#define ARCH_SLAB_MINALIGN 8
10
11/*
12 * With an L2 cache, we may invalidate dirty lines, so we need to ensure DMA
13 * buffers have cache line alignment.
14 */
15#ifdef CONFIG_METAG_L2C
16#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
17#else
18#define ARCH_DMA_MINALIGN 8
19#endif
20
21#define __read_mostly __attribute__((__section__(".data..read_mostly")))
22
23#endif
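
A note on ARCH_DMA_MINALIGN above: with a (possibly write-back) L2, invalidating a DMA buffer may discard whatever else shares its cache lines, so DMA buffers must not share a line with CPU-only data. A hypothetical driver-private structure (names invented for this sketch) shows the intended effect:

	#include <linux/cache.h>	/* L1_CACHE_BYTES, ____cacheline_aligned */
	#include <linux/spinlock.h>
	#include <linux/types.h>

	/*
	 * Illustrative: keep the DMA buffer on its own cache line(s) so an
	 * invalidate of the buffer cannot destroy the neighbouring CPU-only
	 * fields when ARCH_DMA_MINALIGN == L1_CACHE_BYTES (CONFIG_METAG_L2C).
	 */
	struct example_priv {
		spinlock_t lock;			/* CPU-only state */
		u8 rx_buf[256] ____cacheline_aligned;	/* device writes here */
	};
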
diff --git a/arch/metag/include/asm/cacheflush.h b/arch/metag/include/asm/cacheflush.h
new file mode 100644
index 000000000000..7787ec5e3ed0
--- /dev/null
+++ b/arch/metag/include/asm/cacheflush.h
@@ -0,0 +1,250 @@
1#ifndef _METAG_CACHEFLUSH_H
2#define _METAG_CACHEFLUSH_H
3
4#include <linux/mm.h>
5#include <linux/sched.h>
6#include <linux/io.h>
7
8#include <asm/l2cache.h>
9#include <asm/metag_isa.h>
10#include <asm/metag_mem.h>
11
12void metag_cache_probe(void);
13
14void metag_data_cache_flush_all(const void *start);
15void metag_code_cache_flush_all(const void *start);
16
17/*
18 * Routines to flush physical cache lines that may be used to cache data or
19 * code normally accessed via the linear address range supplied. The region
20 * flushed must lie either in the local or the global address space, as
21 * determined by the top bit of the start address. If bytes is >= 4K then the
22 * whole of the related cache state is flushed rather than a limited range.
23 */
24void metag_data_cache_flush(const void *start, int bytes);
25void metag_code_cache_flush(const void *start, int bytes);
26
27#ifdef CONFIG_METAG_META12
28
29/* Write through, virtually tagged, split I/D cache. */
30
31static inline void __flush_cache_all(void)
32{
33 metag_code_cache_flush_all((void *) PAGE_OFFSET);
34 metag_data_cache_flush_all((void *) PAGE_OFFSET);
35}
36
37#define flush_cache_all() __flush_cache_all()
38
39/* flush the entire user address space referenced in this mm structure */
40static inline void flush_cache_mm(struct mm_struct *mm)
41{
42 if (mm == current->mm)
43 __flush_cache_all();
44}
45
46#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
47
48/* flush a range of addresses from this mm */
49static inline void flush_cache_range(struct vm_area_struct *vma,
50 unsigned long start, unsigned long end)
51{
52 flush_cache_mm(vma->vm_mm);
53}
54
55static inline void flush_cache_page(struct vm_area_struct *vma,
56 unsigned long vmaddr, unsigned long pfn)
57{
58 flush_cache_mm(vma->vm_mm);
59}
60
61#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
62static inline void flush_dcache_page(struct page *page)
63{
64 metag_data_cache_flush_all((void *) PAGE_OFFSET);
65}
66
67#define flush_dcache_mmap_lock(mapping) do { } while (0)
68#define flush_dcache_mmap_unlock(mapping) do { } while (0)
69
70static inline void flush_icache_page(struct vm_area_struct *vma,
71 struct page *page)
72{
73 metag_code_cache_flush(page_to_virt(page), PAGE_SIZE);
74}
75
76static inline void flush_cache_vmap(unsigned long start, unsigned long end)
77{
78 metag_data_cache_flush_all((void *) PAGE_OFFSET);
79}
80
81static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
82{
83 metag_data_cache_flush_all((void *) PAGE_OFFSET);
84}
85
86#else
87
88/* Write through, physically tagged, split I/D cache. */
89
90#define flush_cache_all() do { } while (0)
91#define flush_cache_mm(mm) do { } while (0)
92#define flush_cache_dup_mm(mm) do { } while (0)
93#define flush_cache_range(vma, start, end) do { } while (0)
94#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
95#define flush_dcache_mmap_lock(mapping) do { } while (0)
96#define flush_dcache_mmap_unlock(mapping) do { } while (0)
97#define flush_icache_page(vma, pg) do { } while (0)
98#define flush_cache_vmap(start, end) do { } while (0)
99#define flush_cache_vunmap(start, end) do { } while (0)
100
101#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
102static inline void flush_dcache_page(struct page *page)
103{
104 /* FIXME: We can do better than this. All we are trying to do is
105 * make the i-cache coherent, we should use the PG_arch_1 bit like
106 * e.g. powerpc.
107 */
108#ifdef CONFIG_SMP
109 metag_out32(1, SYSC_ICACHE_FLUSH);
110#else
111 metag_code_cache_flush_all((void *) PAGE_OFFSET);
112#endif
113}
114
115#endif
116
117/* Push n pages at kernel virtual address and clear the icache */
118static inline void flush_icache_range(unsigned long address,
119 unsigned long endaddr)
120{
121#ifdef CONFIG_SMP
122 metag_out32(1, SYSC_ICACHE_FLUSH);
123#else
124 metag_code_cache_flush((void *) address, endaddr - address);
125#endif
126}
127
128static inline void flush_cache_sigtramp(unsigned long addr, int size)
129{
130 /*
131 * Flush the icache in case there was previously some code
132 * fetched from this address, perhaps a previous sigtramp.
133 *
134 * We don't need to flush the dcache, it's write through and
135 * we just wrote the sigtramp code through it.
136 */
137#ifdef CONFIG_SMP
138 metag_out32(1, SYSC_ICACHE_FLUSH);
139#else
140 metag_code_cache_flush((void *) addr, size);
141#endif
142}
143
144#ifdef CONFIG_METAG_L2C
145
146/*
147 * Perform a single specific CACHEWD operation on an address, masking lower bits
148 * of address first.
149 */
150static inline void cachewd_line(void *addr, unsigned int data)
151{
152 unsigned long masked = (unsigned long)addr & -0x40;
153 __builtin_meta2_cachewd((void *)masked, data);
154}
155
156/* Perform a certain CACHEW op on each cache line in a range */
157static inline void cachew_region_op(void *start, unsigned long size,
158 unsigned int op)
159{
160 unsigned long offset = (unsigned long)start & 0x3f;
161 int i;
162 if (offset) {
163 size += offset;
164 start -= offset;
165 }
166 i = (size - 1) >> 6;
167 do {
168 __builtin_meta2_cachewd(start, op);
169 start += 0x40;
170 } while (i--);
171}
172
173/* prevent write fence and flushbacks being reordered in L2 */
174static inline void l2c_fence_flush(void *addr)
175{
176 /*
177 * Synchronise by reading back and re-flushing.
178 * It is assumed this access will miss, as the caller should have just
179 * flushed the cache line.
180 */
181	(void)*(volatile u8 *)addr;
182 cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
183}
184
185/* prevent write fence and writebacks being reordered in L2 */
186static inline void l2c_fence(void *addr)
187{
188 /*
189 * A write back has occurred, but not necessarily an invalidate, so the
190 * readback in l2c_fence_flush() would hit in the cache and have no
191 * effect. Therefore fully flush the line first.
192 */
193 cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
194 l2c_fence_flush(addr);
195}
196
197/* Used to keep memory consistent when doing DMA. */
198static inline void flush_dcache_region(void *start, unsigned long size)
199{
200 /* metag_data_cache_flush won't flush L2 cache lines if size >= 4096 */
201 if (meta_l2c_is_enabled()) {
202 cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2);
203 if (meta_l2c_is_writeback())
204 l2c_fence_flush(start + size - 1);
205 } else {
206 metag_data_cache_flush(start, size);
207 }
208}
209
210/* Write back dirty lines to memory (or do nothing if no writeback caches) */
211static inline void writeback_dcache_region(void *start, unsigned long size)
212{
213 if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
214 cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2);
215 l2c_fence(start + size - 1);
216 }
217}
218
219/* Invalidate (may also write back if necessary) */
220static inline void invalidate_dcache_region(void *start, unsigned long size)
221{
222 if (meta_l2c_is_enabled())
223 cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
224 else
225 metag_data_cache_flush(start, size);
226}
227#else
228#define flush_dcache_region(s, l) metag_data_cache_flush((s), (l))
229#define writeback_dcache_region(s, l) do {} while (0)
230#define invalidate_dcache_region(s, l) flush_dcache_region((s), (l))
231#endif
232
233static inline void copy_to_user_page(struct vm_area_struct *vma,
234 struct page *page, unsigned long vaddr,
235 void *dst, const void *src,
236 unsigned long len)
237{
238 memcpy(dst, src, len);
239 flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
240}
241
242static inline void copy_from_user_page(struct vm_area_struct *vma,
243 struct page *page, unsigned long vaddr,
244 void *dst, const void *src,
245 unsigned long len)
246{
247 memcpy(dst, src, len);
248}
249
250#endif /* _METAG_CACHEFLUSH_H */
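
flush_icache_range() and flush_cache_sigtramp() above cover the usual "write instructions, then execute them" pattern also visible in copy_to_user_page(). A minimal sketch of a caller, with invented names:

	#include <linux/string.h>
	#include <linux/types.h>
	#include <asm/cacheflush.h>

	/*
	 * Illustrative: the stores go through the write-through d-cache, but the
	 * i-cache may still hold stale instructions for this address range.
	 */
	static void example_install_code(void *dst, const void *code, size_t len)
	{
		memcpy(dst, code, len);
		flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
	}
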
diff --git a/arch/metag/include/asm/l2cache.h b/arch/metag/include/asm/l2cache.h
new file mode 100644
index 000000000000..bffbeaa4d93b
--- /dev/null
+++ b/arch/metag/include/asm/l2cache.h
@@ -0,0 +1,258 @@
1#ifndef _METAG_L2CACHE_H
2#define _METAG_L2CACHE_H
3
4#ifdef CONFIG_METAG_L2C
5
6#include <asm/global_lock.h>
7#include <asm/io.h>
8
9/*
10 * Store the last known value of pfenable (we don't want prefetch enabled while
11 * L2 is off).
12 */
13extern int l2c_pfenable;
14
15/* defined in arch/metag/drivers/core-sysfs.c */
16extern struct sysdev_class cache_sysclass;
17
18static inline void wr_fence(void);
19
20/*
21 * Functions for reading of L2 cache configuration.
22 */
23
24/* Get raw L2 config register (CORE_CONFIG3) */
25static inline unsigned int meta_l2c_config(void)
26{
27 const unsigned int *corecfg3 = (const unsigned int *)METAC_CORE_CONFIG3;
28 return *corecfg3;
29}
30
31/* Get whether the L2 is present */
32static inline int meta_l2c_is_present(void)
33{
34 return meta_l2c_config() & METAC_CORECFG3_L2C_HAVE_L2C_BIT;
35}
36
37/* Get whether the L2 is configured for write-back instead of write-through */
38static inline int meta_l2c_is_writeback(void)
39{
40 return meta_l2c_config() & METAC_CORECFG3_L2C_MODE_BIT;
41}
42
43/* Get whether the L2 is unified instead of separated code/data */
44static inline int meta_l2c_is_unified(void)
45{
46 return meta_l2c_config() & METAC_CORECFG3_L2C_UNIFIED_BIT;
47}
48
49/* Get the L2 cache size in bytes */
50static inline unsigned int meta_l2c_size(void)
51{
52 unsigned int size_s;
53 if (!meta_l2c_is_present())
54 return 0;
55 size_s = (meta_l2c_config() & METAC_CORECFG3_L2C_SIZE_BITS)
56 >> METAC_CORECFG3_L2C_SIZE_S;
57 /* L2CSIZE is in KiB */
58 return 1024 << size_s;
59}
60
61/* Get the number of ways in the L2 cache */
62static inline unsigned int meta_l2c_ways(void)
63{
64 unsigned int ways_s;
65 if (!meta_l2c_is_present())
66 return 0;
67 ways_s = (meta_l2c_config() & METAC_CORECFG3_L2C_NUM_WAYS_BITS)
68 >> METAC_CORECFG3_L2C_NUM_WAYS_S;
69 return 0x1 << ways_s;
70}
71
72/* Get the line size of the L2 cache */
73static inline unsigned int meta_l2c_linesize(void)
74{
75 unsigned int line_size;
76 if (!meta_l2c_is_present())
77 return 0;
78 line_size = (meta_l2c_config() & METAC_CORECFG3_L2C_LINE_SIZE_BITS)
79 >> METAC_CORECFG3_L2C_LINE_SIZE_S;
80 switch (line_size) {
81 case METAC_CORECFG3_L2C_LINE_SIZE_64B:
82 return 64;
83 default:
84 return 0;
85 }
86}
87
88/* Get the revision ID of the L2 cache */
89static inline unsigned int meta_l2c_revision(void)
90{
91 return (meta_l2c_config() & METAC_CORECFG3_L2C_REV_ID_BITS)
92 >> METAC_CORECFG3_L2C_REV_ID_S;
93}
94
95
96/*
97 * Start an initialisation of the L2 cachelines and wait for completion.
98 * This should only be done in a LOCK1 or LOCK2 critical section while the L2
99 * is disabled.
100 */
101static inline void _meta_l2c_init(void)
102{
103 metag_out32(SYSC_L2C_INIT_INIT, SYSC_L2C_INIT);
104 while (metag_in32(SYSC_L2C_INIT) == SYSC_L2C_INIT_IN_PROGRESS)
105 /* do nothing */;
106}
107
108/*
109 * Start a writeback of dirty L2 cachelines and wait for completion.
110 * This should only be done in a LOCK1 or LOCK2 critical section.
111 */
112static inline void _meta_l2c_purge(void)
113{
114 metag_out32(SYSC_L2C_PURGE_PURGE, SYSC_L2C_PURGE);
115 while (metag_in32(SYSC_L2C_PURGE) == SYSC_L2C_PURGE_IN_PROGRESS)
116 /* do nothing */;
117}
118
119/* Set whether the L2 cache is enabled. */
120static inline void _meta_l2c_enable(int enabled)
121{
122 unsigned int enable;
123
124 enable = metag_in32(SYSC_L2C_ENABLE);
125 if (enabled)
126 enable |= SYSC_L2C_ENABLE_ENABLE_BIT;
127 else
128 enable &= ~SYSC_L2C_ENABLE_ENABLE_BIT;
129 metag_out32(enable, SYSC_L2C_ENABLE);
130}
131
132/* Set whether the L2 cache prefetch is enabled. */
133static inline void _meta_l2c_pf_enable(int pfenabled)
134{
135 unsigned int enable;
136
137 enable = metag_in32(SYSC_L2C_ENABLE);
138 if (pfenabled)
139 enable |= SYSC_L2C_ENABLE_PFENABLE_BIT;
140 else
141 enable &= ~SYSC_L2C_ENABLE_PFENABLE_BIT;
142 metag_out32(enable, SYSC_L2C_ENABLE);
143}
144
145/* Return whether the L2 cache is enabled */
146static inline int _meta_l2c_is_enabled(void)
147{
148 return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_ENABLE_BIT;
149}
150
151/* Return whether the L2 cache prefetch is enabled */
152static inline int _meta_l2c_pf_is_enabled(void)
153{
154 return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_PFENABLE_BIT;
155}
156
157
158/* Return whether the L2 cache is enabled */
159static inline int meta_l2c_is_enabled(void)
160{
161 int en;
162
163 /*
164 * There is no need to lock at the moment, as the enable bit is never
165 * intermediately changed, so we will never see an intermediate result.
166 */
167 en = _meta_l2c_is_enabled();
168
169 return en;
170}
171
172/*
173 * Ensure the L2 cache is disabled.
174 * Return whether the L2 was previously disabled.
175 */
176int meta_l2c_disable(void);
177
178/*
179 * Ensure the L2 cache is enabled.
180 * Return whether the L2 was previously enabled.
181 */
182int meta_l2c_enable(void);
183
184/* Return whether the L2 cache prefetch is enabled */
185static inline int meta_l2c_pf_is_enabled(void)
186{
187 return l2c_pfenable;
188}
189
190/*
191 * Set whether the L2 cache prefetch is enabled.
192 * Return whether the L2 prefetch was previously enabled.
193 */
194int meta_l2c_pf_enable(int pfenable);
195
196/*
197 * Flush the L2 cache.
198 * Return 1 if the L2 is disabled.
199 */
200int meta_l2c_flush(void);
201
202/*
203 * Write back all dirty cache lines in the L2 cache.
204 * Return 1 if the L2 is disabled or there isn't any writeback.
205 */
206static inline int meta_l2c_writeback(void)
207{
208 unsigned long flags;
209 int en;
210
211 /* no need to purge if it's not a writeback cache */
212 if (!meta_l2c_is_writeback())
213 return 1;
214
215 /*
216 * Purge only works if the L2 is enabled, and involves reading back to
217 * detect completion, so keep this operation atomic with other threads.
218 */
219 __global_lock1(flags);
220 en = meta_l2c_is_enabled();
221 if (likely(en)) {
222 wr_fence();
223 _meta_l2c_purge();
224 }
225 __global_unlock1(flags);
226
227 return !en;
228}
229
230#else /* CONFIG_METAG_L2C */
231
232#define meta_l2c_config() 0
233#define meta_l2c_is_present() 0
234#define meta_l2c_is_writeback() 0
235#define meta_l2c_is_unified() 0
236#define meta_l2c_size() 0
237#define meta_l2c_ways() 0
238#define meta_l2c_linesize() 0
239#define meta_l2c_revision() 0
240
241#define meta_l2c_is_enabled() 0
242#define _meta_l2c_pf_is_enabled() 0
243#define meta_l2c_pf_is_enabled() 0
244#define meta_l2c_disable() 1
245#define meta_l2c_enable() 0
246#define meta_l2c_pf_enable(X) 0
247static inline int meta_l2c_flush(void)
248{
249 return 1;
250}
251static inline int meta_l2c_writeback(void)
252{
253 return 1;
254}
255
256#endif /* CONFIG_METAG_L2C */
257
258#endif /* _METAG_L2CACHE_H */
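
The configuration accessors above are enough to derive the rest of the L2 geometry; for instance the number of sets per way follows from the size, the way count and the line size. A small sketch (illustrative helper, not part of this patch):

	/*
	 * E.g. a 256KiB, 4-way L2 with 64-byte lines gives
	 * 256*1024 / (4*64) = 1024 sets per way. Returns 0 if the L2 is
	 * absent or the line size is unrecognised.
	 */
	static inline unsigned int example_l2c_sets(void)
	{
		unsigned int ways = meta_l2c_ways();
		unsigned int line = meta_l2c_linesize();

		if (!ways || !line)
			return 0;
		return meta_l2c_size() / (ways * line);
	}
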
diff --git a/arch/metag/include/asm/tlb.h b/arch/metag/include/asm/tlb.h
new file mode 100644
index 000000000000..048282f1cc1f
--- /dev/null
+++ b/arch/metag/include/asm/tlb.h
@@ -0,0 +1,36 @@
1#ifndef __ASM_METAG_TLB_H
2#define __ASM_METAG_TLB_H
3
4#include <asm/cacheflush.h>
5#include <asm/page.h>
6
7/* Note, read http://lkml.org/lkml/2004/1/15/6 */
8
9#ifdef CONFIG_METAG_META12
10
11#define tlb_start_vma(tlb, vma) \
12 do { \
13 if (!tlb->fullmm) \
14 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
15 } while (0)
16
17#define tlb_end_vma(tlb, vma) \
18 do { \
19 if (!tlb->fullmm) \
20 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
21 } while (0)
22
23
24#else
25
26#define tlb_start_vma(tlb, vma) do { } while (0)
27#define tlb_end_vma(tlb, vma) do { } while (0)
28
29#endif
30
31#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
32#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
33
34#include <asm-generic/tlb.h>
35
36#endif
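
For context, tlb_start_vma()/tlb_end_vma() above are invoked by the generic mmu_gather code while a VMA's mappings are torn down. Roughly, and only as an outline rather than the exact generic-code calling sequence:

	/*
	 * Simplified unmap sequence showing where the hooks fire:
	 *
	 *   tlb_gather_mmu(&tlb, mm, ...);
	 *   tlb_start_vma(&tlb, vma);    Meta1: flush_cache_range() (VIVT L1)
	 *       ... PTEs cleared, pages queued for freeing ...
	 *   tlb_end_vma(&tlb, vma);      Meta1: flush_tlb_range()
	 *   tlb_finish_mmu(&tlb, ...);   eventually calls tlb_flush() -> flush_tlb_mm()
	 */
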
diff --git a/arch/metag/include/asm/tlbflush.h b/arch/metag/include/asm/tlbflush.h
new file mode 100644
index 000000000000..566acf918a64
--- /dev/null
+++ b/arch/metag/include/asm/tlbflush.h
@@ -0,0 +1,77 @@
1#ifndef __ASM_METAG_TLBFLUSH_H
2#define __ASM_METAG_TLBFLUSH_H
3
4#include <linux/io.h>
5#include <linux/sched.h>
6#include <asm/metag_mem.h>
7#include <asm/pgalloc.h>
8
9/*
10 * TLB flushing:
11 *
12 * - flush_tlb() flushes the current mm struct's TLB entries
13 * - flush_tlb_all() flushes all processes' TLB entries
14 * - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
15 * - flush_tlb_page(vma, vmaddr) flushes one page
16 * - flush_tlb_range(vma, start, end) flushes a range of pages
17 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
18 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
19 *
20 * FIXME: Meta 2 can flush single TLB entries.
21 *
22 */
23
24#if defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP)
25static inline void __flush_tlb(void)
26{
27 /* flush TLB entries for just the current hardware thread */
28 int thread = hard_processor_id();
29 metag_out32(0, (LINSYSCFLUSH_TxMMCU_BASE +
30 LINSYSCFLUSH_TxMMCU_STRIDE * thread));
31}
32#else
33static inline void __flush_tlb(void)
34{
35 /* flush TLB entries for all hardware threads */
36 metag_out32(0, LINSYSCFLUSH_MMCU);
37}
38#endif /* defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP) */
39
40#define flush_tlb() __flush_tlb()
41
42#define flush_tlb_all() __flush_tlb()
43
44#define local_flush_tlb_all() __flush_tlb()
45
46static inline void flush_tlb_mm(struct mm_struct *mm)
47{
48 if (mm == current->active_mm)
49 __flush_tlb();
50}
51
52static inline void flush_tlb_page(struct vm_area_struct *vma,
53 unsigned long addr)
54{
55 flush_tlb_mm(vma->vm_mm);
56}
57
58static inline void flush_tlb_range(struct vm_area_struct *vma,
59 unsigned long start, unsigned long end)
60{
61 flush_tlb_mm(vma->vm_mm);
62}
63
64static inline void flush_tlb_pgtables(struct mm_struct *mm,
65 unsigned long start, unsigned long end)
66{
67 flush_tlb_mm(mm);
68}
69
70static inline void flush_tlb_kernel_range(unsigned long start,
71 unsigned long end)
72{
73 flush_tlb_all();
74}
75
76#endif /* __ASM_METAG_TLBFLUSH_H */
77
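
Every variant above currently degrades to a full flush through __flush_tlb(); a caller touching kernel mappings would do something like the following (illustrative caller, not part of this patch):

	#include <asm/page.h>
	#include <asm/tlbflush.h>

	/* Illustrative: after changing a kernel PTE covering 'addr'. */
	static void example_update_kernel_mapping(unsigned long addr)
	{
		/* ... update the page table entry ... */
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
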
diff --git a/arch/metag/mm/cache.c b/arch/metag/mm/cache.c
new file mode 100644
index 000000000000..b713ec01c204
--- /dev/null
+++ b/arch/metag/mm/cache.c
@@ -0,0 +1,441 @@
1/*
2 * arch/metag/mm/cache.c
3 *
4 * Copyright (C) 2001, 2002, 2005, 2007, 2012 Imagination Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify it under
7 * the terms of the GNU General Public License version 2 as published by the
8 * Free Software Foundation.
9 *
10 * Cache control code
11 */
12
13#include <linux/export.h>
14#include <linux/io.h>
15#include <asm/cacheflush.h>
16#include <asm/core_reg.h>
17#include <asm/metag_isa.h>
18#include <asm/metag_mem.h>
19#include <asm/metag_regs.h>
20
21#define DEFAULT_CACHE_WAYS_LOG2 2
22
23/*
24 * Size of a set in the caches. Initialised for default 16K stride, adjusted
25 * according to values passed through TBI global heap segment via LDLK (on ATP)
26 * or config registers (on HTP/MTP)
27 */
28static int dcache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
29 - DEFAULT_CACHE_WAYS_LOG2;
30static int icache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
31 - DEFAULT_CACHE_WAYS_LOG2;
32/*
33 * The number of sets in the caches. Initialised for HTP/ATP, adjusted
34 * according to NOMMU setting in config registers
35 */
36static unsigned char dcache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
37static unsigned char icache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
38
39/**
40 * metag_cache_probe() - Probe L1 cache configuration.
41 *
42 * Probe the L1 cache configuration to aid the L1 physical cache flushing
43 * functions.
44 */
45void metag_cache_probe(void)
46{
47#ifndef CONFIG_METAG_META12
48 int coreid = metag_in32(METAC_CORE_ID);
49 int config = metag_in32(METAC_CORE_CONFIG2);
50 int cfgcache = coreid & METAC_COREID_CFGCACHE_BITS;
51
52 if (cfgcache == METAC_COREID_CFGCACHE_TYPE0 ||
53 cfgcache == METAC_COREID_CFGCACHE_PRIVNOMMU) {
54 icache_sets_log2 = 1;
55 dcache_sets_log2 = 1;
56 }
57
58	/* For normal size caches, the smallest size is 4KB.
59	   For small caches, the smallest size is 64B. */
60 icache_set_shift = (config & METAC_CORECFG2_ICSMALL_BIT)
61 ? 6 : 12;
62 icache_set_shift += (config & METAC_CORE_C2ICSZ_BITS)
63 >> METAC_CORE_C2ICSZ_S;
64 icache_set_shift -= icache_sets_log2;
65
66 dcache_set_shift = (config & METAC_CORECFG2_DCSMALL_BIT)
67 ? 6 : 12;
68 dcache_set_shift += (config & METAC_CORECFG2_DCSZ_BITS)
69 >> METAC_CORECFG2_DCSZ_S;
70 dcache_set_shift -= dcache_sets_log2;
71#else
72 /* Extract cache sizes from global heap segment */
73 unsigned long val, u;
74 int width, shift, addend;
75 PTBISEG seg;
76
77 seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
78 TBID_SEGSCOPE_GLOBAL,
79 TBID_SEGTYPE_HEAP));
80 if (seg != NULL) {
81 val = seg->Data[1];
82
83 /* Work out width of I-cache size bit-field */
84 u = ((unsigned long) METAG_TBI_ICACHE_SIZE_BITS)
85 >> METAG_TBI_ICACHE_SIZE_S;
86 width = 0;
87 while (u & 1) {
88 width++;
89 u >>= 1;
90 }
91 /* Extract sign-extended size addend value */
92 shift = 32 - (METAG_TBI_ICACHE_SIZE_S + width);
93 addend = (long) ((val & METAG_TBI_ICACHE_SIZE_BITS)
94 << shift)
95 >> (shift + METAG_TBI_ICACHE_SIZE_S);
96 /* Now calculate I-cache set size */
97 icache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
98 - DEFAULT_CACHE_WAYS_LOG2)
99 + addend;
100
101 /* Similarly for D-cache */
102 u = ((unsigned long) METAG_TBI_DCACHE_SIZE_BITS)
103 >> METAG_TBI_DCACHE_SIZE_S;
104 width = 0;
105 while (u & 1) {
106 width++;
107 u >>= 1;
108 }
109 shift = 32 - (METAG_TBI_DCACHE_SIZE_S + width);
110 addend = (long) ((val & METAG_TBI_DCACHE_SIZE_BITS)
111 << shift)
112 >> (shift + METAG_TBI_DCACHE_SIZE_S);
113 dcache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
114 - DEFAULT_CACHE_WAYS_LOG2)
115 + addend;
116 }
117#endif
118}
119
120static void metag_phys_data_cache_flush(const void *start)
121{
122 unsigned long flush0, flush1, flush2, flush3;
123 int loops, step;
124 int thread;
125 int part, offset;
126 int set_shift;
127
128 /* Use a sequence of writes to flush the cache region requested */
129 thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
130 >> TXENABLE_THREAD_S;
131
132 /* Cache is broken into sets which lie in contiguous RAMs */
133 set_shift = dcache_set_shift;
134
135 /* Move to the base of the physical cache flush region */
136 flush0 = LINSYSCFLUSH_DCACHE_LINE;
137 step = 64;
138
139 /* Get partition data for this thread */
140 part = metag_in32(SYSC_DCPART0 +
141 (SYSC_xCPARTn_STRIDE * thread));
142
143 if ((int)start < 0)
144 /* Access Global vs Local partition */
145 part >>= SYSC_xCPARTG_AND_S
146 - SYSC_xCPARTL_AND_S;
147
148 /* Extract offset and move SetOff */
149 offset = (part & SYSC_xCPARTL_OR_BITS)
150 >> SYSC_xCPARTL_OR_S;
151 flush0 += (offset << (set_shift - 4));
152
153 /* Shrink size */
154 part = (part & SYSC_xCPARTL_AND_BITS)
155 >> SYSC_xCPARTL_AND_S;
156 loops = ((part + 1) << (set_shift - 4));
157
158 /* Reduce loops by step of cache line size */
159 loops /= step;
160
161 flush1 = flush0 + (1 << set_shift);
162 flush2 = flush0 + (2 << set_shift);
163 flush3 = flush0 + (3 << set_shift);
164
165 if (dcache_sets_log2 == 1) {
166 flush2 = flush1;
167 flush3 = flush1 + step;
168 flush1 = flush0 + step;
169 step <<= 1;
170 loops >>= 1;
171 }
172
173	/* Clear 'loops' lines in each way of the cache */
174 while (loops-- != 0) {
175 /* Clear the ways. */
176#if 0
177 /*
178 * GCC doesn't generate very good code for this so we
179 * provide inline assembly instead.
180 */
181 metag_out8(0, flush0);
182 metag_out8(0, flush1);
183 metag_out8(0, flush2);
184 metag_out8(0, flush3);
185
186 flush0 += step;
187 flush1 += step;
188 flush2 += step;
189 flush3 += step;
190#else
191 asm volatile (
192 "SETB\t[%0+%4++],%5\n"
193 "SETB\t[%1+%4++],%5\n"
194 "SETB\t[%2+%4++],%5\n"
195 "SETB\t[%3+%4++],%5\n"
196 : "+e" (flush0),
197 "+e" (flush1),
198 "+e" (flush2),
199 "+e" (flush3)
200 : "e" (step), "a" (0));
201#endif
202 }
203}
204
205void metag_data_cache_flush_all(const void *start)
206{
207 if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
208		/* No need to flush the data cache; it's not actually enabled */
209 return;
210
211 metag_phys_data_cache_flush(start);
212}
213
214void metag_data_cache_flush(const void *start, int bytes)
215{
216 unsigned long flush0;
217 int loops, step;
218
219 if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
220		/* No need to flush the data cache; it's not actually enabled */
221 return;
222
223 if (bytes >= 4096) {
224 metag_phys_data_cache_flush(start);
225 return;
226 }
227
228 /* Use linear cache flush mechanism on META IP */
229 flush0 = (int)start;
230 loops = ((int)start & (DCACHE_LINE_BYTES - 1)) + bytes +
231 (DCACHE_LINE_BYTES - 1);
232 loops >>= DCACHE_LINE_S;
233
234#define PRIM_FLUSH(addr, offset) do { \
235 int __addr = ((int) (addr)) + ((offset) * 64); \
236 __builtin_dcache_flush((void *)(__addr)); \
237 } while (0)
238
239#define LOOP_INC (4*64)
240
241 do {
242 /* By default stop */
243 step = 0;
244
245 switch (loops) {
246 /* Drop Thru Cases! */
247 default:
248 PRIM_FLUSH(flush0, 3);
249 loops -= 4;
250 step = 1;
251 case 3:
252 PRIM_FLUSH(flush0, 2);
253 case 2:
254 PRIM_FLUSH(flush0, 1);
255 case 1:
256 PRIM_FLUSH(flush0, 0);
257 flush0 += LOOP_INC;
258 case 0:
259 break;
260 }
261 } while (step);
262}
263EXPORT_SYMBOL(metag_data_cache_flush);
264
265static void metag_phys_code_cache_flush(const void *start, int bytes)
266{
267 unsigned long flush0, flush1, flush2, flush3, end_set;
268 int loops, step;
269 int thread;
270 int set_shift, set_size;
271 int part, offset;
272
273 /* Use a sequence of writes to flush the cache region requested */
274 thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
275 >> TXENABLE_THREAD_S;
276 set_shift = icache_set_shift;
277
278 /* Move to the base of the physical cache flush region */
279 flush0 = LINSYSCFLUSH_ICACHE_LINE;
280 step = 64;
281
282 /* Get partition code for this thread */
283 part = metag_in32(SYSC_ICPART0 +
284 (SYSC_xCPARTn_STRIDE * thread));
285
286 if ((int)start < 0)
287 /* Access Global vs Local partition */
288 part >>= SYSC_xCPARTG_AND_S-SYSC_xCPARTL_AND_S;
289
290 /* Extract offset and move SetOff */
291 offset = (part & SYSC_xCPARTL_OR_BITS)
292 >> SYSC_xCPARTL_OR_S;
293 flush0 += (offset << (set_shift - 4));
294
295 /* Shrink size */
296 part = (part & SYSC_xCPARTL_AND_BITS)
297 >> SYSC_xCPARTL_AND_S;
298 loops = ((part + 1) << (set_shift - 4));
299
300 /* Where does the Set end? */
301 end_set = flush0 + loops;
302 set_size = loops;
303
304#ifdef CONFIG_METAG_META12
305 if ((bytes < 4096) && (bytes < loops)) {
306 /* Unreachable on HTP/MTP */
307		/* Only target the sets that could be relevant */
308 flush0 += (loops - step) & ((int) start);
309 loops = (((int) start) & (step-1)) + bytes + step - 1;
310 }
311#endif
312
313 /* Reduce loops by step of cache line size */
314 loops /= step;
315
316 flush1 = flush0 + (1<<set_shift);
317 flush2 = flush0 + (2<<set_shift);
318 flush3 = flush0 + (3<<set_shift);
319
320 if (icache_sets_log2 == 1) {
321 flush2 = flush1;
322 flush3 = flush1 + step;
323 flush1 = flush0 + step;
324#if 0
325 /* flush0 will stop one line early in this case
326 * (flush1 will do the final line).
327 * However we don't correct end_set here at the moment
328 * because it will never wrap on HTP/MTP
329 */
330 end_set -= step;
331#endif
332 step <<= 1;
333 loops >>= 1;
334 }
335
336	/* Clear 'loops' lines in each way of the cache */
337 while (loops-- != 0) {
338#if 0
339 /*
340 * GCC doesn't generate very good code for this so we
341 * provide inline assembly instead.
342 */
343 /* Clear the ways */
344 metag_out8(0, flush0);
345 metag_out8(0, flush1);
346 metag_out8(0, flush2);
347 metag_out8(0, flush3);
348
349 flush0 += step;
350 flush1 += step;
351 flush2 += step;
352 flush3 += step;
353#else
354 asm volatile (
355 "SETB\t[%0+%4++],%5\n"
356 "SETB\t[%1+%4++],%5\n"
357 "SETB\t[%2+%4++],%5\n"
358 "SETB\t[%3+%4++],%5\n"
359 : "+e" (flush0),
360 "+e" (flush1),
361 "+e" (flush2),
362 "+e" (flush3)
363 : "e" (step), "a" (0));
364#endif
365
366 if (flush0 == end_set) {
367 /* Wrap within Set 0 */
368 flush0 -= set_size;
369 flush1 -= set_size;
370 flush2 -= set_size;
371 flush3 -= set_size;
372 }
373 }
374}
375
376void metag_code_cache_flush_all(const void *start)
377{
378 if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
379		/* No need to flush the code cache; it's not actually enabled */
380 return;
381
382 metag_phys_code_cache_flush(start, 4096);
383}
384
385void metag_code_cache_flush(const void *start, int bytes)
386{
387#ifndef CONFIG_METAG_META12
388 void *flush;
389 int loops, step;
390#endif /* !CONFIG_METAG_META12 */
391
392 if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
393		/* No need to flush the code cache; it's not actually enabled */
394 return;
395
396#ifdef CONFIG_METAG_META12
397 /* CACHEWD isn't available on Meta1, so always do full cache flush */
398 metag_phys_code_cache_flush(start, bytes);
399
400#else /* CONFIG_METAG_META12 */
401 /* If large size do full physical cache flush */
402 if (bytes >= 4096) {
403 metag_phys_code_cache_flush(start, bytes);
404 return;
405 }
406
407 /* Use linear cache flush mechanism on META IP */
408 flush = (void *)((int)start & ~(ICACHE_LINE_BYTES-1));
409 loops = ((int)start & (ICACHE_LINE_BYTES-1)) + bytes +
410 (ICACHE_LINE_BYTES-1);
411 loops >>= ICACHE_LINE_S;
412
413#define PRIM_IFLUSH(addr, offset) \
414 __builtin_meta2_cachewd(((addr) + ((offset) * 64)), CACHEW_ICACHE_BIT)
415
416#define LOOP_INC (4*64)
417
418 do {
419 /* By default stop */
420 step = 0;
421
422 switch (loops) {
423 /* Drop Thru Cases! */
424 default:
425 PRIM_IFLUSH(flush, 3);
426 loops -= 4;
427 step = 1;
428 case 3:
429 PRIM_IFLUSH(flush, 2);
430 case 2:
431 PRIM_IFLUSH(flush, 1);
432 case 1:
433 PRIM_IFLUSH(flush, 0);
434 flush += LOOP_INC;
435 case 0:
436 break;
437 }
438 } while (step);
439#endif /* !CONFIG_METAG_META12 */
440}
441EXPORT_SYMBOL(metag_code_cache_flush);
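
A worked example of the linear flush arithmetic in metag_data_cache_flush() above, with made-up numbers:

	/*
	 * start = 0x40001010, bytes = 200, DCACHE_LINE_BYTES = 64:
	 *   loops = (0x10 + 200 + 63) >> 6 = 279 >> 6 = 4
	 * The unrolled switch then issues PRIM_FLUSH at offsets 3, 2, 1, 0 from
	 * flush0, i.e. at 0x400010d0, 0x40001090, 0x40001050 and 0x40001010,
	 * touching exactly the four cache lines (0x...1000, 0x...1040,
	 * 0x...1080, 0x...10c0) spanned by the 200-byte range.
	 */
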
diff --git a/arch/metag/mm/l2cache.c b/arch/metag/mm/l2cache.c
new file mode 100644
index 000000000000..c64ee615cf90
--- /dev/null
+++ b/arch/metag/mm/l2cache.c
@@ -0,0 +1,192 @@
1#include <linux/init.h>
2#include <linux/kernel.h>
3#include <linux/delay.h>
4
5#include <asm/l2cache.h>
6#include <asm/metag_isa.h>
7
8/* If non-0, then initialise the L2 cache */
9static int l2cache_init = 1;
10/* If non-0, then initialise the L2 cache prefetch */
11static int l2cache_init_pf = 1;
12
13int l2c_pfenable;
14
15static volatile u32 l2c_testdata[16] __initdata __aligned(64);
16
17static int __init parse_l2cache(char *p)
18{
19 char *cp = p;
20
21 if (get_option(&cp, &l2cache_init) != 1) {
22 pr_err("Bad l2cache parameter (%s)\n", p);
23 return 1;
24 }
25 return 0;
26}
27early_param("l2cache", parse_l2cache);
28
29static int __init parse_l2cache_pf(char *p)
30{
31 char *cp = p;
32
33 if (get_option(&cp, &l2cache_init_pf) != 1) {
34 pr_err("Bad l2cache_pf parameter (%s)\n", p);
35 return 1;
36 }
37 return 0;
38}
39early_param("l2cache_pf", parse_l2cache_pf);
40
41static int __init meta_l2c_setup(void)
42{
43 /*
44 * If the L2 cache isn't even present, don't do anything, but say so in
45 * the log.
46 */
47 if (!meta_l2c_is_present()) {
48 pr_info("L2 Cache: Not present\n");
49 return 0;
50 }
51
52 /*
53 * Check whether the line size is recognised.
54 */
55 if (!meta_l2c_linesize()) {
56 pr_warn_once("L2 Cache: unknown line size id (config=0x%08x)\n",
57 meta_l2c_config());
58 }
59
60 /*
61 * Initialise state.
62 */
63 l2c_pfenable = _meta_l2c_pf_is_enabled();
64
65 /*
66 * Enable the L2 cache and print to log whether it was already enabled
67 * by the bootloader.
68 */
69 if (l2cache_init) {
70 pr_info("L2 Cache: Enabling... ");
71 if (meta_l2c_enable())
72 pr_cont("already enabled\n");
73 else
74 pr_cont("done\n");
75 } else {
76 pr_info("L2 Cache: Not enabling\n");
77 }
78
79 /*
80 * Enable L2 cache prefetch.
81 */
82 if (l2cache_init_pf) {
83 pr_info("L2 Cache: Enabling prefetch... ");
84 if (meta_l2c_pf_enable(1))
85 pr_cont("already enabled\n");
86 else
87 pr_cont("done\n");
88 } else {
89 pr_info("L2 Cache: Not enabling prefetch\n");
90 }
91
92 return 0;
93}
94core_initcall(meta_l2c_setup);
95
96int meta_l2c_disable(void)
97{
98 unsigned long flags;
99 int en;
100
101 if (!meta_l2c_is_present())
102 return 1;
103
104 /*
105 * Prevent other threads writing during the writeback, otherwise the
106 * writes will get "lost" when the L2 is disabled.
107 */
108 __global_lock2(flags);
109 en = meta_l2c_is_enabled();
110 if (likely(en)) {
111 _meta_l2c_pf_enable(0);
112 wr_fence();
113 _meta_l2c_purge();
114 _meta_l2c_enable(0);
115 }
116 __global_unlock2(flags);
117
118 return !en;
119}
120
121int meta_l2c_enable(void)
122{
123 unsigned long flags;
124 int en;
125
126 if (!meta_l2c_is_present())
127 return 0;
128
129 /*
130 * Init (clearing the L2) can happen while the L2 is disabled, so other
131 * threads are safe to continue executing, however we must not init the
132 * cache if it's already enabled (dirty lines would be discarded), so
133 * this operation should still be atomic with other threads.
134 */
135 __global_lock1(flags);
136 en = meta_l2c_is_enabled();
137 if (likely(!en)) {
138 _meta_l2c_init();
139 _meta_l2c_enable(1);
140 _meta_l2c_pf_enable(l2c_pfenable);
141 }
142 __global_unlock1(flags);
143
144 return en;
145}
146
147int meta_l2c_pf_enable(int pfenable)
148{
149 unsigned long flags;
150 int en = l2c_pfenable;
151
152 if (!meta_l2c_is_present())
153 return 0;
154
155 /*
156 * We read modify write the enable register, so this operation must be
157 * atomic with other threads.
158 */
159 __global_lock1(flags);
160 en = l2c_pfenable;
161 l2c_pfenable = pfenable;
162 if (meta_l2c_is_enabled())
163 _meta_l2c_pf_enable(pfenable);
164 __global_unlock1(flags);
165
166 return en;
167}
168
169int meta_l2c_flush(void)
170{
171 unsigned long flags;
172 int en;
173
174 /*
175 * Prevent other threads writing during the writeback. This also
176 * involves read modify writes.
177 */
178 __global_lock2(flags);
179 en = meta_l2c_is_enabled();
180 if (likely(en)) {
181 _meta_l2c_pf_enable(0);
182 wr_fence();
183 _meta_l2c_purge();
184 _meta_l2c_enable(0);
185 _meta_l2c_init();
186 _meta_l2c_enable(1);
187 _meta_l2c_pf_enable(l2c_pfenable);
188 }
189 __global_unlock2(flags);
190
191 return !en;
192}
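
Note that the two early_param() hooks above make both features controllable from the kernel command line: booting with "l2cache=0 l2cache_pf=0" stops Linux from enabling the L2 cache and its hardware prefetch at boot, in which case meta_l2c_setup() logs "Not enabling" / "Not enabling prefetch" instead of initialising them.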