author    Greentime Hu <greentime@andestech.com>    2017-10-24 03:40:25 -0400
committer Greentime Hu <greentime@andestech.com>    2018-02-21 21:44:32 -0500
commit    7de9cf474083bfbba469f72dc208f7b51747632d (patch)
tree      45e8f3f41c736e8a6abc0995aaf5ab8d86bea917
parent    664eec400bf8f3ab4d41279d6fb674a66ff3ba94 (diff)
nds32: Cache and TLB routines
This patch contains cache and TLB maintenance functions.

Signed-off-by: Vincent Chen <vincentc@andestech.com>
Signed-off-by: Greentime Hu <greentime@andestech.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
-rw-r--r--  arch/nds32/include/asm/cache.h          12
-rw-r--r--  arch/nds32/include/asm/cache_info.h     13
-rw-r--r--  arch/nds32/include/asm/cacheflush.h     44
-rw-r--r--  arch/nds32/include/asm/mmu_context.h    68
-rw-r--r--  arch/nds32/include/asm/proc-fns.h       44
-rw-r--r--  arch/nds32/include/asm/tlb.h            28
-rw-r--r--  arch/nds32/include/asm/tlbflush.h       47
-rw-r--r--  arch/nds32/include/uapi/asm/cachectl.h  14
-rw-r--r--  arch/nds32/kernel/cacheinfo.c           49
-rw-r--r--  arch/nds32/mm/cacheflush.c             322
-rw-r--r--  arch/nds32/mm/proc.c                   533
-rw-r--r--  arch/nds32/mm/tlb.c                     50
12 files changed, 1224 insertions(+), 0 deletions(-)
diff --git a/arch/nds32/include/asm/cache.h b/arch/nds32/include/asm/cache.h
new file mode 100644
index 000000000000..347db4881c5f
--- /dev/null
+++ b/arch/nds32/include/asm/cache.h
@@ -0,0 +1,12 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4#ifndef __NDS32_CACHE_H__
5#define __NDS32_CACHE_H__
6
7#define L1_CACHE_BYTES 32
8#define L1_CACHE_SHIFT 5
9
10#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
11
12#endif /* __NDS32_CACHE_H__ */
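L1_CACHE_BYTES sizes the generic cache-alignment helpers, and ARCH_DMA_MINALIGN gives the minimum alignment for kernel buffers that may be handed to DMA. A minimal sketch of driver-side use follows; the descriptor type is hypothetical, invented only to illustrate the constant:

/* Keep each descriptor on its own 32-byte line so a writeback or
 * invalidate of one descriptor cannot clobber an unrelated neighbour. */
struct rx_desc {
	u32 status;
	u32 buf_addr;
} __attribute__((__aligned__(ARCH_DMA_MINALIGN)));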
diff --git a/arch/nds32/include/asm/cache_info.h b/arch/nds32/include/asm/cache_info.h
new file mode 100644
index 000000000000..38ec458ba543
--- /dev/null
+++ b/arch/nds32/include/asm/cache_info.h
@@ -0,0 +1,13 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4struct cache_info {
5 unsigned char ways;
6 unsigned char line_size;
7 unsigned short sets;
8 unsigned short size;
9#if defined(CONFIG_CPU_CACHE_ALIASING)
10 unsigned short aliasing_num;
11 unsigned int aliasing_mask;
12#endif
13};
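The aliasing fields only matter for a virtually indexed cache whose way size exceeds a page. The setup code lives elsewhere in this series, so the following derivation is only a sketch under that assumption (the helper name is hypothetical):

/* A VIPT cache aliases when sets * line_size > PAGE_SIZE; e.g. a way
 * of 256 sets * 32-byte lines = 8 KiB gives two page colours with
 * 4 KiB pages. */
static void setup_cache_aliasing(struct cache_info *info)
{
	unsigned long way_size = info->sets * info->line_size;

	info->aliasing_num = way_size / PAGE_SIZE;	/* page colours */
	info->aliasing_mask = way_size - 1;		/* e.g. 0x1fff */
}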
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
new file mode 100644
index 000000000000..7b9b20a381cb
--- /dev/null
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -0,0 +1,44 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4#ifndef __NDS32_CACHEFLUSH_H__
5#define __NDS32_CACHEFLUSH_H__
6
7#include <linux/mm.h>
8
9#define PG_dcache_dirty PG_arch_1
10
11#ifdef CONFIG_CPU_CACHE_ALIASING
12void flush_cache_mm(struct mm_struct *mm);
13void flush_cache_dup_mm(struct mm_struct *mm);
14void flush_cache_range(struct vm_area_struct *vma,
15 unsigned long start, unsigned long end);
16void flush_cache_page(struct vm_area_struct *vma,
17 unsigned long addr, unsigned long pfn);
18void flush_cache_kmaps(void);
19void flush_cache_vmap(unsigned long start, unsigned long end);
20void flush_cache_vunmap(unsigned long start, unsigned long end);
21
22#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
23void flush_dcache_page(struct page *page);
24void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
25 unsigned long vaddr, void *dst, void *src, int len);
26void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
27 unsigned long vaddr, void *dst, void *src, int len);
28
29#define ARCH_HAS_FLUSH_ANON_PAGE
30void flush_anon_page(struct vm_area_struct *vma,
31 struct page *page, unsigned long vaddr);
32
33#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
34void flush_kernel_dcache_page(struct page *page);
35void flush_icache_range(unsigned long start, unsigned long end);
36void flush_icache_page(struct vm_area_struct *vma, struct page *page);
37#define flush_dcache_mmap_lock(mapping) spin_lock_irq(&(mapping)->tree_lock)
38#define flush_dcache_mmap_unlock(mapping) spin_unlock_irq(&(mapping)->tree_lock)
39
40#else
41#include <asm-generic/cacheflush.h>
42#endif
43
44#endif /* __NDS32_CACHEFLUSH_H__ */
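flush_icache_range() follows the usual kernel contract: code written through a kernel virtual mapping must be flushed before it is executed. A brief usage sketch (the buffer and length names are invented for illustration):

/* e.g. after copying a trampoline into place: */
memcpy(code_buf, insn_template, insn_len);
flush_icache_range((unsigned long)code_buf,
		   (unsigned long)code_buf + insn_len);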
diff --git a/arch/nds32/include/asm/mmu_context.h b/arch/nds32/include/asm/mmu_context.h
new file mode 100644
index 000000000000..fd7d13cefccc
--- /dev/null
+++ b/arch/nds32/include/asm/mmu_context.h
@@ -0,0 +1,68 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4#ifndef __ASM_NDS32_MMU_CONTEXT_H
5#define __ASM_NDS32_MMU_CONTEXT_H
6
7#include <linux/spinlock.h>
8#include <asm/tlbflush.h>
9#include <asm/proc-fns.h>
10#include <asm-generic/mm_hooks.h>
11
12static inline int
13init_new_context(struct task_struct *tsk, struct mm_struct *mm)
14{
15 mm->context.id = 0;
16 return 0;
17}
18
19#define destroy_context(mm) do { } while(0)
20
21#define CID_BITS 9
22extern spinlock_t cid_lock;
23extern unsigned int cpu_last_cid;
24
25static inline void __new_context(struct mm_struct *mm)
26{
27 unsigned int cid;
28 unsigned long flags;
29
30 spin_lock_irqsave(&cid_lock, flags);
31 cid = cpu_last_cid;
32 cpu_last_cid += 1 << TLB_MISC_offCID;
33 if (cpu_last_cid == 0)
34 cpu_last_cid = 1 << TLB_MISC_offCID << CID_BITS;
35
36 if ((cid & TLB_MISC_mskCID) == 0)
37 flush_tlb_all();
38 spin_unlock_irqrestore(&cid_lock, flags);
39
40 mm->context.id = cid;
41}
42
43static inline void check_context(struct mm_struct *mm)
44{
45 if (unlikely
46 ((mm->context.id ^ cpu_last_cid) >> TLB_MISC_offCID >> CID_BITS))
47 __new_context(mm);
48}
49
50static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
51{
52}
53
54static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
55 struct task_struct *tsk)
56{
57 unsigned int cpu = smp_processor_id();
58
59 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
60 check_context(next);
61 cpu_switch_mm(next);
62 }
63}
64
65#define deactivate_mm(tsk,mm) do { } while (0)
66#define activate_mm(prev,next) switch_mm(prev, next, NULL)
67
68#endif
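The context ID is packed into the CID field of the $TLB_MISC register; CID_BITS = 9 leaves 512 usable IDs per generation, with the bits above acting as a generation counter. A worked illustration of the rollover logic, assuming TLB_MISC_offCID is 0 (the real offset comes from register definitions outside this patch):

/* Assuming TLB_MISC_offCID == 0:
 *   bits [8:0]  of mm->context.id = CID programmed into the TLB,
 *   bits [31:9]                   = generation number.
 * check_context() allocates a fresh CID whenever
 *   (mm->context.id ^ cpu_last_cid) >> CID_BITS != 0,
 * i.e. the mm's generation is stale, and __new_context() calls
 * flush_tlb_all() each time the CID field wraps to 0, so no
 * translation from an older generation can survive. */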
diff --git a/arch/nds32/include/asm/proc-fns.h b/arch/nds32/include/asm/proc-fns.h
new file mode 100644
index 000000000000..bedc4f59e064
--- /dev/null
+++ b/arch/nds32/include/asm/proc-fns.h
@@ -0,0 +1,44 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4#ifndef __NDS32_PROCFNS_H__
5#define __NDS32_PROCFNS_H__
6
7#ifdef __KERNEL__
8#include <asm/page.h>
9
10struct mm_struct;
11struct vm_area_struct;
12extern void cpu_proc_init(void);
13extern void cpu_proc_fin(void);
14extern void cpu_do_idle(void);
15extern void cpu_reset(unsigned long reset);
16extern void cpu_switch_mm(struct mm_struct *mm);
17
18extern void cpu_dcache_inval_all(void);
19extern void cpu_dcache_wbinval_all(void);
20extern void cpu_dcache_inval_page(unsigned long page);
21extern void cpu_dcache_wb_page(unsigned long page);
22extern void cpu_dcache_wbinval_page(unsigned long page);
23extern void cpu_dcache_inval_range(unsigned long start, unsigned long end);
24extern void cpu_dcache_wb_range(unsigned long start, unsigned long end);
25extern void cpu_dcache_wbinval_range(unsigned long start, unsigned long end);
26
27extern void cpu_icache_inval_all(void);
28extern void cpu_icache_inval_page(unsigned long page);
29extern void cpu_icache_inval_range(unsigned long start, unsigned long end);
30
31extern void cpu_cache_wbinval_page(unsigned long page, int flushi);
32extern void cpu_cache_wbinval_range(unsigned long start,
33 unsigned long end, int flushi);
34extern void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
35 unsigned long start,
36 unsigned long end, bool flushi,
37 bool wbd);
38
39extern void cpu_dma_wb_range(unsigned long start, unsigned long end);
40extern void cpu_dma_inval_range(unsigned long start, unsigned long end);
41extern void cpu_dma_wbinval_range(unsigned long start, unsigned long end);
42
43#endif /* __KERNEL__ */
44#endif /* __NDS32_PROCFNS_H__ */
diff --git a/arch/nds32/include/asm/tlb.h b/arch/nds32/include/asm/tlb.h
new file mode 100644
index 000000000000..b35ae5eae3ab
--- /dev/null
+++ b/arch/nds32/include/asm/tlb.h
@@ -0,0 +1,28 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4#ifndef __ASMNDS32_TLB_H
5#define __ASMNDS32_TLB_H
6
7#define tlb_start_vma(tlb,vma) \
8 do { \
9 if (!tlb->fullmm) \
10 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
11 } while (0)
12
13#define tlb_end_vma(tlb,vma) \
14 do { \
15		if (!tlb->fullmm) \
16 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
17 } while (0)
18
19#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
20
21#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
22
23#include <asm-generic/tlb.h>
24
25#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
26#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
27
28#endif
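These macros slot into the generic mmu_gather machinery; the unmap path drives them roughly as below. This is a simplified sketch of the generic mm code of this era, not part of the patch:

struct mmu_gather tlb;

tlb_gather_mmu(&tlb, mm, start, end);
unmap_vmas(&tlb, vma, start, end);	/* tlb_start_vma() flushes the
					 * caches and tlb_end_vma() the
					 * TLB around clearing each
					 * VMA's PTEs */
tlb_finish_mmu(&tlb, start, end);	/* tlb_flush() -> flush_tlb_mm() */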
diff --git a/arch/nds32/include/asm/tlbflush.h b/arch/nds32/include/asm/tlbflush.h
new file mode 100644
index 000000000000..9b411f401903
--- /dev/null
+++ b/arch/nds32/include/asm/tlbflush.h
@@ -0,0 +1,47 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4#ifndef _ASMNDS32_TLBFLUSH_H
5#define _ASMNDS32_TLBFLUSH_H
6
7#include <linux/spinlock.h>
8#include <linux/mm.h>
9#include <nds32_intrinsic.h>
10
11static inline void local_flush_tlb_all(void)
12{
13 __nds32__tlbop_flua();
14 __nds32__isb();
15}
16
17static inline void local_flush_tlb_mm(struct mm_struct *mm)
18{
19 __nds32__tlbop_flua();
20 __nds32__isb();
21}
22
23static inline void local_flush_tlb_kernel_range(unsigned long start,
24 unsigned long end)
25{
26 while (start < end) {
27 __nds32__tlbop_inv(start);
28 __nds32__isb();
29 start += PAGE_SIZE;
30 }
31}
32
33void local_flush_tlb_range(struct vm_area_struct *vma,
34 unsigned long start, unsigned long end);
35void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
36
37#define flush_tlb_all local_flush_tlb_all
38#define flush_tlb_mm local_flush_tlb_mm
39#define flush_tlb_range local_flush_tlb_range
40#define flush_tlb_page local_flush_tlb_page
41#define flush_tlb_kernel_range local_flush_tlb_kernel_range
42
43void update_mmu_cache(struct vm_area_struct *vma,
44 unsigned long address, pte_t * pte);
45void tlb_migrate_finish(struct mm_struct *mm);
46
47#endif
diff --git a/arch/nds32/include/uapi/asm/cachectl.h b/arch/nds32/include/uapi/asm/cachectl.h
new file mode 100644
index 000000000000..4cdca9b23974
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/cachectl.h
@@ -0,0 +1,14 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 1994, 1995, 1996 by Ralf Baechle
3// Copyright (C) 2005-2017 Andes Technology Corporation
4#ifndef _ASM_CACHECTL
5#define _ASM_CACHECTL
6
7/*
8 * Options for cacheflush system call
9 */
10#define ICACHE 0 /* flush instruction cache */
11#define DCACHE 1 /* writeback and flush data cache */
12#define BCACHE 2 /* flush instruction cache + writeback and flush data cache */
13
14#endif /* _ASM_CACHECTL */
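These constants are the operands of the nds32 cacheflush(2) system call, which is wired up elsewhere in this series. A user-space sketch, assuming the MIPS-like cacheflush(addr, nbytes, op) convention that this header's heritage suggests and that the call is exposed as __NR_cacheflush:

#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical JIT-style caller syncing freshly written code: */
static void sync_icache(void *buf, int len)
{
	syscall(__NR_cacheflush, buf, len, BCACHE);
}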
diff --git a/arch/nds32/kernel/cacheinfo.c b/arch/nds32/kernel/cacheinfo.c
new file mode 100644
index 000000000000..0a7bc696dd55
--- /dev/null
+++ b/arch/nds32/kernel/cacheinfo.c
@@ -0,0 +1,49 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4#include <linux/bitops.h>
5#include <linux/cacheinfo.h>
6#include <linux/cpu.h>
7
8static void ci_leaf_init(struct cacheinfo *this_leaf,
9 enum cache_type type, unsigned int level)
10{
11 char cache_type = (type & CACHE_TYPE_INST ? ICACHE : DCACHE);
12
13 this_leaf->level = level;
14 this_leaf->type = type;
15 this_leaf->coherency_line_size = CACHE_LINE_SIZE(cache_type);
16	this_leaf->number_of_sets = CACHE_SET(cache_type);
17 this_leaf->ways_of_associativity = CACHE_WAY(cache_type);
18 this_leaf->size = this_leaf->number_of_sets *
19 this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
20#if defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
21 this_leaf->attributes = CACHE_WRITE_THROUGH;
22#else
23 this_leaf->attributes = CACHE_WRITE_BACK;
24#endif
25}
26
27int init_cache_level(unsigned int cpu)
28{
29 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
30
31	/* Only one level, and the I/D caches are separate. */
32 this_cpu_ci->num_levels = 1;
33 this_cpu_ci->num_leaves = 2;
34 return 0;
35}
36
37int populate_cache_leaves(unsigned int cpu)
38{
39 unsigned int level, idx;
40 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
41 struct cacheinfo *this_leaf = this_cpu_ci->info_list;
42
43 for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
44 idx < this_cpu_ci->num_leaves; idx++, level++) {
45 ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
46 ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
47 }
48 return 0;
49}
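ci_leaf_init() reconstructs the leaf size from the cache geometry; for a hypothetical 4-way cache with 128 sets of 32-byte lines the exported numbers would work out as:

/* size = number_of_sets * coherency_line_size * ways_of_associativity
 *      = 128 * 32 * 4 = 16384 bytes (16 KiB),
 * visible under /sys/devices/system/cpu/cpu0/cache/index<n>/ */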
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
new file mode 100644
index 000000000000..6eb786a399a2
--- /dev/null
+++ b/arch/nds32/mm/cacheflush.c
@@ -0,0 +1,322 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4#include <linux/mm.h>
5#include <linux/sched.h>
6#include <linux/fs.h>
7#include <linux/pagemap.h>
8#include <linux/module.h>
9#include <asm/cacheflush.h>
10#include <asm/proc-fns.h>
11#include <asm/shmparam.h>
12#include <asm/cache_info.h>
13
14extern struct cache_info L1_cache_info[2];
15
16#ifndef CONFIG_CPU_CACHE_ALIASING
17void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
18 pte_t * pte)
19{
20 struct page *page;
21 unsigned long pfn = pte_pfn(*pte);
22 unsigned long flags;
23
24 if (!pfn_valid(pfn))
25 return;
26
27 if (vma->vm_mm == current->active_mm) {
28 local_irq_save(flags);
29 __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
30 __nds32__tlbop_rwr(*pte);
31 __nds32__isb();
32 local_irq_restore(flags);
33 }
34 page = pfn_to_page(pfn);
35
36 if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
37 (vma->vm_flags & VM_EXEC)) {
38
39 if (!PageHighMem(page)) {
40 cpu_cache_wbinval_page((unsigned long)
41 page_address(page),
42 vma->vm_flags & VM_EXEC);
43 } else {
44 unsigned long kaddr = (unsigned long)kmap_atomic(page);
45 cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
46 kunmap_atomic((void *)kaddr);
47 }
48 }
49}
50#else
51extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
52
53static inline unsigned long aliasing(unsigned long addr, unsigned long page)
54{
55 return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
56}
57
58static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
59{
60 unsigned long kaddr, pte;
61
62#define BASE_ADDR0 0xffffc000
63 kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
64 pte = (pa | PAGE_KERNEL);
65 __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
66 __nds32__tlbop_rwlk(pte);
67 __nds32__isb();
68 return kaddr;
69}
70
71static inline void kunmap01(unsigned long kaddr)
72{
73 __nds32__tlbop_unlk(kaddr);
74 __nds32__tlbop_inv(kaddr);
75 __nds32__isb();
76}
77
78static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
79{
80 unsigned long kaddr, pte;
81
82#define BASE_ADDR1 0xffff8000
83 kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
84 pte = (pa | PAGE_KERNEL);
85 __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
86 __nds32__tlbop_rwlk(pte);
87 __nds32__isb();
88 return kaddr;
89}
90
91void flush_cache_mm(struct mm_struct *mm)
92{
93 unsigned long flags;
94
95 local_irq_save(flags);
96 cpu_dcache_wbinval_all();
97 cpu_icache_inval_all();
98 local_irq_restore(flags);
99}
100
101void flush_cache_dup_mm(struct mm_struct *mm)
102{
103}
104
105void flush_cache_range(struct vm_area_struct *vma,
106 unsigned long start, unsigned long end)
107{
108 unsigned long flags;
109
110 if ((end - start) > 8 * PAGE_SIZE) {
111 cpu_dcache_wbinval_all();
112 if (vma->vm_flags & VM_EXEC)
113 cpu_icache_inval_all();
114 return;
115 }
116 local_irq_save(flags);
117 while (start < end) {
118 if (va_present(vma->vm_mm, start))
119 cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
120 start += PAGE_SIZE;
121 }
122 local_irq_restore(flags);
123 return;
124}
125
126void flush_cache_page(struct vm_area_struct *vma,
127 unsigned long addr, unsigned long pfn)
128{
129 unsigned long vto, flags;
130
131 local_irq_save(flags);
132 vto = kremap0(addr, pfn << PAGE_SHIFT);
133 cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
134 kunmap01(vto);
135 local_irq_restore(flags);
136}
137
138void flush_cache_vmap(unsigned long start, unsigned long end)
139{
140 cpu_dcache_wbinval_all();
141 cpu_icache_inval_all();
142}
143
144void flush_cache_vunmap(unsigned long start, unsigned long end)
145{
146 cpu_dcache_wbinval_all();
147 cpu_icache_inval_all();
148}
149
150void copy_user_highpage(struct page *to, struct page *from,
151 unsigned long vaddr, struct vm_area_struct *vma)
152{
153 unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;
154 kto = ((unsigned long)page_address(to) & PAGE_MASK);
155 kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
156 pto = page_to_phys(to);
157 pfrom = page_to_phys(from);
158
159 if (aliasing(vaddr, (unsigned long)kfrom))
160 cpu_dcache_wb_page((unsigned long)kfrom);
161 if (aliasing(vaddr, (unsigned long)kto))
162 cpu_dcache_inval_page((unsigned long)kto);
163 local_irq_save(flags);
164 vto = kremap0(vaddr, pto);
165 vfrom = kremap1(vaddr, pfrom);
166 copy_page((void *)vto, (void *)vfrom);
167 kunmap01(vfrom);
168 kunmap01(vto);
169 local_irq_restore(flags);
170}
171
172EXPORT_SYMBOL(copy_user_highpage);
173
174void clear_user_highpage(struct page *page, unsigned long vaddr)
175{
176 unsigned long vto, flags, kto;
177
178 kto = ((unsigned long)page_address(page) & PAGE_MASK);
179
180 local_irq_save(flags);
181 if (aliasing(kto, vaddr) && kto != 0) {
182 cpu_dcache_inval_page(kto);
183 cpu_icache_inval_page(kto);
184 }
185 vto = kremap0(vaddr, page_to_phys(page));
186 clear_page((void *)vto);
187 kunmap01(vto);
188 local_irq_restore(flags);
189}
190
191EXPORT_SYMBOL(clear_user_highpage);
192
193void flush_dcache_page(struct page *page)
194{
195 struct address_space *mapping;
196
197 mapping = page_mapping(page);
198 if (mapping && !mapping_mapped(mapping))
199 set_bit(PG_dcache_dirty, &page->flags);
200 else {
201 int i, pc;
202 unsigned long vto, kaddr, flags;
203 kaddr = (unsigned long)page_address(page);
204 cpu_dcache_wbinval_page(kaddr);
205 pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
206 local_irq_save(flags);
207 for (i = 0; i < pc; i++) {
208 vto =
209 kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page));
210 cpu_dcache_wbinval_page(vto);
211 kunmap01(vto);
212 }
213 local_irq_restore(flags);
214 }
215}
216
217void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
218 unsigned long vaddr, void *dst, void *src, int len)
219{
220 unsigned long line_size, start, end, vto, flags;
221
222 local_irq_save(flags);
223 vto = kremap0(vaddr, page_to_phys(page));
224 dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
225 memcpy(dst, src, len);
226 if (vma->vm_flags & VM_EXEC) {
227 line_size = L1_cache_info[DCACHE].line_size;
228 start = (unsigned long)dst & ~(line_size - 1);
229 end =
230 ((unsigned long)dst + len + line_size - 1) & ~(line_size -
231 1);
232 cpu_cache_wbinval_range(start, end, 1);
233 }
234 kunmap01(vto);
235 local_irq_restore(flags);
236}
237
238void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
239 unsigned long vaddr, void *dst, void *src, int len)
240{
241 unsigned long vto, flags;
242
243 local_irq_save(flags);
244 vto = kremap0(vaddr, page_to_phys(page));
245 src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
246 memcpy(dst, src, len);
247 kunmap01(vto);
248 local_irq_restore(flags);
249}
250
251void flush_anon_page(struct vm_area_struct *vma,
252 struct page *page, unsigned long vaddr)
253{
254 unsigned long flags;
255 if (!PageAnon(page))
256 return;
257
258 if (vma->vm_mm != current->active_mm)
259 return;
260
261 local_irq_save(flags);
262 if (vma->vm_flags & VM_EXEC)
263 cpu_icache_inval_page(vaddr & PAGE_MASK);
264 cpu_dcache_wbinval_page((unsigned long)page_address(page));
265 local_irq_restore(flags);
266}
267
268void flush_kernel_dcache_page(struct page *page)
269{
270 unsigned long flags;
271 local_irq_save(flags);
272 cpu_dcache_wbinval_page((unsigned long)page_address(page));
273 local_irq_restore(flags);
274}
275
276void flush_icache_range(unsigned long start, unsigned long end)
277{
278 unsigned long line_size, flags;
279 line_size = L1_cache_info[DCACHE].line_size;
280 start = start & ~(line_size - 1);
281 end = (end + line_size - 1) & ~(line_size - 1);
282 local_irq_save(flags);
283 cpu_cache_wbinval_range(start, end, 1);
284 local_irq_restore(flags);
285}
286
287void flush_icache_page(struct vm_area_struct *vma, struct page *page)
288{
289 unsigned long flags;
290 local_irq_save(flags);
291 cpu_cache_wbinval_page((unsigned long)page_address(page),
292 vma->vm_flags & VM_EXEC);
293 local_irq_restore(flags);
294}
295
296void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
297 pte_t * pte)
298{
299 struct page *page;
300 unsigned long flags;
301 unsigned long pfn = pte_pfn(*pte);
302
303 if (!pfn_valid(pfn))
304 return;
305
306 if (vma->vm_mm == current->active_mm) {
307 local_irq_save(flags);
308 __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
309 __nds32__tlbop_rwr(*pte);
310 __nds32__isb();
311 local_irq_restore(flags);
312 }
313
314 page = pfn_to_page(pfn);
315 if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
316 (vma->vm_flags & VM_EXEC)) {
317 local_irq_save(flags);
318 cpu_dcache_wbinval_page((unsigned long)page_address(page));
319 local_irq_restore(flags);
320 }
321}
322#endif
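copy_to_user_page() and copy_from_user_page() exist for callers such as ptrace that touch another process's pages through a kernel mapping; the kremap0() window plus the VM_EXEC writeback keep the user's alias coherent. A simplified sketch of the generic caller (the ptrace write path in mm/memory.c):

maddr = kmap(page);
copy_to_user_page(vma, page, addr,
		  maddr + offset, buf, bytes);
set_page_dirty_lock(page);
kunmap(page);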
diff --git a/arch/nds32/mm/proc.c b/arch/nds32/mm/proc.c
new file mode 100644
index 000000000000..ba80992d13a2
--- /dev/null
+++ b/arch/nds32/mm/proc.c
@@ -0,0 +1,533 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4#include <linux/module.h>
5#include <linux/sched.h>
6#include <linux/mm.h>
7#include <asm/nds32.h>
8#include <asm/pgtable.h>
9#include <asm/tlbflush.h>
10#include <asm/cacheflush.h>
11#include <asm/l2_cache.h>
12#include <nds32_intrinsic.h>
13
14#include <asm/cache_info.h>
15extern struct cache_info L1_cache_info[2];
16
17int va_kernel_present(unsigned long addr)
18{
19 pmd_t *pmd;
20 pte_t *ptep, pte;
21
22 pmd = pmd_offset(pgd_offset_k(addr), addr);
23 if (!pmd_none(*pmd)) {
24 ptep = pte_offset_map(pmd, addr);
25 pte = *ptep;
26 if (pte_present(pte))
27 return pte;
28 }
29 return 0;
30}
31
32pte_t va_present(struct mm_struct * mm, unsigned long addr)
33{
34 pgd_t *pgd;
35 pud_t *pud;
36 pmd_t *pmd;
37 pte_t *ptep, pte;
38
39 pgd = pgd_offset(mm, addr);
40 if (!pgd_none(*pgd)) {
41 pud = pud_offset(pgd, addr);
42 if (!pud_none(*pud)) {
43 pmd = pmd_offset(pud, addr);
44 if (!pmd_none(*pmd)) {
45 ptep = pte_offset_map(pmd, addr);
46 pte = *ptep;
47 if (pte_present(pte))
48 return pte;
49 }
50 }
51 }
52 return 0;
53
54}
55
56int va_readable(struct pt_regs *regs, unsigned long addr)
57{
58 struct mm_struct *mm = current->mm;
59 pte_t pte;
60 int ret = 0;
61
62 if (user_mode(regs)) {
63 /* user mode */
64 pte = va_present(mm, addr);
65		if (pte && pte_read(pte))
66 ret = 1;
67 } else {
68		/* superuser mode is always readable, so we only need
69		 * to check whether the page is present */
70		return (!!va_kernel_present(addr));
71 }
72 return ret;
73}
74
75int va_writable(struct pt_regs *regs, unsigned long addr)
76{
77 struct mm_struct *mm = current->mm;
78 pte_t pte;
79 int ret = 0;
80
81 if (user_mode(regs)) {
82 /* user mode */
83 pte = va_present(mm, addr);
84		if (pte && pte_write(pte))
85 ret = 1;
86 } else {
87 /* superuser mode */
88 pte = va_kernel_present(addr);
89		if (pte && pte_kernel_write(pte))
90 ret = 1;
91 }
92 return ret;
93}
94
95/*
96 * All
97 */
98void cpu_icache_inval_all(void)
99{
100 unsigned long end, line_size;
101
102 line_size = L1_cache_info[ICACHE].line_size;
103 end =
104 line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets;
105
106 do {
107 end -= line_size;
108 __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
109 end -= line_size;
110 __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
111 end -= line_size;
112 __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
113 end -= line_size;
114 __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
115 } while (end > 0);
116 __nds32__isb();
117}
118
119void cpu_dcache_inval_all(void)
120{
121 __nds32__cctl_l1d_invalall();
122}
123
124#ifdef CONFIG_CACHE_L2
125void dcache_wb_all_level(void)
126{
127 unsigned long flags, cmd;
128 local_irq_save(flags);
129 __nds32__cctl_l1d_wball_alvl();
130	/* Section 1: ensure sections 2 & 3 execute only after the writeback above */
131 __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
132
133 /* Section 2: Confirm the writeback all level is done in CPU and L2C */
134 cmd = CCTL_CMD_L2_SYNC;
135 L2_CMD_RDY();
136 L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
137 L2_CMD_RDY();
138
139 /* Section 3: Writeback whole L2 cache */
140 cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
141 L2_CMD_RDY();
142 L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
143 L2_CMD_RDY();
144 __nds32__msync_all();
145 local_irq_restore(flags);
146}
147EXPORT_SYMBOL(dcache_wb_all_level);
148#endif
149
150void cpu_dcache_wb_all(void)
151{
152 __nds32__cctl_l1d_wball_one_lvl();
153 __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
154}
155
156void cpu_dcache_wbinval_all(void)
157{
158#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
159 unsigned long flags;
160 local_irq_save(flags);
161#endif
162 cpu_dcache_wb_all();
163 cpu_dcache_inval_all();
164#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
165 local_irq_restore(flags);
166#endif
167}
168
169/*
170 * Page
171 */
172void cpu_icache_inval_page(unsigned long start)
173{
174 unsigned long line_size, end;
175
176 line_size = L1_cache_info[ICACHE].line_size;
177 end = start + PAGE_SIZE;
178
179 do {
180 end -= line_size;
181 __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
182 end -= line_size;
183 __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
184 end -= line_size;
185 __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
186 end -= line_size;
187 __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
188 } while (end != start);
189 __nds32__isb();
190}
191
192void cpu_dcache_inval_page(unsigned long start)
193{
194 unsigned long line_size, end;
195
196 line_size = L1_cache_info[DCACHE].line_size;
197 end = start + PAGE_SIZE;
198
199 do {
200 end -= line_size;
201 __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
202 end -= line_size;
203 __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
204 end -= line_size;
205 __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
206 end -= line_size;
207 __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
208 } while (end != start);
209}
210
211void cpu_dcache_wb_page(unsigned long start)
212{
213#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
214 unsigned long line_size, end;
215
216 line_size = L1_cache_info[DCACHE].line_size;
217 end = start + PAGE_SIZE;
218
219 do {
220 end -= line_size;
221 __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
222 end -= line_size;
223 __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
224 end -= line_size;
225 __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
226 end -= line_size;
227 __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
228 } while (end != start);
229 __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
230#endif
231}
232
233void cpu_dcache_wbinval_page(unsigned long start)
234{
235 unsigned long line_size, end;
236
237 line_size = L1_cache_info[DCACHE].line_size;
238 end = start + PAGE_SIZE;
239
240 do {
241 end -= line_size;
242#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
243 __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
244#endif
245 __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
246 end -= line_size;
247#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
248 __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
249#endif
250 __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
251 end -= line_size;
252#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
253 __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
254#endif
255 __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
256 end -= line_size;
257#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
258 __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
259#endif
260 __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
261 } while (end != start);
262 __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
263}
264
265void cpu_cache_wbinval_page(unsigned long page, int flushi)
266{
267 cpu_dcache_wbinval_page(page);
268 if (flushi)
269 cpu_icache_inval_page(page);
270}
271
272/*
273 * Range
274 */
275void cpu_icache_inval_range(unsigned long start, unsigned long end)
276{
277 unsigned long line_size;
278
279 line_size = L1_cache_info[ICACHE].line_size;
280
281 while (end > start) {
282 __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
283 start += line_size;
284 }
285 __nds32__isb();
286}
287
288void cpu_dcache_inval_range(unsigned long start, unsigned long end)
289{
290 unsigned long line_size;
291
292 line_size = L1_cache_info[DCACHE].line_size;
293
294 while (end > start) {
295 __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
296 start += line_size;
297 }
298}
299
300void cpu_dcache_wb_range(unsigned long start, unsigned long end)
301{
302#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
303 unsigned long line_size;
304
305 line_size = L1_cache_info[DCACHE].line_size;
306
307 while (end > start) {
308 __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
309 start += line_size;
310 }
311 __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
312#endif
313}
314
315void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
316{
317 unsigned long line_size;
318
319 line_size = L1_cache_info[DCACHE].line_size;
320
321 while (end > start) {
322#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
323 __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
324#endif
325 __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
326 start += line_size;
327 }
328 __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
329}
330
331void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
332{
333 unsigned long line_size, align_start, align_end;
334
335 line_size = L1_cache_info[DCACHE].line_size;
336 align_start = start & ~(line_size - 1);
337 align_end = (end + line_size - 1) & ~(line_size - 1);
338 cpu_dcache_wbinval_range(align_start, align_end);
339
340 if (flushi) {
341 line_size = L1_cache_info[ICACHE].line_size;
342 align_start = start & ~(line_size - 1);
343 align_end = (end + line_size - 1) & ~(line_size - 1);
344 cpu_icache_inval_range(align_start, align_end);
345 }
346}
347
348void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
349 unsigned long start, unsigned long end,
350 bool flushi, bool wbd)
351{
352 unsigned long line_size, t_start, t_end;
353
354 if (!flushi && !wbd)
355 return;
356 line_size = L1_cache_info[DCACHE].line_size;
357 start = start & ~(line_size - 1);
358 end = (end + line_size - 1) & ~(line_size - 1);
359
360 if ((end - start) > (8 * PAGE_SIZE)) {
361 if (wbd)
362 cpu_dcache_wbinval_all();
363 if (flushi)
364 cpu_icache_inval_all();
365 return;
366 }
367
368 t_start = (start + PAGE_SIZE) & PAGE_MASK;
369 t_end = ((end - 1) & PAGE_MASK);
370
371 if ((start & PAGE_MASK) == t_end) {
372 if (va_present(vma->vm_mm, start)) {
373 if (wbd)
374 cpu_dcache_wbinval_range(start, end);
375 if (flushi)
376 cpu_icache_inval_range(start, end);
377 }
378 return;
379 }
380
381 if (va_present(vma->vm_mm, start)) {
382 if (wbd)
383 cpu_dcache_wbinval_range(start, t_start);
384 if (flushi)
385 cpu_icache_inval_range(start, t_start);
386 }
387
388 if (va_present(vma->vm_mm, end - 1)) {
389 if (wbd)
390 cpu_dcache_wbinval_range(t_end, end);
391 if (flushi)
392 cpu_icache_inval_range(t_end, end);
393 }
394
395 while (t_start < t_end) {
396 if (va_present(vma->vm_mm, t_start)) {
397 if (wbd)
398 cpu_dcache_wbinval_page(t_start);
399 if (flushi)
400 cpu_icache_inval_page(t_start);
401 }
402 t_start += PAGE_SIZE;
403 }
404}
405
406#ifdef CONFIG_CACHE_L2
407static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op)
408{
409 if (atl2c_base) {
410 unsigned long p_start = __pa(start);
411 unsigned long p_end = __pa(end);
412 unsigned long cmd;
413 unsigned long line_size;
414		/* TODO: could use page mode to optimize when the range is larger than PAGE_SIZE */
415 line_size = L2_CACHE_LINE_SIZE();
416 p_start = p_start & (~(line_size - 1));
417 p_end = (p_end + line_size - 1) & (~(line_size - 1));
418 cmd =
419 (p_start & ~(line_size - 1)) | op |
420 CCTL_SINGLE_CMD;
421 do {
422 L2_CMD_RDY();
423 L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
424 cmd += line_size;
425 p_start += line_size;
426 } while (p_end > p_start);
427 cmd = CCTL_CMD_L2_SYNC;
428 L2_CMD_RDY();
429 L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
430 L2_CMD_RDY();
431 }
432}
433#else
434#define cpu_l2cache_op(start,end,op) do { } while (0)
435#endif
436/*
437 * DMA
438 */
439void cpu_dma_wb_range(unsigned long start, unsigned long end)
440{
441 unsigned long line_size;
442 unsigned long flags;
443 line_size = L1_cache_info[DCACHE].line_size;
444 start = start & (~(line_size - 1));
445 end = (end + line_size - 1) & (~(line_size - 1));
446 if (unlikely(start == end))
447 return;
448
449 local_irq_save(flags);
450 cpu_dcache_wb_range(start, end);
451 cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
452 __nds32__msync_all();
453 local_irq_restore(flags);
454}
455
456void cpu_dma_inval_range(unsigned long start, unsigned long end)
457{
458 unsigned long line_size;
459 unsigned long old_start = start;
460 unsigned long old_end = end;
461 unsigned long flags;
462 line_size = L1_cache_info[DCACHE].line_size;
463 start = start & (~(line_size - 1));
464 end = (end + line_size - 1) & (~(line_size - 1));
465 if (unlikely(start == end))
466 return;
467 local_irq_save(flags);
468 if (start != old_start) {
469 cpu_dcache_wbinval_range(start, start + line_size);
470 cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
471 }
472 if (end != old_end) {
473 cpu_dcache_wbinval_range(end - line_size, end);
474 cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
475 }
476 cpu_dcache_inval_range(start, end);
477 cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
478 __nds32__msync_all();
479 local_irq_restore(flags);
480
481}
482
483void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
484{
485 unsigned long line_size;
486 unsigned long flags;
487 line_size = L1_cache_info[DCACHE].line_size;
488 start = start & (~(line_size - 1));
489 end = (end + line_size - 1) & (~(line_size - 1));
490 if (unlikely(start == end))
491 return;
492
493 local_irq_save(flags);
494 cpu_dcache_wbinval_range(start, end);
495 cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
496 __nds32__msync_all();
497 local_irq_restore(flags);
498}
499
500void cpu_proc_init(void)
501{
502}
503
504void cpu_proc_fin(void)
505{
506}
507
508void cpu_do_idle(void)
509{
510 __nds32__standby_no_wake_grant();
511}
512
513void cpu_reset(unsigned long reset)
514{
515 u32 tmp;
516 GIE_DISABLE();
517 tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
518 tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
519 __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
520 cpu_dcache_wbinval_all();
521 cpu_icache_inval_all();
522
523 __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
524}
525
526void cpu_switch_mm(struct mm_struct *mm)
527{
528 unsigned long cid;
529 cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
530 cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
531 __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
532 __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
533}
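The cpu_dma_* helpers map onto the streaming-DMA directions in the usual way; a sketch of how an arch dma_sync hook would plausibly dispatch (the wrapper name is hypothetical, and enum dma_data_direction comes from linux/dma-direction.h):

static void nds32_dma_sync(unsigned long start, unsigned long end,
			   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:		/* device will read */
		cpu_dma_wb_range(start, end);
		break;
	case DMA_FROM_DEVICE:		/* device will write */
		cpu_dma_inval_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:
		cpu_dma_wbinval_range(start, end);
		break;
	default:
		break;
	}
}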
diff --git a/arch/nds32/mm/tlb.c b/arch/nds32/mm/tlb.c
new file mode 100644
index 000000000000..dd41f5e0712f
--- /dev/null
+++ b/arch/nds32/mm/tlb.c
@@ -0,0 +1,50 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2017 Andes Technology Corporation
3
4#include <linux/spinlock_types.h>
5#include <linux/mm.h>
6#include <linux/sched.h>
7#include <asm/nds32.h>
8#include <nds32_intrinsic.h>
9
10unsigned int cpu_last_cid = TLB_MISC_mskCID + (2 << TLB_MISC_offCID);
11
12DEFINE_SPINLOCK(cid_lock);
13
14void local_flush_tlb_range(struct vm_area_struct *vma,
15 unsigned long start, unsigned long end)
16{
17 unsigned long flags, ocid, ncid;
18
19 if ((end - start) > 0x400000) {
20 __nds32__tlbop_flua();
21 __nds32__isb();
22 return;
23 }
24
25 spin_lock_irqsave(&cid_lock, flags);
26 ocid = __nds32__mfsr(NDS32_SR_TLB_MISC);
27 ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
28 __nds32__mtsr_dsb(ncid, NDS32_SR_TLB_MISC);
29 while (start < end) {
30 __nds32__tlbop_inv(start);
31 __nds32__isb();
32 start += PAGE_SIZE;
33 }
34 __nds32__mtsr_dsb(ocid, NDS32_SR_TLB_MISC);
35 spin_unlock_irqrestore(&cid_lock, flags);
36}
37
38void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
39{
40 unsigned long flags, ocid, ncid;
41
42 spin_lock_irqsave(&cid_lock, flags);
43 ocid = __nds32__mfsr(NDS32_SR_TLB_MISC);
44 ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
45 __nds32__mtsr_dsb(ncid, NDS32_SR_TLB_MISC);
46 __nds32__tlbop_inv(addr);
47 __nds32__isb();
48 __nds32__mtsr_dsb(ocid, NDS32_SR_TLB_MISC);
49 spin_unlock_irqrestore(&cid_lock, flags);
50}
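The 0x400000 cut-off in local_flush_tlb_range() trades per-page invalidates for one full flush once the range gets large:

/* 0x400000 bytes = 4 MiB = 1024 pages of 4 KiB; past that the loop
 * would issue over a thousand tlbop_inv/isb pairs, so a single
 * flua (flush-all) is assumed to be the cheaper option. */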